author    Avijit <Avijit.Chakraborty@intel.com>  2018-07-25 01:08:01 -0700
committer Avijit <Avijit.Chakraborty@intel.com>  2018-07-25 01:08:01 -0700
commit    1cdacb8b10d0b4687387be5fd8be978d68602a1d (patch)
tree      a2bf88798854a426f073325eb85d85b3ab914418
parent    f88a6f93bee89c610fa8b399d037c7a33c1a0a3e (diff)
parent    3f454e4060d855f43eebe0cdc27d8c24f906d430 (diff)
Merge remote-tracking branch 'upstream/master'
-rw-r--r--.gitignore1
-rw-r--r--ISSUE_TEMPLATE.md3
-rw-r--r--RELEASE.md18
-rw-r--r--WORKSPACE2
-rw-r--r--configure.py733
-rw-r--r--tensorflow/BUILD14
-rw-r--r--tensorflow/c/c_api.cc34
-rw-r--r--tensorflow/c/c_api.h20
-rw-r--r--tensorflow/c/c_api_function_test.cc3
-rw-r--r--tensorflow/c/c_api_test.cc76
-rw-r--r--tensorflow/c/eager/c_api.cc12
-rw-r--r--tensorflow/c/python_api.cc2
-rw-r--r--tensorflow/cc/gradients/nn_grad.cc85
-rw-r--r--tensorflow/cc/gradients/nn_grad_test.cc22
-rw-r--r--tensorflow/cc/saved_model/BUILD41
-rw-r--r--tensorflow/cc/saved_model/loader.cc70
-rw-r--r--tensorflow/cc/saved_model/reader.cc88
-rw-r--r--tensorflow/cc/saved_model/reader.h39
-rw-r--r--tensorflow/cc/saved_model/reader_test.cc108
-rw-r--r--tensorflow/compiler/jit/BUILD6
-rw-r--r--tensorflow/compiler/jit/deadness_analysis.cc566
-rw-r--r--tensorflow/compiler/jit/deadness_analysis.h68
-rw-r--r--tensorflow/compiler/jit/deadness_analysis_test.cc443
-rw-r--r--tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc40
-rw-r--r--tensorflow/compiler/jit/kernels/xla_launch_op.cc6
-rw-r--r--tensorflow/compiler/jit/mark_for_compilation_pass.cc23
-rw-r--r--tensorflow/compiler/jit/mark_for_compilation_pass_test.cc33
-rw-r--r--tensorflow/compiler/jit/xla_compilation_cache.cc18
-rw-r--r--tensorflow/compiler/jit/xla_device_context.cc117
-rw-r--r--tensorflow/compiler/jit/xla_device_context.h5
-rw-r--r--tensorflow/compiler/jit/xla_device_ops.h7
-rw-r--r--tensorflow/compiler/jit/xla_fusion_optimizer.cc12
-rw-r--r--tensorflow/compiler/jit/xla_launch_util.cc12
-rw-r--r--tensorflow/compiler/jit/xla_launch_util.h6
-rw-r--r--tensorflow/compiler/jit/xla_tensor.cc4
-rw-r--r--tensorflow/compiler/tests/BUILD2
-rw-r--r--tensorflow/compiler/tests/binary_ops_test.py12
-rw-r--r--tensorflow/compiler/tests/cholesky_op_test.py8
-rw-r--r--tensorflow/compiler/tests/conv2d_test.py3
-rw-r--r--tensorflow/compiler/tests/eager_test.py48
-rw-r--r--tensorflow/compiler/tests/qr_op_test.py5
-rw-r--r--tensorflow/compiler/tests/sort_ops_test.py32
-rw-r--r--tensorflow/compiler/tests/unary_ops_test.py56
-rw-r--r--tensorflow/compiler/tf2xla/graph_compiler.cc3
-rw-r--r--tensorflow/compiler/tf2xla/kernels/BUILD1
-rw-r--r--tensorflow/compiler/tf2xla/kernels/diag_op.cc105
-rw-r--r--tensorflow/compiler/tf2xla/kernels/if_op.cc1
-rw-r--r--tensorflow/compiler/tf2xla/kernels/matmul_op.cc12
-rw-r--r--tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc168
-rw-r--r--tensorflow/compiler/tf2xla/kernels/topk_op.cc40
-rw-r--r--tensorflow/compiler/tf2xla/kernels/unary_ops.cc90
-rw-r--r--tensorflow/compiler/tf2xla/kernels/while_op.cc1
-rw-r--r--tensorflow/compiler/tf2xla/lib/BUILD1
-rw-r--r--tensorflow/compiler/tf2xla/lib/scatter.cc2
-rw-r--r--tensorflow/compiler/tf2xla/lib/triangular_solve.cc811
-rw-r--r--tensorflow/compiler/tf2xla/lib/triangular_solve.h8
-rw-r--r--tensorflow/compiler/tf2xla/lib/triangular_solve_test.cc64
-rw-r--r--tensorflow/compiler/tf2xla/xla_compiler.cc10
-rw-r--r--tensorflow/compiler/tf2xla/xla_compiler_test.cc52
-rw-r--r--tensorflow/compiler/tf2xla/xla_op_registry.h2
-rw-r--r--tensorflow/compiler/xla/client/client.cc16
-rw-r--r--tensorflow/compiler/xla/client/client.h12
-rw-r--r--tensorflow/compiler/xla/client/lib/BUILD18
-rw-r--r--tensorflow/compiler/xla/client/lib/arithmetic.cc12
-rw-r--r--tensorflow/compiler/xla/client/lib/arithmetic.h6
-rw-r--r--tensorflow/compiler/xla/client/lib/math.cc160
-rw-r--r--tensorflow/compiler/xla/client/lib/math.h37
-rw-r--r--tensorflow/compiler/xla/client/lib/math_test.cc54
-rw-r--r--tensorflow/compiler/xla/client/lib/numeric.cc64
-rw-r--r--tensorflow/compiler/xla/client/lib/numeric.h14
-rw-r--r--tensorflow/compiler/xla/client/lib/numeric_test.cc43
-rw-r--r--tensorflow/compiler/xla/client/lib/prng.cc150
-rw-r--r--tensorflow/compiler/xla/client/lib/prng.h34
-rw-r--r--tensorflow/compiler/xla/client/xla_client/xla_builder.cc147
-rw-r--r--tensorflow/compiler/xla/client/xla_client/xla_builder.h59
-rw-r--r--tensorflow/compiler/xla/literal.cc4
-rw-r--r--tensorflow/compiler/xla/python/local_computation_builder.cc32
-rw-r--r--tensorflow/compiler/xla/python/local_computation_builder.h30
-rw-r--r--tensorflow/compiler/xla/python/local_computation_builder.i28
-rw-r--r--tensorflow/compiler/xla/python/xla_client.py33
-rw-r--r--tensorflow/compiler/xla/python/xla_client_test.py46
-rw-r--r--tensorflow/compiler/xla/service/BUILD3
-rw-r--r--tensorflow/compiler/xla/service/algebraic_simplifier.cc52
-rw-r--r--tensorflow/compiler/xla/service/algebraic_simplifier_test.cc102
-rw-r--r--tensorflow/compiler/xla/service/buffer_assignment.cc116
-rw-r--r--tensorflow/compiler/xla/service/buffer_assignment.h67
-rw-r--r--tensorflow/compiler/xla/service/buffer_assignment_test.cc198
-rw-r--r--tensorflow/compiler/xla/service/channel_tracker.cc28
-rw-r--r--tensorflow/compiler/xla/service/channel_tracker.h6
-rw-r--r--tensorflow/compiler/xla/service/copy_insertion.cc86
-rw-r--r--tensorflow/compiler/xla/service/copy_insertion.h22
-rw-r--r--tensorflow/compiler/xla/service/cpu/BUILD2
-rw-r--r--tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h5
-rw-r--r--tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc265
-rw-r--r--tensorflow/compiler/xla/service/cpu/dot_op_emitter.h7
-rw-r--r--tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.cc15
-rw-r--r--tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.h2
-rw-r--r--tensorflow/compiler/xla/service/cpu/ir_emitter.cc798
-rw-r--r--tensorflow/compiler/xla/service/cpu/ir_emitter.h18
-rw-r--r--tensorflow/compiler/xla/service/cpu/ir_function.cc71
-rw-r--r--tensorflow/compiler/xla/service/cpu/ir_function.h11
-rw-r--r--tensorflow/compiler/xla/service/cpu/llvm_ir_runtime.cc53
-rw-r--r--tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.cc9
-rw-r--r--tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.h2
-rw-r--r--tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc8
-rw-r--r--tensorflow/compiler/xla/service/cpu/vector_support_library.cc163
-rw-r--r--tensorflow/compiler/xla/service/cpu/vector_support_library.h36
-rw-r--r--tensorflow/compiler/xla/service/dfs_hlo_visitor.h1
-rw-r--r--tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h3
-rw-r--r--tensorflow/compiler/xla/service/elemental_ir_emitter.cc1071
-rw-r--r--tensorflow/compiler/xla/service/elemental_ir_emitter.h14
-rw-r--r--tensorflow/compiler/xla/service/executable.cc13
-rw-r--r--tensorflow/compiler/xla/service/generic_transfer_manager.cc5
-rw-r--r--tensorflow/compiler/xla/service/generic_transfer_manager.h3
-rw-r--r--tensorflow/compiler/xla/service/gpu/BUILD20
-rw-r--r--tensorflow/compiler/xla/service/gpu/buffer_allocations.cc8
-rw-r--r--tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc90
-rw-r--r--tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.h2
-rw-r--r--tensorflow/compiler/xla/service/gpu/gpu_constants.cc13
-rw-r--r--tensorflow/compiler/xla/service/gpu/gpu_constants.h9
-rw-r--r--tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc64
-rw-r--r--tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h11
-rw-r--r--tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc22
-rw-r--r--tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h9
-rw-r--r--tensorflow/compiler/xla/service/gpu/infeed_manager.cc69
-rw-r--r--tensorflow/compiler/xla/service/gpu/infeed_manager.h82
-rw-r--r--tensorflow/compiler/xla/service/gpu/infeed_thunk.cc91
-rw-r--r--tensorflow/compiler/xla/service/gpu/instruction_fusion.cc49
-rw-r--r--tensorflow/compiler/xla/service/gpu/instruction_fusion.h4
-rw-r--r--tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc30
-rw-r--r--tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc19
-rw-r--r--tensorflow/compiler/xla/service/gpu/ir_emission_utils.h16
-rw-r--r--tensorflow/compiler/xla/service/gpu/ir_emitter.cc282
-rw-r--r--tensorflow/compiler/xla/service/gpu/ir_emitter.h22
-rw-r--r--tensorflow/compiler/xla/service/gpu/ir_emitter_nested.cc16
-rw-r--r--tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc812
-rw-r--r--tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h5
-rw-r--r--tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD5
-rw-r--r--tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.cc (renamed from tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc)37
-rw-r--r--tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.h (renamed from tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h)6
-rw-r--r--tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc40
-rw-r--r--tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc94
-rw-r--r--tensorflow/compiler/xla/service/gpu/nvptx_compiler.cc (renamed from tensorflow/compiler/xla/service/gpu/gpu_compiler.cc)92
-rw-r--r--tensorflow/compiler/xla/service/gpu/nvptx_compiler.h (renamed from tensorflow/compiler/xla/service/gpu/gpu_compiler.h)16
-rw-r--r--tensorflow/compiler/xla/service/gpu/outfeed_manager.cc19
-rw-r--r--tensorflow/compiler/xla/service/gpu/outfeed_manager.h27
-rw-r--r--tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc2
-rw-r--r--tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.cc60
-rw-r--r--tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h6
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/BUILD223
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/gpu_alignment_test.cc54
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.cc50
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h42
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/gpu_copy_test.cc59
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/gpu_ftz_test.cc119
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/gpu_fusion_test.cc59
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/gpu_index_test.cc147
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/gpu_kernel_tiling_test.cc177
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/gpu_ldg_test.cc141
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/gpu_noalias_test.cc68
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/gpu_unrolling_test.cc185
-rw-r--r--tensorflow/compiler/xla/service/gpu/tests/infeed_test.cc121
-rw-r--r--tensorflow/compiler/xla/service/gpu/thunk.cc59
-rw-r--r--tensorflow/compiler/xla/service/gpu/thunk.h4
-rw-r--r--tensorflow/compiler/xla/service/gpu/xfeed_queue.h90
-rw-r--r--tensorflow/compiler/xla/service/hlo.proto8
-rw-r--r--tensorflow/compiler/xla/service/hlo_alias_analysis.h2
-rw-r--r--tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc4
-rw-r--r--tensorflow/compiler/xla/service/hlo_computation.cc9
-rw-r--r--tensorflow/compiler/xla/service/hlo_computation.h4
-rw-r--r--tensorflow/compiler/xla/service/hlo_cost_analysis.cc4
-rw-r--r--tensorflow/compiler/xla/service/hlo_cost_analysis.h1
-rw-r--r--tensorflow/compiler/xla/service/hlo_cse_test.cc179
-rw-r--r--tensorflow/compiler/xla/service/hlo_domain_metadata.h6
-rw-r--r--tensorflow/compiler/xla/service/hlo_domain_remover.cc4
-rw-r--r--tensorflow/compiler/xla/service/hlo_domain_remover.h11
-rw-r--r--tensorflow/compiler/xla/service/hlo_domain_test.cc21
-rw-r--r--tensorflow/compiler/xla/service/hlo_evaluator.cc116
-rw-r--r--tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h91
-rw-r--r--tensorflow/compiler/xla/service/hlo_graph_dumper.cc1
-rw-r--r--tensorflow/compiler/xla/service/hlo_instruction.cc165
-rw-r--r--tensorflow/compiler/xla/service/hlo_instruction.h58
-rw-r--r--tensorflow/compiler/xla/service/hlo_instruction_test.cc5
-rw-r--r--tensorflow/compiler/xla/service/hlo_instructions.cc135
-rw-r--r--tensorflow/compiler/xla/service/hlo_instructions.h61
-rw-r--r--tensorflow/compiler/xla/service/hlo_module_group_metadata.cc57
-rw-r--r--tensorflow/compiler/xla/service/hlo_module_group_metadata.h18
-rw-r--r--tensorflow/compiler/xla/service/hlo_module_group_util.cc24
-rw-r--r--tensorflow/compiler/xla/service/hlo_opcode.h1
-rw-r--r--tensorflow/compiler/xla/service/hlo_parser.cc60
-rw-r--r--tensorflow/compiler/xla/service/hlo_parser_test.cc26
-rw-r--r--tensorflow/compiler/xla/service/hlo_rematerialization.cc11
-rw-r--r--tensorflow/compiler/xla/service/hlo_rematerialization.h10
-rw-r--r--tensorflow/compiler/xla/service/hlo_rematerialization_test.cc2
-rw-r--r--tensorflow/compiler/xla/service/hlo_runner.cc9
-rw-r--r--tensorflow/compiler/xla/service/hlo_scheduling.cc1
-rw-r--r--tensorflow/compiler/xla/service/hlo_sharding_metadata.cc56
-rw-r--r--tensorflow/compiler/xla/service/hlo_sharding_metadata.h23
-rw-r--r--tensorflow/compiler/xla/service/hlo_verifier.cc143
-rw-r--r--tensorflow/compiler/xla/service/hlo_verifier.h5
-rw-r--r--tensorflow/compiler/xla/service/instruction_fusion.cc1
-rw-r--r--tensorflow/compiler/xla/service/layout_assignment.cc104
-rw-r--r--tensorflow/compiler/xla/service/layout_assignment.h7
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/BUILD23
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc3
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.cc (renamed from tensorflow/compiler/xla/service/llvm_ir/ops.cc)77
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.h (renamed from tensorflow/compiler/xla/service/llvm_ir/ops.h)13
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc30
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h4
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/ir_array.cc54
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/ir_array.h22
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc45
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h56
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc18
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h2
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc101
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h32
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc150
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/llvm_util.h28
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc36
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/loop_emitter.h11
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/sort_util.cc155
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/sort_util.h39
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/tuple_ops.cc49
-rw-r--r--tensorflow/compiler/xla/service/llvm_ir/tuple_ops.h7
-rw-r--r--tensorflow/compiler/xla/service/platform_util.cc13
-rw-r--r--tensorflow/compiler/xla/service/service.cc3
-rw-r--r--tensorflow/compiler/xla/service/shape_inference.cc19
-rw-r--r--tensorflow/compiler/xla/service/shape_inference_test.cc134
-rw-r--r--tensorflow/compiler/xla/service/transfer_manager.h10
-rw-r--r--tensorflow/compiler/xla/service/while_loop_constant_sinking.cc6
-rw-r--r--tensorflow/compiler/xla/service/while_loop_constant_sinking_test.cc45
-rw-r--r--tensorflow/compiler/xla/shape_tree.h140
-rw-r--r--tensorflow/compiler/xla/shape_tree_test.cc21
-rw-r--r--tensorflow/compiler/xla/shape_util.cc55
-rw-r--r--tensorflow/compiler/xla/shape_util.h15
-rw-r--r--tensorflow/compiler/xla/shape_util_test.cc20
-rw-r--r--tensorflow/compiler/xla/tests/BUILD20
-rw-r--r--tensorflow/compiler/xla/tests/client_library_test_base.cc64
-rw-r--r--tensorflow/compiler/xla/tests/client_library_test_base.h6
-rw-r--r--tensorflow/compiler/xla/tests/conditional_test.cc104
-rw-r--r--tensorflow/compiler/xla/tests/convert_test.cc70
-rw-r--r--tensorflow/compiler/xla/tests/dynamic_ops_test.cc23
-rw-r--r--tensorflow/compiler/xla/tests/fusion_test.cc40
-rw-r--r--tensorflow/compiler/xla/tests/iota_test.cc61
-rw-r--r--tensorflow/compiler/xla/tests/llvm_compiler_test.cc6
-rw-r--r--tensorflow/compiler/xla/tests/local_client_execute_test.cc8
-rw-r--r--tensorflow/compiler/xla/tests/local_client_test_base.cc14
-rw-r--r--tensorflow/compiler/xla/tests/prng_test.cc2
-rw-r--r--tensorflow/compiler/xla/tests/reduce_test.cc43
-rw-r--r--tensorflow/compiler/xla/tests/slice_test.cc6
-rw-r--r--tensorflow/compiler/xla/tests/tuple_test.cc47
-rw-r--r--tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc1
-rw-r--r--tensorflow/compiler/xla/xla.proto1
-rw-r--r--tensorflow/compiler/xla/xla_data.proto16
-rw-r--r--tensorflow/contrib/BUILD6
-rw-r--r--tensorflow/contrib/android/cmake/src/main/AndroidManifest.xml4
-rw-r--r--tensorflow/contrib/autograph/README.md17
-rw-r--r--tensorflow/contrib/autograph/__init__.py19
-rw-r--r--tensorflow/contrib/autograph/converters/BUILD48
-rw-r--r--tensorflow/contrib/autograph/converters/asserts.py8
-rw-r--r--tensorflow/contrib/autograph/converters/asserts_test.py4
-rw-r--r--tensorflow/contrib/autograph/converters/break_statements.py35
-rw-r--r--tensorflow/contrib/autograph/converters/break_statements_test.py62
-rw-r--r--tensorflow/contrib/autograph/converters/builtin_functions_test.py60
-rw-r--r--tensorflow/contrib/autograph/converters/call_trees_test.py76
-rw-r--r--tensorflow/contrib/autograph/converters/conditional_expressions.py129
-rw-r--r--tensorflow/contrib/autograph/converters/conditional_expressions_test.py53
-rw-r--r--tensorflow/contrib/autograph/converters/continue_statements_test.py48
-rw-r--r--tensorflow/contrib/autograph/converters/control_flow.py165
-rw-r--r--tensorflow/contrib/autograph/converters/control_flow_test.py212
-rw-r--r--tensorflow/contrib/autograph/converters/decorators_test.py15
-rw-r--r--tensorflow/contrib/autograph/converters/directives.py108
-rw-r--r--tensorflow/contrib/autograph/converters/directives_test.py78
-rw-r--r--tensorflow/contrib/autograph/converters/error_handlers.py52
-rw-r--r--tensorflow/contrib/autograph/converters/error_handlers_test.py55
-rw-r--r--tensorflow/contrib/autograph/converters/ifexp.py49
-rw-r--r--tensorflow/contrib/autograph/converters/ifexp_test.py106
-rw-r--r--tensorflow/contrib/autograph/converters/list_comprehension.py77
-rw-r--r--tensorflow/contrib/autograph/converters/list_comprehensions.py82
-rw-r--r--tensorflow/contrib/autograph/converters/list_comprehensions_test.py (renamed from tensorflow/contrib/autograph/converters/list_comprehension_test.py)40
-rw-r--r--tensorflow/contrib/autograph/converters/lists.py30
-rw-r--r--tensorflow/contrib/autograph/converters/lists_test.py90
-rw-r--r--tensorflow/contrib/autograph/converters/logical_expressions_test.py13
-rw-r--r--tensorflow/contrib/autograph/converters/name_scopes_test.py90
-rw-r--r--tensorflow/contrib/autograph/converters/return_statements.py (renamed from tensorflow/contrib/autograph/converters/single_return.py)0
-rw-r--r--tensorflow/contrib/autograph/converters/return_statements_test.py167
-rw-r--r--tensorflow/contrib/autograph/converters/side_effect_guards_test.py132
-rw-r--r--tensorflow/contrib/autograph/converters/single_return_test.py189
-rw-r--r--tensorflow/contrib/autograph/converters/slices.py10
-rw-r--r--tensorflow/contrib/autograph/converters/slices_test.py47
-rw-r--r--tensorflow/contrib/autograph/core/converter.py120
-rw-r--r--tensorflow/contrib/autograph/core/converter_testing.py60
-rw-r--r--tensorflow/contrib/autograph/examples/integration_tests/BUILD12
-rw-r--r--tensorflow/contrib/autograph/examples/integration_tests/keras_test.py25
-rw-r--r--tensorflow/contrib/autograph/examples/integration_tests/list_literals_test.py (renamed from tensorflow/contrib/autograph/core/annos.py)28
-rw-r--r--tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_collatz_speed_test.ipynb299
-rw-r--r--tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_mnist_speed_test.ipynb652
-rw-r--r--tensorflow/contrib/autograph/examples/notebooks/algorithms.ipynb1512
-rw-r--r--tensorflow/contrib/autograph/examples/notebooks/autograph_vs_eager_mnist_benchmark.ipynb666
-rw-r--r--tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb1493
-rw-r--r--tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb148
-rw-r--r--tensorflow/contrib/autograph/impl/api.py24
-rw-r--r--tensorflow/contrib/autograph/impl/api_test.py15
-rw-r--r--tensorflow/contrib/autograph/impl/conversion.py76
-rw-r--r--tensorflow/contrib/autograph/impl/conversion_test.py8
-rw-r--r--tensorflow/contrib/autograph/lang/special_functions.py37
-rw-r--r--tensorflow/contrib/autograph/lang/special_functions_test.py18
-rw-r--r--tensorflow/contrib/autograph/operators/data_structures.py91
-rw-r--r--tensorflow/contrib/autograph/operators/data_structures_test.py41
-rw-r--r--tensorflow/contrib/autograph/pyct/anno.py89
-rw-r--r--tensorflow/contrib/autograph/pyct/anno_test.py23
-rw-r--r--tensorflow/contrib/autograph/pyct/ast_util.py175
-rw-r--r--tensorflow/contrib/autograph/pyct/ast_util_test.py142
-rw-r--r--tensorflow/contrib/autograph/pyct/cfg.py142
-rw-r--r--tensorflow/contrib/autograph/pyct/cfg_test.py213
-rw-r--r--tensorflow/contrib/autograph/pyct/compiler.py111
-rw-r--r--tensorflow/contrib/autograph/pyct/compiler_test.py4
-rw-r--r--tensorflow/contrib/autograph/pyct/origin_info.py56
-rw-r--r--tensorflow/contrib/autograph/pyct/qual_names.py28
-rw-r--r--tensorflow/contrib/autograph/pyct/qual_names_test.py9
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/BUILD25
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/activity.py226
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py76
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/annos.py10
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/cfg.py446
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/cfg_test.py303
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/live_values.py44
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py5
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/liveness.py200
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/liveness_test.py149
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py301
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions_test.py263
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/type_info.py48
-rw-r--r--tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py5
-rw-r--r--tensorflow/contrib/autograph/pyct/templates.py88
-rw-r--r--tensorflow/contrib/autograph/pyct/templates_test.py6
-rw-r--r--tensorflow/contrib/autograph/pyct/transformer.py156
-rw-r--r--tensorflow/contrib/autograph/pyct/transformer_test.py159
-rw-r--r--tensorflow/contrib/autograph/utils/BUILD1
-rw-r--r--tensorflow/contrib/autograph/utils/__init__.py1
-rw-r--r--tensorflow/contrib/autograph/utils/builtins.py2
-rw-r--r--tensorflow/contrib/autograph/utils/builtins_test.py3
-rw-r--r--tensorflow/contrib/bigtable/README.md338
-rw-r--r--tensorflow/contrib/bigtable/__init__.py6
-rw-r--r--tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc2
-rw-r--r--tensorflow/contrib/bigtable/kernels/bigtable_lib.cc8
-rw-r--r--tensorflow/contrib/bigtable/kernels/bigtable_lib.h3
-rw-r--r--tensorflow/contrib/bigtable/python/kernel_tests/bigtable_ops_test.py2
-rw-r--r--tensorflow/contrib/bigtable/python/ops/bigtable_api.py17
-rw-r--r--tensorflow/contrib/boosted_trees/estimator_batch/custom_export_strategy.py9
-rw-r--r--tensorflow/contrib/boosted_trees/estimator_batch/estimator.py43
-rw-r--r--tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py29
-rw-r--r--tensorflow/contrib/boosted_trees/estimator_batch/model.py74
-rw-r--r--tensorflow/contrib/boosted_trees/examples/boston.py4
-rw-r--r--tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc17
-rw-r--r--tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc24
-rw-r--r--tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py18
-rw-r--r--tensorflow/contrib/checkpoint/python/containers.py6
-rw-r--r--tensorflow/contrib/cloud/README.md4
-rw-r--r--tensorflow/contrib/cloud/__init__.py4
-rw-r--r--tensorflow/contrib/cmake/CMakeLists.txt45
-rw-r--r--tensorflow/contrib/cmake/python_modules.txt3
-rw-r--r--tensorflow/contrib/cmake/tf_core_kernels.cmake1
-rwxr-xr-xtensorflow/contrib/cmake/tf_python.cmake14
-rw-r--r--tensorflow/contrib/cmake/tf_tests.cmake4
-rw-r--r--tensorflow/contrib/copy_graph/python/util/copy_elements.py6
-rw-r--r--tensorflow/contrib/crf/__init__.py2
-rw-r--r--tensorflow/contrib/crf/python/kernel_tests/crf_test.py62
-rw-r--r--tensorflow/contrib/crf/python/ops/crf.py52
-rw-r--r--tensorflow/contrib/data/__init__.py2
-rw-r--r--tensorflow/contrib/data/kernels/BUILD11
-rw-r--r--tensorflow/contrib/data/kernels/assert_next_dataset_op.cc152
-rw-r--r--tensorflow/contrib/data/kernels/csv_dataset_op.cc153
-rw-r--r--tensorflow/contrib/data/kernels/prefetching_kernels.cc463
-rw-r--r--tensorflow/contrib/data/ops/dataset_ops.cc99
-rw-r--r--tensorflow/contrib/data/python/kernel_tests/BUILD6
-rw-r--r--tensorflow/contrib/data/python/kernel_tests/csv_dataset_op_test.py143
-rw-r--r--tensorflow/contrib/data/python/kernel_tests/get_single_element_test.py82
-rw-r--r--tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py66
-rw-r--r--tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py151
-rw-r--r--tensorflow/contrib/data/python/kernel_tests/reader_dataset_ops_test.py875
-rw-r--r--tensorflow/contrib/data/python/kernel_tests/serialization/BUILD14
-rw-r--r--tensorflow/contrib/data/python/kernel_tests/serialization/csv_dataset_serialization_test.py73
-rw-r--r--tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py255
-rw-r--r--tensorflow/contrib/data/python/ops/BUILD3
-rw-r--r--tensorflow/contrib/data/python/ops/get_single_element.py30
-rw-r--r--tensorflow/contrib/data/python/ops/optimization.py53
-rw-r--r--tensorflow/contrib/data/python/ops/prefetching_ops.py177
-rw-r--r--tensorflow/contrib/data/python/ops/readers.py17
-rw-r--r--tensorflow/contrib/data/python/ops/sliding.py69
-rw-r--r--tensorflow/contrib/distribute/README.md2
-rw-r--r--tensorflow/contrib/distribute/python/BUILD37
-rw-r--r--tensorflow/contrib/distribute/python/checkpoint_utils_test.py72
-rw-r--r--tensorflow/contrib/distribute/python/mirrored_strategy_multigpu_test.py69
-rw-r--r--tensorflow/contrib/distribute/python/values.py137
-rw-r--r--tensorflow/contrib/distribute/python/values_test.py25
-rw-r--r--tensorflow/contrib/distribute/python/warm_starting_util_test.py97
-rw-r--r--tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py8
-rw-r--r--tensorflow/contrib/eager/python/datasets.py64
-rw-r--r--tensorflow/contrib/eager/python/datasets_test.py14
-rw-r--r--tensorflow/contrib/eager/python/examples/densenet/BUILD19
-rw-r--r--tensorflow/contrib/eager/python/examples/densenet/densenet.py48
-rw-r--r--tensorflow/contrib/eager/python/examples/densenet/densenet_graph_test.py149
-rw-r--r--tensorflow/contrib/eager/python/examples/densenet/densenet_test.py241
-rw-r--r--tensorflow/contrib/eager/python/examples/gan/mnist.py19
-rw-r--r--tensorflow/contrib/eager/python/examples/generative_examples/dcgan.ipynb733
-rw-r--r--tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb2
-rw-r--r--tensorflow/contrib/eager/python/examples/l2hmc/README.md54
-rw-r--r--tensorflow/contrib/eager/python/examples/l2hmc/l2hmc.py145
-rw-r--r--tensorflow/contrib/eager/python/examples/l2hmc/l2hmc_test.py97
-rw-r--r--tensorflow/contrib/eager/python/examples/l2hmc/main.py235
-rw-r--r--tensorflow/contrib/eager/python/examples/l2hmc/neural_nets.py5
-rw-r--r--tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb7
-rw-r--r--tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py24
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/BUILD36
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/blocks.py386
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/cifar_input.py2
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/config.py61
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/main.py95
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/main_estimator.py200
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/main_estimator_tpu.py328
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/revnet.py112
-rw-r--r--tensorflow/contrib/eager/python/examples/revnet/revnet_test.py27
-rw-r--r--tensorflow/contrib/eager/python/examples/rnn_ptb/rnn_ptb.py4
-rw-r--r--tensorflow/contrib/eager/python/examples/sagan/sagan.py2
-rw-r--r--tensorflow/contrib/eager/python/examples/workshop/2_models.ipynb2
-rw-r--r--tensorflow/contrib/eager/python/metrics_impl.py10
-rw-r--r--tensorflow/contrib/eager/python/saver.py4
-rw-r--r--tensorflow/contrib/eager/python/tfe_test.py7
-rw-r--r--tensorflow/contrib/estimator/BUILD204
-rw-r--r--tensorflow/contrib/estimator/python/estimator/early_stopping.py9
-rw-r--r--tensorflow/contrib/estimator/python/estimator/early_stopping_test.py13
-rw-r--r--tensorflow/contrib/estimator/python/estimator/head.py16
-rw-r--r--tensorflow/contrib/estimator/python/estimator/head_test.py14
-rw-r--r--tensorflow/contrib/estimator/python/estimator/hooks.py2
-rw-r--r--tensorflow/contrib/estimator/python/estimator/hooks_test.py2
-rw-r--r--tensorflow/contrib/framework/python/ops/variables.py97
-rw-r--r--tensorflow/contrib/framework/python/ops/variables_test.py10
-rw-r--r--tensorflow/contrib/fused_conv/kernels/fused_conv2d_bias_activation_op.cc33
-rw-r--r--tensorflow/contrib/fused_conv/kernels/fused_conv_ops_gpu.h12
-rw-r--r--tensorflow/contrib/fused_conv/ops/fused_conv2d_bias_activation_op.cc4
-rw-r--r--tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op.py6
-rw-r--r--tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py25
-rw-r--r--tensorflow/contrib/gan/BUILD10
-rw-r--r--tensorflow/contrib/gan/python/features/python/random_tensor_pool_impl.py37
-rw-r--r--tensorflow/contrib/gan/python/features/python/random_tensor_pool_test.py19
-rw-r--r--tensorflow/contrib/gan/python/losses/python/losses_impl.py16
-rw-r--r--tensorflow/contrib/gan/python/namedtuples.py50
-rw-r--r--tensorflow/contrib/gan/python/train.py238
-rw-r--r--tensorflow/contrib/gan/python/train_test.py843
-rw-r--r--tensorflow/contrib/gdr/gdr_memory_manager.cc32
-rw-r--r--tensorflow/contrib/graph_editor/reroute.py12
-rw-r--r--tensorflow/contrib/layers/python/layers/layers.py33
-rw-r--r--tensorflow/contrib/learn/python/learn/estimators/head.py10
-rw-r--r--tensorflow/contrib/learn/python/learn/estimators/run_config.py4
-rw-r--r--tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py14
-rw-r--r--tensorflow/contrib/lite/BUILD7
-rw-r--r--tensorflow/contrib/lite/Makefile3
-rw-r--r--tensorflow/contrib/lite/arena_planner.cc42
-rw-r--r--tensorflow/contrib/lite/arena_planner.h17
-rw-r--r--tensorflow/contrib/lite/arena_planner_test.cc8
-rw-r--r--tensorflow/contrib/lite/build_def.bzl419
-rwxr-xr-xtensorflow/contrib/lite/build_ios_universal_lib.sh3
-rw-r--r--tensorflow/contrib/lite/builtin_op_data.h9
-rw-r--r--tensorflow/contrib/lite/builtin_ops.h4
-rw-r--r--tensorflow/contrib/lite/context.h6
-rw-r--r--tensorflow/contrib/lite/delegates/eager/BUILD97
-rw-r--r--tensorflow/contrib/lite/delegates/eager/buffer_map.cc107
-rw-r--r--tensorflow/contrib/lite/delegates/eager/buffer_map.h61
-rw-r--r--tensorflow/contrib/lite/delegates/eager/buffer_map_test.cc174
-rw-r--r--tensorflow/contrib/lite/delegates/eager/delegate_data.cc46
-rw-r--r--tensorflow/contrib/lite/delegates/eager/delegate_data.h48
-rw-r--r--tensorflow/contrib/lite/delegates/eager/delegate_data_test.cc44
-rw-r--r--tensorflow/contrib/lite/delegates/eager/util.cc72
-rw-r--r--tensorflow/contrib/lite/delegates/eager/util.h42
-rw-r--r--tensorflow/contrib/lite/delegates/eager/util_test.cc113
-rw-r--r--tensorflow/contrib/lite/delegates/nnapi/BUILD1
-rw-r--r--tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate.cc12
-rw-r--r--tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate_test.cc35
-rwxr-xr-xtensorflow/contrib/lite/download_dependencies.sh2
-rw-r--r--tensorflow/contrib/lite/examples/android/app/README.md6
-rw-r--r--tensorflow/contrib/lite/examples/android/app/build.gradle2
-rw-r--r--tensorflow/contrib/lite/examples/label_image/BUILD1
-rw-r--r--tensorflow/contrib/lite/experimental/c/BUILD63
-rw-r--r--tensorflow/contrib/lite/experimental/c/c_api.cc118
-rw-r--r--tensorflow/contrib/lite/experimental/c/c_api.h149
-rw-r--r--tensorflow/contrib/lite/experimental/c/c_api_test.cc84
-rw-r--r--tensorflow/contrib/lite/experimental/c/exported_symbols.lds1
-rw-r--r--tensorflow/contrib/lite/experimental/c/version_script.lds9
-rw-r--r--tensorflow/contrib/lite/g3doc/README.md4
-rw-r--r--tensorflow/contrib/lite/g3doc/_book.yaml58
-rw-r--r--tensorflow/contrib/lite/g3doc/_index.yaml67
-rw-r--r--tensorflow/contrib/lite/g3doc/_project.yaml10
-rw-r--r--tensorflow/contrib/lite/g3doc/api_docs/python/_toc.yaml6
-rw-r--r--tensorflow/contrib/lite/g3doc/api_docs/python/index.md10
-rw-r--r--tensorflow/contrib/lite/g3doc/apis.md4
-rw-r--r--tensorflow/contrib/lite/g3doc/benchmarks.md178
-rw-r--r--tensorflow/contrib/lite/g3doc/custom_operators.md47
-rw-r--r--tensorflow/contrib/lite/g3doc/demo_android.md (renamed from tensorflow/docs_src/mobile/tflite/demo_android.md)3
-rw-r--r--tensorflow/contrib/lite/g3doc/demo_ios.md (renamed from tensorflow/docs_src/mobile/tflite/demo_ios.md)3
-rw-r--r--tensorflow/contrib/lite/g3doc/devguide.md (renamed from tensorflow/docs_src/mobile/tflite/devguide.md)27
-rw-r--r--tensorflow/contrib/lite/g3doc/ios.md3
-rw-r--r--tensorflow/contrib/lite/g3doc/models.md35
-rw-r--r--tensorflow/contrib/lite/g3doc/ops_versioning.md3
-rw-r--r--tensorflow/contrib/lite/g3doc/overview.md (renamed from tensorflow/docs_src/mobile/tflite/index.md)19
-rw-r--r--tensorflow/contrib/lite/g3doc/performance.md (renamed from tensorflow/docs_src/mobile/tflite/performance.md)3
-rw-r--r--tensorflow/contrib/lite/g3doc/rpi.md3
-rw-r--r--tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md15
-rw-r--r--tensorflow/contrib/lite/g3doc/tfmobile/android_build.md (renamed from tensorflow/docs_src/mobile/android_build.md)6
-rw-r--r--tensorflow/contrib/lite/g3doc/tfmobile/index.md (renamed from tensorflow/docs_src/mobile/mobile_intro.md)50
-rw-r--r--tensorflow/contrib/lite/g3doc/tfmobile/ios_build.md (renamed from tensorflow/docs_src/mobile/ios_build.md)3
-rw-r--r--tensorflow/contrib/lite/g3doc/tfmobile/linking_libs.md (renamed from tensorflow/docs_src/mobile/linking_libs.md)109
-rw-r--r--tensorflow/contrib/lite/g3doc/tfmobile/optimizing.md (renamed from tensorflow/docs_src/mobile/optimizing.md)11
-rw-r--r--tensorflow/contrib/lite/g3doc/tfmobile/prepare_models.md (renamed from tensorflow/docs_src/mobile/prepare_models.md)7
-rw-r--r--tensorflow/contrib/lite/interpreter.cc22
-rw-r--r--tensorflow/contrib/lite/interpreter.h2
-rw-r--r--tensorflow/contrib/lite/java/AndroidManifest.xml11
-rw-r--r--tensorflow/contrib/lite/java/BUILD5
-rw-r--r--tensorflow/contrib/lite/java/demo/app/build.gradle2
-rw-r--r--tensorflow/contrib/lite/java/ovic/BUILD1
-rw-r--r--tensorflow/contrib/lite/java/ovic/demo/app/build.gradle2
-rw-r--r--tensorflow/contrib/lite/kernels/BUILD196
-rw-r--r--tensorflow/contrib/lite/kernels/activations.cc54
-rw-r--r--tensorflow/contrib/lite/kernels/activations_test.cc23
-rw-r--r--tensorflow/contrib/lite/kernels/add.cc85
-rw-r--r--tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc57
-rw-r--r--tensorflow/contrib/lite/kernels/bidirectional_sequence_rnn.cc1
-rw-r--r--tensorflow/contrib/lite/kernels/concatenation.cc1
-rw-r--r--tensorflow/contrib/lite/kernels/conv.cc1
-rw-r--r--tensorflow/contrib/lite/kernels/depthwise_conv.cc1
-rw-r--r--tensorflow/contrib/lite/kernels/div.cc62
-rw-r--r--tensorflow/contrib/lite/kernels/div_test.cc61
-rw-r--r--tensorflow/contrib/lite/kernels/eigen_support.cc11
-rw-r--r--tensorflow/contrib/lite/kernels/embedding_lookup.cc4
-rw-r--r--tensorflow/contrib/lite/kernels/embedding_lookup_test.cc36
-rw-r--r--tensorflow/contrib/lite/kernels/fake_quant.cc11
-rw-r--r--tensorflow/contrib/lite/kernels/fully_connected.cc11
-rw-r--r--tensorflow/contrib/lite/kernels/fully_connected_test.cc12
-rw-r--r--tensorflow/contrib/lite/kernels/hashtable_lookup.cc1
-rw-r--r--tensorflow/contrib/lite/kernels/internal/BUILD22
-rw-r--r--tensorflow/contrib/lite/kernels/internal/common.h133
-rw-r--r--tensorflow/contrib/lite/kernels/internal/kernel_utils.cc23
-rw-r--r--tensorflow/contrib/lite/kernels/internal/optimized/legacy_optimized_ops.h239
-rw-r--r--tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc71
-rw-r--r--tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h4
-rw-r--r--tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h797
-rw-r--r--tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h6
-rw-r--r--tensorflow/contrib/lite/kernels/internal/quantization_util.h10
-rw-r--r--tensorflow/contrib/lite/kernels/internal/reference/legacy_reference_ops.h234
-rw-r--r--tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc7
-rw-r--r--tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h10
-rw-r--r--tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h1039
-rw-r--r--tensorflow/contrib/lite/kernels/internal/tensor_utils.h4
-rw-r--r--tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc16
-rw-r--r--tensorflow/contrib/lite/kernels/internal/types.h120
-rw-r--r--tensorflow/contrib/lite/kernels/lsh_projection.cc1
-rw-r--r--tensorflow/contrib/lite/kernels/lstm.cc8
-rw-r--r--tensorflow/contrib/lite/kernels/lstm_test.cc3
-rw-r--r--tensorflow/contrib/lite/kernels/pack.cc131
-rw-r--r--tensorflow/contrib/lite/kernels/pack_test.cc120
-rw-r--r--tensorflow/contrib/lite/kernels/pooling.cc1
-rw-r--r--tensorflow/contrib/lite/kernels/pow_test.cc28
-rw-r--r--tensorflow/contrib/lite/kernels/reduce.cc115
-rw-r--r--tensorflow/contrib/lite/kernels/reduce_test.cc388
-rw-r--r--tensorflow/contrib/lite/kernels/register.cc8
-rw-r--r--tensorflow/contrib/lite/kernels/reshape.cc69
-rw-r--r--tensorflow/contrib/lite/kernels/sparse_to_dense.cc1
-rw-r--r--tensorflow/contrib/lite/kernels/sub.cc98
-rw-r--r--tensorflow/contrib/lite/kernels/sub_test.cc58
-rw-r--r--tensorflow/contrib/lite/kernels/svdf.cc3
-rw-r--r--tensorflow/contrib/lite/kernels/transpose_conv.cc112
-rw-r--r--tensorflow/contrib/lite/kernels/transpose_conv_test.cc121
-rw-r--r--tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc4
-rw-r--r--tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc1
-rw-r--r--tensorflow/contrib/lite/model.cc14
-rw-r--r--tensorflow/contrib/lite/model_test.cc1
-rw-r--r--tensorflow/contrib/lite/models/smartreply/BUILD3
-rw-r--r--tensorflow/contrib/lite/nnapi_delegate.cc13
-rw-r--r--tensorflow/contrib/lite/profiling/BUILD1
-rw-r--r--tensorflow/contrib/lite/profiling/profile_summarizer.cc21
-rw-r--r--tensorflow/contrib/lite/python/BUILD11
-rw-r--r--tensorflow/contrib/lite/python/convert.py9
-rw-r--r--tensorflow/contrib/lite/python/interpreter.py6
-rw-r--r--tensorflow/contrib/lite/python/interpreter_test.py2
-rw-r--r--tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.cc71
-rw-r--r--tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h22
-rw-r--r--tensorflow/contrib/lite/python/lite.py61
-rw-r--r--tensorflow/contrib/lite/python/tflite_convert.py2
-rw-r--r--tensorflow/contrib/lite/schema/BUILD6
-rw-r--r--tensorflow/contrib/lite/schema/builtin_ops_header/BUILD2
-rw-r--r--tensorflow/contrib/lite/schema/schema.fbs20
-rwxr-xr-xtensorflow/contrib/lite/schema/schema_generated.h297
-rw-r--r--tensorflow/contrib/lite/simple_memory_arena.cc2
-rw-r--r--tensorflow/contrib/lite/testdata/add.bin (binary) 0 -> 476 bytes
-rw-r--r--tensorflow/contrib/lite/testing/BUILD3
-rw-r--r--tensorflow/contrib/lite/testing/generate_examples.py118
-rw-r--r--tensorflow/contrib/lite/testing/generate_testspec.cc12
-rw-r--r--tensorflow/contrib/lite/testing/generate_testspec.h2
-rw-r--r--tensorflow/contrib/lite/testing/generated_examples_zip_test.cc3
-rw-r--r--tensorflow/contrib/lite/testing/join.h3
-rw-r--r--tensorflow/contrib/lite/testing/test_runner.h2
-rw-r--r--tensorflow/contrib/lite/testing/tf_driver.cc4
-rw-r--r--tensorflow/contrib/lite/testing/tflite_diff_flags.h2
-rw-r--r--tensorflow/contrib/lite/toco/BUILD14
-rw-r--r--tensorflow/contrib/lite/toco/export_tensorflow.cc209
-rw-r--r--tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md2
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/convert_expanddims_to_reshape.cc5
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/convert_trivial_pack_to_reshape.cc (renamed from tensorflow/contrib/lite/toco/graph_transformations/convert_trivial_stack_to_reshape.cc)30
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc1
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h10
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc11
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc11
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc18
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc127
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc69
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h20
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/quantize.cc13
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc6
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc78
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/read_fake_quant_min_max.cc112
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/remove_trivial_reshape.cc9
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc46
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_gather.cc12
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_pack.cc (renamed from tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_stack.cc)24
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc8
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_fake_quant_args_from_vars.cc80
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_gather_attributes.cc53
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/resolve_reduce_attributes.cc (renamed from tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc)27
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/tests/BUILD3
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/unfuse_activation_functions.cc9
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/unpartition_embedding_lookup.cc2
-rw-r--r--tensorflow/contrib/lite/toco/graph_transformations/unroll_batch_matmul.cc16
-rw-r--r--tensorflow/contrib/lite/toco/import_tensorflow.cc122
-rw-r--r--tensorflow/contrib/lite/toco/model.h128
-rw-r--r--tensorflow/contrib/lite/toco/model_cmdline_flags.cc4
-rw-r--r--tensorflow/contrib/lite/toco/python/BUILD5
-rw-r--r--tensorflow/contrib/lite/toco/tensorflow_graph_matching/BUILD1
-rw-r--r--tensorflow/contrib/lite/toco/tflite/BUILD4
-rw-r--r--tensorflow/contrib/lite/toco/tflite/export_test.cc2
-rw-r--r--tensorflow/contrib/lite/toco/tflite/operator.cc79
-rw-r--r--tensorflow/contrib/lite/toco/tflite/operator_test.cc10
-rw-r--r--tensorflow/contrib/lite/toco/toco_tooling.cc10
-rw-r--r--tensorflow/contrib/lite/toco/tooling_util.cc27
-rw-r--r--tensorflow/contrib/lite/toco/tooling_util_test.cc17
-rw-r--r--tensorflow/contrib/lite/tools/BUILD2
-rw-r--r--tensorflow/contrib/lite/tools/benchmark/BUILD32
-rw-r--r--tensorflow/contrib/lite/tools/benchmark/README.md2
-rw-r--r--tensorflow/contrib/lite/tools/benchmark/benchmark_model.cc12
-rw-r--r--tensorflow/contrib/lite/tools/benchmark/benchmark_model.h7
-rw-r--r--tensorflow/contrib/lite/tools/benchmark/benchmark_params.h4
-rw-r--r--tensorflow/contrib/lite/tools/benchmark/benchmark_test.cc74
-rw-r--r--tensorflow/contrib/lite/tools/benchmark/benchmark_tflite_model.cc6
-rw-r--r--tensorflow/contrib/lite/tools/benchmark/benchmark_tflite_model.h4
-rw-r--r--tensorflow/contrib/lite/util.h7
-rw-r--r--tensorflow/contrib/lookup/lookup_ops_test.py15
-rw-r--r--tensorflow/contrib/makefile/proto_text_cc_files.txt1
-rw-r--r--tensorflow/contrib/makefile/tf_op_files.txt2
-rw-r--r--tensorflow/contrib/metrics/python/ops/metric_ops.py18
-rw-r--r--tensorflow/contrib/metrics/python/ops/metric_ops_test.py5
-rw-r--r--tensorflow/contrib/model_pruning/README.md11
-rw-r--r--tensorflow/contrib/model_pruning/python/learning.py11
-rw-r--r--tensorflow/contrib/model_pruning/python/pruning.py8
-rw-r--r--tensorflow/contrib/nccl/kernels/nccl_manager.cc2
-rw-r--r--tensorflow/contrib/optimizer_v2/BUILD1
-rw-r--r--tensorflow/contrib/optimizer_v2/rmsprop_test.py718
-rw-r--r--tensorflow/contrib/proto/BUILD12
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/BUILD80
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/decode_proto_fail_test.py68
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py261
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test_base.py303
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/descriptor_source_test.py36
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/descriptor_source_test_base.py176
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py152
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test_base.py177
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/proto_op_test_base.py419
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/test_base.py407
-rw-r--r--tensorflow/contrib/proto/python/kernel_tests/test_example.proto159
-rw-r--r--tensorflow/contrib/rnn/python/ops/rnn.py2
-rw-r--r--tensorflow/contrib/rpc/python/kernel_tests/BUILD3
-rw-r--r--tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py52
-rw-r--r--tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py8
-rw-r--r--tensorflow/contrib/rpc/python/kernel_tests/test_example.proto147
-rw-r--r--tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py42
-rw-r--r--tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py14
-rw-r--r--tensorflow/contrib/signal/python/kernel_tests/mel_ops_test.py13
-rw-r--r--tensorflow/contrib/signal/python/ops/mel_ops.py24
-rw-r--r--tensorflow/contrib/summary/summary_ops_test.py18
-rw-r--r--tensorflow/contrib/tensor_forest/BUILD2
-rw-r--r--tensorflow/contrib/tensorrt/BUILD100
-rw-r--r--tensorflow/contrib/tensorrt/convert/convert_graph.cc119
-rw-r--r--tensorflow/contrib/tensorrt/convert/convert_nodes.cc1521
-rw-r--r--tensorflow/contrib/tensorrt/convert/convert_nodes.h27
-rw-r--r--tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc20
-rw-r--r--tensorflow/contrib/tensorrt/convert/utils.cc35
-rw-r--r--tensorflow/contrib/tensorrt/convert/utils.h2
-rw-r--r--tensorflow/contrib/tensorrt/custom_plugin_examples/BUILD2
-rw-r--r--tensorflow/contrib/tensorrt/custom_plugin_examples/inc_op_kernel.cu.cc2
-rw-r--r--tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc20
-rw-r--r--tensorflow/contrib/tensorrt/kernels/trt_engine_op.h2
-rw-r--r--tensorflow/contrib/tensorrt/ops/trt_engine_op.cc10
-rw-r--r--tensorflow/contrib/tensorrt/plugin/trt_plugin_factory.cc3
-rw-r--r--tensorflow/contrib/tensorrt/python/__init__.py1
-rw-r--r--tensorflow/contrib/tensorrt/python/trt_convert.py1
-rw-r--r--tensorflow/contrib/tensorrt/resources/trt_allocator.cc65
-rw-r--r--tensorflow/contrib/tensorrt/resources/trt_allocator.h31
-rw-r--r--tensorflow/contrib/tensorrt/resources/trt_allocator_test.cc79
-rw-r--r--tensorflow/contrib/tensorrt/resources/trt_resources.h2
-rw-r--r--tensorflow/contrib/tensorrt/segment/segment.cc188
-rw-r--r--tensorflow/contrib/tensorrt/segment/segment.h20
-rw-r--r--tensorflow/contrib/tensorrt/segment/segment_test.cc473
-rw-r--r--tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc62
-rw-r--r--tensorflow/contrib/tensorrt/test/base_test.py126
-rw-r--r--tensorflow/contrib/tensorrt/test/batch_matmul_test.py76
-rw-r--r--tensorflow/contrib/tensorrt/test/biasadd_matmul_test.py112
-rw-r--r--tensorflow/contrib/tensorrt/test/binary_tensor_weight_broadcast_test.py119
-rw-r--r--tensorflow/contrib/tensorrt/test/concatenation_test.py83
-rw-r--r--tensorflow/contrib/tensorrt/test/const_broadcast_test.py68
-rw-r--r--tensorflow/contrib/tensorrt/test/multi_connection_neighbor_engine_test.py87
-rw-r--r--tensorflow/contrib/tensorrt/test/neighboring_engine_test.py69
-rw-r--r--tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py347
-rw-r--r--tensorflow/contrib/tensorrt/test/tf_trt_integration_test_base.py328
-rw-r--r--tensorflow/contrib/tensorrt/test/unary_test.py110
-rw-r--r--tensorflow/contrib/tensorrt/test/vgg_block_nchw_test.py82
-rw-r--r--tensorflow/contrib/tensorrt/test/vgg_block_test.py73
-rw-r--r--tensorflow/contrib/tensorrt/trt_conversion.i12
-rw-r--r--tensorflow/contrib/tpu/BUILD17
-rw-r--r--tensorflow/contrib/tpu/__init__.py2
-rw-r--r--tensorflow/contrib/tpu/proto/BUILD10
-rw-r--r--tensorflow/contrib/tpu/proto/optimization_parameters.proto162
-rw-r--r--tensorflow/contrib/tpu/proto/tpu_embedding_config.proto16
-rw-r--r--tensorflow/contrib/tpu/python/tpu/error_handling.py132
-rw-r--r--tensorflow/contrib/tpu/python/tpu/keras_support.py586
-rw-r--r--tensorflow/contrib/tpu/python/tpu/tpu.py60
-rw-r--r--tensorflow/contrib/tpu/python/tpu/tpu_context.py59
-rw-r--r--tensorflow/contrib/tpu/python/tpu/tpu_estimator.py234
-rw-r--r--tensorflow/contrib/verbs/rdma_mgr.cc30
-rw-r--r--tensorflow/core/BUILD35
-rw-r--r--tensorflow/core/api_def/base_api/api_def_DrawBoundingBoxes.pbtxt2
-rw-r--r--tensorflow/core/api_def/base_api/api_def_ResourceScatterNdAdd.pbtxt2
-rw-r--r--tensorflow/core/api_def/base_api/api_def_ResourceScatterNdUpdate.pbtxt2
-rw-r--r--tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt6
-rw-r--r--tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt9
-rw-r--r--tensorflow/core/common_runtime/bfc_allocator.cc52
-rw-r--r--tensorflow/core/common_runtime/bfc_allocator.h37
-rw-r--r--tensorflow/core/common_runtime/direct_session.cc45
-rw-r--r--tensorflow/core/common_runtime/direct_session_test.cc139
-rw-r--r--tensorflow/core/common_runtime/eager/context.cc26
-rw-r--r--tensorflow/core/common_runtime/eager/context.h13
-rw-r--r--tensorflow/core/common_runtime/eager/execute.cc133
-rw-r--r--tensorflow/core/common_runtime/eager/kernel_and_device.cc14
-rw-r--r--tensorflow/core/common_runtime/eager/kernel_and_device.h3
-rw-r--r--tensorflow/core/common_runtime/eager/kernel_and_device_test.cc8
-rw-r--r--tensorflow/core/common_runtime/eager/tensor_handle.cc13
-rw-r--r--tensorflow/core/common_runtime/eager/tensor_handle.h8
-rw-r--r--tensorflow/core/common_runtime/executor.cc23
-rw-r--r--tensorflow/core/common_runtime/executor.h3
-rw-r--r--tensorflow/core/common_runtime/function.cc19
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_device.cc4
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_event_mgr.cc74
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_event_mgr.h22
-rw-r--r--tensorflow/core/common_runtime/gpu/gpu_event_mgr_test.cc23
-rw-r--r--tensorflow/core/common_runtime/placer.cc35
-rw-r--r--tensorflow/core/common_runtime/placer.h2
-rw-r--r--tensorflow/core/common_runtime/placer_test.cc44
-rw-r--r--tensorflow/core/common_runtime/process_state.cc2
-rw-r--r--tensorflow/core/common_runtime/process_state.h4
-rw-r--r--tensorflow/core/common_runtime/session.cc20
-rw-r--r--tensorflow/core/common_runtime/session_factory.h7
-rw-r--r--tensorflow/core/common_runtime/session_test.cc6
-rw-r--r--tensorflow/core/common_runtime/threadpool_device.cc16
-rw-r--r--tensorflow/core/debug/BUILD55
-rw-r--r--tensorflow/core/debug/debug_gateway.cc122
-rw-r--r--tensorflow/core/debug/debug_gateway.h83
-rw-r--r--tensorflow/core/debug/debug_gateway_test.cc1011
-rw-r--r--tensorflow/core/distributed_runtime/eager/eager_client.h1
-rw-r--r--tensorflow/core/distributed_runtime/eager/eager_service_impl.cc36
-rw-r--r--tensorflow/core/distributed_runtime/eager/eager_service_impl.h3
-rw-r--r--tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc79
-rw-r--r--tensorflow/core/distributed_runtime/eager/remote_execute_node.h19
-rw-r--r--tensorflow/core/distributed_runtime/graph_mgr.cc2
-rw-r--r--tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc1
-rw-r--r--tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.cc14
-rw-r--r--tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.h15
-rw-r--r--tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service_impl.cc5
-rw-r--r--tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service_impl.h6
-rw-r--r--tensorflow/core/distributed_runtime/rpc/grpc_session.cc15
-rw-r--r--tensorflow/core/framework/allocator.cc41
-rw-r--r--tensorflow/core/framework/allocator.h12
-rw-r--r--tensorflow/core/framework/allocator_registry.cc120
-rw-r--r--tensorflow/core/framework/allocator_registry.h111
-rw-r--r--tensorflow/core/framework/device_base.h4
-rw-r--r--tensorflow/core/framework/function.cc5
-rw-r--r--tensorflow/core/framework/function.h8
-rw-r--r--tensorflow/core/framework/op_kernel.cc45
-rw-r--r--tensorflow/core/framework/op_kernel.h10
-rw-r--r--tensorflow/core/framework/op_kernel_test.cc18
-rw-r--r--tensorflow/core/framework/register_types.h6
-rw-r--r--tensorflow/core/framework/tensor.h1
-rw-r--r--tensorflow/core/graph/algorithm.cc37
-rw-r--r--tensorflow/core/graph/algorithm.h16
-rw-r--r--tensorflow/core/graph/algorithm_test.cc52
-rw-r--r--tensorflow/core/graph/graph_constructor.cc8
-rw-r--r--tensorflow/core/grappler/clusters/cluster.cc8
-rw-r--r--tensorflow/core/grappler/clusters/cluster.h3
-rw-r--r--tensorflow/core/grappler/costs/graph_properties.cc18
-rw-r--r--tensorflow/core/grappler/costs/graph_properties_test.cc38
-rw-r--r--tensorflow/core/grappler/costs/graph_properties_testdata/function_functional_while.pbtxt239
-rw-r--r--tensorflow/core/grappler/costs/op_level_cost_estimator.cc135
-rw-r--r--tensorflow/core/grappler/costs/virtual_scheduler.cc14
-rw-r--r--tensorflow/core/grappler/optimizers/data/BUILD3
-rw-r--r--tensorflow/core/grappler/optimizers/data/graph_utils.cc4
-rw-r--r--tensorflow/core/kernels/BUILD49
-rw-r--r--tensorflow/core/kernels/argmax_op.cc6
-rw-r--r--tensorflow/core/kernels/cast_op.cc78
-rw-r--r--tensorflow/core/kernels/cast_op.h2
-rw-r--r--tensorflow/core/kernels/cast_op_gpu.cu.cc8
-rw-r--r--tensorflow/core/kernels/cast_op_impl.h30
-rw-r--r--tensorflow/core/kernels/cast_op_impl_uint32.cc46
-rw-r--r--tensorflow/core/kernels/cast_op_impl_uint64.cc46
-rw-r--r--tensorflow/core/kernels/cast_op_test.cc16
-rw-r--r--tensorflow/core/kernels/conv_grad_filter_ops.cc1
-rw-r--r--tensorflow/core/kernels/conv_grad_input_ops.cc1
-rw-r--r--tensorflow/core/kernels/conv_grad_ops_3d.cc2
-rw-r--r--tensorflow/core/kernels/conv_ops.cc1
-rw-r--r--tensorflow/core/kernels/conv_ops_3d.cc1
-rw-r--r--tensorflow/core/kernels/conv_ops_gpu.h20
-rw-r--r--tensorflow/core/kernels/conv_ops_test.cc62
-rw-r--r--tensorflow/core/kernels/ctc_loss_op.cc6
-rw-r--r--tensorflow/core/kernels/cuda_solvers.cc2
-rw-r--r--tensorflow/core/kernels/data/iterator_ops.cc105
-rw-r--r--tensorflow/core/kernels/data/optimize_dataset_op.cc40
-rw-r--r--tensorflow/core/kernels/data/slide_dataset_op.cc158
-rw-r--r--tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc10
-rw-r--r--tensorflow/core/kernels/data/stats_dataset_ops.cc5
-rw-r--r--tensorflow/core/kernels/decode_proto_op.cc367
-rw-r--r--tensorflow/core/kernels/depthwise_conv_op_gpu.cu.cc486
-rw-r--r--tensorflow/core/kernels/depthwise_conv_ops_test.cc114
-rw-r--r--tensorflow/core/kernels/deserialize_sparse_string_op.cc5
-rw-r--r--tensorflow/core/kernels/edit_distance_op.cc13
-rw-r--r--tensorflow/core/kernels/encode_proto_op.cc284
-rw-r--r--tensorflow/core/kernels/functional_ops.cc1
-rw-r--r--tensorflow/core/kernels/gather_nd_op.cc7
-rw-r--r--tensorflow/core/kernels/identity_op.cc1
-rw-r--r--tensorflow/core/kernels/lookup_table_op.cc1
-rw-r--r--tensorflow/core/kernels/matmul_op.cc12
-rw-r--r--tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc12
-rw-r--r--tensorflow/core/kernels/mkl_conv_grad_input_ops.cc9
-rw-r--r--tensorflow/core/kernels/partitioned_function_ops.cc1
-rw-r--r--tensorflow/core/kernels/reshape_util.cc1
-rw-r--r--tensorflow/core/kernels/resource_variable_ops.cc29
-rw-r--r--tensorflow/core/kernels/resource_variable_ops.h9
-rw-r--r--tensorflow/core/kernels/scatter_nd_op.cc10
-rw-r--r--tensorflow/core/kernels/scatter_nd_op_cpu_impl.h7
-rw-r--r--tensorflow/core/kernels/scatter_nd_op_test.cc2
-rw-r--r--tensorflow/core/kernels/sdca_internal.cc1
-rw-r--r--tensorflow/core/kernels/sdca_internal.h2
-rw-r--r--tensorflow/core/kernels/sendrecv_ops.cc1
-rw-r--r--tensorflow/core/kernels/serialize_sparse_op.cc6
-rw-r--r--tensorflow/core/kernels/set_kernels.cc44
-rw-r--r--tensorflow/core/kernels/sparse_concat_op.cc9
-rw-r--r--tensorflow/core/kernels/sparse_reduce_op.cc12
-rw-r--r--tensorflow/core/kernels/sparse_reorder_op.cc13
-rw-r--r--tensorflow/core/kernels/sparse_slice_grad_op.cc1
-rw-r--r--tensorflow/core/kernels/sparse_slice_op.cc7
-rw-r--r--tensorflow/core/kernels/sparse_softmax_op.cc7
-rw-r--r--tensorflow/core/kernels/sparse_split_op.cc14
-rw-r--r--tensorflow/core/kernels/sparse_tensors_map_ops.cc36
-rw-r--r--tensorflow/core/kernels/sparse_to_dense_op.cc6
-rw-r--r--tensorflow/core/lib/core/refcount.h11
-rw-r--r--tensorflow/core/lib/db/sqlite.cc1
-rw-r--r--tensorflow/core/lib/io/record_reader_writer_test.cc84
-rw-r--r--tensorflow/core/lib/io/zlib_compression_options.cc (renamed from tensorflow/java/src/main/java/org/tensorflow/types/TFBool.java)28
-rw-r--r--tensorflow/core/lib/io/zlib_compression_options.h19
-rw-r--r--tensorflow/core/lib/io/zlib_inputstream.cc109
-rw-r--r--tensorflow/core/lib/io/zlib_inputstream.h27
-rw-r--r--tensorflow/core/ops/array_ops.cc14
-rw-r--r--tensorflow/core/ops/compat/ops_history.v1.pbtxt1103
-rw-r--r--tensorflow/core/ops/dataset_ops.cc7
-rw-r--r--tensorflow/core/ops/debug_ops.cc2
-rw-r--r--tensorflow/core/ops/lookup_ops.cc2
-rw-r--r--tensorflow/core/ops/math_ops.cc23
-rw-r--r--tensorflow/core/ops/math_ops_test.cc30
-rw-r--r--tensorflow/core/ops/ops.pbtxt39
-rw-r--r--tensorflow/core/platform/windows/port.cc20
-rw-r--r--tensorflow/core/protobuf/config.proto11
-rw-r--r--tensorflow/core/protobuf/debug.proto4
-rw-r--r--tensorflow/core/protobuf/eager_service.proto23
-rw-r--r--tensorflow/core/protobuf/tensorflow_server.proto2
-rw-r--r--tensorflow/core/public/session.h2
-rw-r--r--tensorflow/core/public/version.h2
-rw-r--r--tensorflow/core/util/batch_util.cc1
-rw-r--r--tensorflow/core/util/ctc/ctc_loss_util.h6
-rw-r--r--tensorflow/core/util/cuda_launch_config.h2
-rw-r--r--tensorflow/core/util/proto/BUILD10
-rw-r--r--tensorflow/core/util/proto/decode.h298
-rw-r--r--tensorflow/core/util/proto/proto_utils.cc70
-rw-r--r--tensorflow/core/util/proto/proto_utils.h33
-rw-r--r--tensorflow/core/util/sparse/dim_comparator.h16
-rw-r--r--tensorflow/core/util/sparse/group_iterator.h6
-rw-r--r--tensorflow/core/util/sparse/sparse_tensor.h196
-rw-r--r--tensorflow/core/util/sparse/sparse_tensor_test.cc91
-rw-r--r--tensorflow/core/util/stat_summarizer.cc22
-rw-r--r--tensorflow/core/util/stats_calculator.cc21
-rw-r--r--tensorflow/core/util/stats_calculator.h5
-rw-r--r--tensorflow/core/util/stats_calculator_test.cc76
-rw-r--r--tensorflow/core/util/tensor_format.cc2
-rw-r--r--tensorflow/docs_src/extend/new_data_formats.md60
-rw-r--r--tensorflow/docs_src/guide/autograph.md3
-rw-r--r--tensorflow/docs_src/guide/debugger.md2
-rw-r--r--tensorflow/docs_src/guide/eager.md36
-rw-r--r--tensorflow/docs_src/guide/index.md15
-rw-r--r--tensorflow/docs_src/guide/keras.md4
-rw-r--r--tensorflow/docs_src/guide/leftnav_files5
-rw-r--r--tensorflow/docs_src/guide/saved_model.md5
-rw-r--r--tensorflow/docs_src/install/index.md31
-rw-r--r--tensorflow/docs_src/install/install_c.md4
-rw-r--r--tensorflow/docs_src/install/install_go.md4
-rw-r--r--tensorflow/docs_src/install/install_java.md24
-rw-r--r--tensorflow/docs_src/install/install_linux.md440
-rw-r--r--tensorflow/docs_src/install/install_mac.md12
-rw-r--r--tensorflow/docs_src/install/install_raspbian.md2
-rw-r--r--tensorflow/docs_src/install/install_sources.md419
-rw-r--r--tensorflow/docs_src/install/install_windows.md2
-rw-r--r--tensorflow/docs_src/install/migration.md3
-rw-r--r--tensorflow/docs_src/mobile/README.md3
-rw-r--r--tensorflow/docs_src/mobile/index.md36
-rw-r--r--tensorflow/docs_src/mobile/leftnav_files15
-rw-r--r--tensorflow/docs_src/performance/performance_guide.md2
-rw-r--r--tensorflow/docs_src/performance/xla/developing_new_backend.md2
-rw-r--r--tensorflow/docs_src/performance/xla/operation_semantics.md41
-rw-r--r--tensorflow/docs_src/tutorials/_index.yaml54
-rw-r--r--tensorflow/docs_src/tutorials/_toc.yaml2
-rw-r--r--tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md2
-rw-r--r--tensorflow/docs_src/tutorials/eager/index.md2
-rw-r--r--tensorflow/docs_src/tutorials/estimators/linear.md3
-rw-r--r--tensorflow/docs_src/tutorials/keras/basic_classification.md2
-rw-r--r--tensorflow/docs_src/tutorials/keras/basic_regression.md2
-rw-r--r--tensorflow/docs_src/tutorials/keras/basic_text_classification.md2
-rw-r--r--tensorflow/docs_src/tutorials/keras/overfit_and_underfit.md2
-rw-r--r--tensorflow/docs_src/tutorials/keras/save_and_restore_models.md2
-rw-r--r--tensorflow/examples/android/src/org/tensorflow/demo/TensorFlowObjectDetectionAPIModel.java2
-rw-r--r--tensorflow/examples/speech_commands/freeze.py2
-rw-r--r--tensorflow/examples/speech_commands/models.py2
-rw-r--r--tensorflow/examples/tutorials/mnist/mnist_deep.py13
-rw-r--r--tensorflow/go/README.md2
-rw-r--r--tensorflow/go/op/wrappers.go1258
-rw-r--r--tensorflow/java/maven/hadoop/pom.xml2
-rw-r--r--tensorflow/java/maven/libtensorflow/pom.xml2
-rw-r--r--tensorflow/java/maven/libtensorflow_jni/pom.xml2
-rw-r--r--tensorflow/java/maven/libtensorflow_jni_gpu/pom.xml2
-rw-r--r--tensorflow/java/maven/pom.xml2
-rw-r--r--tensorflow/java/maven/proto/pom.xml2
-rw-r--r--tensorflow/java/maven/spark-connector/pom.xml2
-rw-r--r--tensorflow/java/maven/tensorflow/pom.xml2
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java73
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFDouble.java30
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFFloat.java30
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFInt32.java30
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFInt64.java30
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFString.java27
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFType.java20
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/TFUInt8.java30
-rw-r--r--tensorflow/java/src/main/java/org/tensorflow/types/Types.java52
-rw-r--r--tensorflow/java/src/main/native/saved_model_bundle_jni.cc15
-rw-r--r--tensorflow/java/src/main/native/saved_model_bundle_jni.h4
-rw-r--r--tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java56
-rw-r--r--tensorflow/python/BUILD97
-rw-r--r--tensorflow/python/client/session.py17
-rw-r--r--tensorflow/python/client/session_list_devices_test.py8
-rw-r--r--tensorflow/python/client/session_test.py21
-rw-r--r--tensorflow/python/client/tf_session.i5
-rw-r--r--tensorflow/python/compat/BUILD1
-rw-r--r--tensorflow/python/compat/compat.py7
-rw-r--r--tensorflow/python/data/ops/iterator_ops.py19
-rw-r--r--tensorflow/python/data/util/nest.py1
-rwxr-xr-xtensorflow/python/debug/examples/examples_test.sh2
-rw-r--r--tensorflow/python/eager/BUILD2
-rw-r--r--tensorflow/python/eager/backprop.py24
-rw-r--r--tensorflow/python/eager/backprop_test.py56
-rw-r--r--tensorflow/python/eager/benchmarks_test.py60
-rw-r--r--tensorflow/python/eager/context.py5
-rw-r--r--tensorflow/python/eager/core_test.py8
-rw-r--r--tensorflow/python/eager/execute.py6
-rw-r--r--tensorflow/python/eager/function.py202
-rw-r--r--tensorflow/python/eager/function_test.py45
-rw-r--r--tensorflow/python/eager/memory_test.py4
-rw-r--r--tensorflow/python/eager/pywrap_tensor.cc32
-rw-r--r--tensorflow/python/eager/pywrap_tfe_src.cc8
-rw-r--r--tensorflow/python/eager/tensor_test.py2
-rw-r--r--tensorflow/python/estimator/BUILD16
-rw-r--r--tensorflow/python/estimator/api/BUILD4
-rw-r--r--tensorflow/python/estimator/canned/boosted_trees.py439
-rw-r--r--tensorflow/python/estimator/canned/metric_keys.py5
-rw-r--r--tensorflow/python/estimator/estimator.py16
-rw-r--r--tensorflow/python/estimator/estimator_test.py42
-rw-r--r--tensorflow/python/estimator/export/export_output.py11
-rw-r--r--tensorflow/python/estimator/export/export_output_test.py15
-rw-r--r--tensorflow/python/estimator/keras.py117
-rw-r--r--tensorflow/python/estimator/keras_test.py172
-rw-r--r--tensorflow/python/estimator/run_config.py40
-rw-r--r--tensorflow/python/estimator/training.py3
-rw-r--r--tensorflow/python/estimator/training_test.py4
-rw-r--r--tensorflow/python/framework/error_interpolation.py164
-rw-r--r--tensorflow/python/framework/error_interpolation_test.py203
-rw-r--r--tensorflow/python/framework/function_def_to_graph.py32
-rw-r--r--tensorflow/python/framework/function_def_to_graph_test.py34
-rw-r--r--tensorflow/python/framework/function_test.py16
-rw-r--r--tensorflow/python/framework/kernels.py46
-rw-r--r--tensorflow/python/framework/kernels_test.py41
-rw-r--r--tensorflow/python/framework/ops.py177
-rw-r--r--tensorflow/python/framework/ops_test.py8
-rw-r--r--tensorflow/python/framework/python_op_gen.cc1
-rw-r--r--tensorflow/python/framework/python_op_gen_internal.cc25
-rw-r--r--tensorflow/python/framework/subscribe.py2
-rw-r--r--tensorflow/python/framework/tensor_util.py9
-rw-r--r--tensorflow/python/framework/test_util.py78
-rw-r--r--tensorflow/python/framework/traceable_stack.py132
-rw-r--r--tensorflow/python/framework/traceable_stack_test.py133
-rw-r--r--tensorflow/python/grappler/layout_optimizer_test.py2
-rwxr-xr-xtensorflow/python/keras/BUILD25
-rw-r--r--tensorflow/python/keras/activations.py22
-rw-r--r--tensorflow/python/keras/applications/mobilenet.py22
-rw-r--r--tensorflow/python/keras/backend.py63
-rw-r--r--tensorflow/python/keras/backend_test.py61
-rw-r--r--tensorflow/python/keras/callbacks.py209
-rw-r--r--tensorflow/python/keras/callbacks_test.py144
-rw-r--r--tensorflow/python/keras/engine/base_layer.py10
-rw-r--r--tensorflow/python/keras/engine/network.py4
-rw-r--r--tensorflow/python/keras/engine/sequential.py28
-rw-r--r--tensorflow/python/keras/engine/sequential_test.py46
-rw-r--r--tensorflow/python/keras/engine/training.py112
-rw-r--r--tensorflow/python/keras/engine/training_eager.py473
-rw-r--r--tensorflow/python/keras/engine/training_gpu_test.py125
-rw-r--r--tensorflow/python/keras/engine/training_test.py39
-rw-r--r--tensorflow/python/keras/engine/training_utils.py138
-rw-r--r--tensorflow/python/keras/engine/training_utils_test.py150
-rw-r--r--tensorflow/python/keras/initializers.py100
-rw-r--r--tensorflow/python/keras/initializers_test.py10
-rw-r--r--tensorflow/python/keras/layers/advanced_activations.py37
-rw-r--r--tensorflow/python/keras/layers/advanced_activations_test.py8
-rw-r--r--tensorflow/python/keras/layers/convolutional_recurrent.py2
-rw-r--r--tensorflow/python/keras/layers/cudnn_recurrent_test.py4
-rw-r--r--tensorflow/python/keras/layers/normalization.py6
-rw-r--r--tensorflow/python/keras/layers/normalization_test.py18
-rw-r--r--tensorflow/python/keras/layers/recurrent.py3
-rw-r--r--tensorflow/python/keras/layers/recurrent_test.py18
-rw-r--r--tensorflow/python/keras/layers/wrappers.py7
-rw-r--r--tensorflow/python/keras/layers/wrappers_test.py8
-rw-r--r--tensorflow/python/keras/metrics.py470
-rw-r--r--tensorflow/python/keras/metrics_test.py285
-rw-r--r--tensorflow/python/keras/model_subclassing_test.py270
-rw-r--r--tensorflow/python/keras/testing_utils.py73
-rw-r--r--tensorflow/python/keras/utils/np_utils.py3
-rw-r--r--tensorflow/python/kernel_tests/BUILD4
-rw-r--r--tensorflow/python/kernel_tests/argmax_op_test.py7
-rw-r--r--tensorflow/python/kernel_tests/bitcast_op_test.py8
-rw-r--r--tensorflow/python/kernel_tests/cond_v2_test.py372
-rw-r--r--tensorflow/python/kernel_tests/distributions/util_test.py4
-rw-r--r--tensorflow/python/kernel_tests/gather_nd_op_test.py12
-rw-r--r--tensorflow/python/kernel_tests/linalg/BUILD20
-rw-r--r--tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py192
-rw-r--r--tensorflow/python/kernel_tests/resource_variable_ops_test.py9
-rw-r--r--tensorflow/python/kernel_tests/scatter_nd_ops_test.py27
-rw-r--r--tensorflow/python/lib/core/numpy.h3
-rw-r--r--tensorflow/python/lib/core/py_util.cc3
-rw-r--r--tensorflow/python/ops/array_grad.py2
-rw-r--r--tensorflow/python/ops/array_ops.py8
-rw-r--r--tensorflow/python/ops/cond_v2_impl.py139
-rw-r--r--tensorflow/python/ops/control_flow_ops.py2
-rw-r--r--tensorflow/python/ops/conv2d_benchmark.py3
-rw-r--r--tensorflow/python/ops/histogram_ops_test.py17
-rw-r--r--tensorflow/python/ops/image_ops_impl.py20
-rw-r--r--tensorflow/python/ops/init_ops.py99
-rw-r--r--tensorflow/python/ops/init_ops_test.py196
-rw-r--r--tensorflow/python/ops/linalg/linalg.py1
-rw-r--r--tensorflow/python/ops/linalg/linear_operator_zeros.py452
-rw-r--r--tensorflow/python/ops/linalg_ops.py2
-rw-r--r--tensorflow/python/ops/math_ops.py15
-rw-r--r--tensorflow/python/ops/parallel_for/BUILD1
-rw-r--r--tensorflow/python/ops/parallel_for/__init__.py10
-rw-r--r--tensorflow/python/ops/parallel_for/gradients_test.py11
-rw-r--r--tensorflow/python/ops/resource_variable_ops.py30
-rw-r--r--tensorflow/python/ops/rnn.py18
-rw-r--r--tensorflow/python/ops/tensor_array_ops.py6
-rw-r--r--tensorflow/python/ops/variable_scope.py87
-rw-r--r--tensorflow/python/ops/variables.py795
-rw-r--r--tensorflow/python/platform/gfile.py18
-rw-r--r--tensorflow/python/profiler/model_analyzer_test.py2
-rw-r--r--tensorflow/python/profiler/profile_context.py33
-rw-r--r--tensorflow/python/profiler/profile_context_test.py2
-rw-r--r--tensorflow/python/tools/api/generator/BUILD (renamed from tensorflow/tools/api/generator/BUILD)17
-rw-r--r--tensorflow/python/tools/api/generator/api_gen.bzl (renamed from tensorflow/tools/api/generator/api_gen.bzl)59
-rw-r--r--tensorflow/python/tools/api/generator/create_python_api.py (renamed from tensorflow/tools/api/generator/create_python_api.py)37
-rw-r--r--tensorflow/python/tools/api/generator/create_python_api_test.py (renamed from tensorflow/tools/api/generator/create_python_api_test.py)8
-rw-r--r--tensorflow/python/tools/api/generator/doc_srcs.py (renamed from tensorflow/tools/api/generator/doc_srcs.py)0
-rw-r--r--tensorflow/python/tools/api/generator/doc_srcs_test.py (renamed from tensorflow/tools/api/generator/doc_srcs_test.py)4
-rw-r--r--tensorflow/python/training/adam.py13
-rw-r--r--tensorflow/python/training/adam_test.py6
-rw-r--r--tensorflow/python/training/checkpoint_utils.py52
-rw-r--r--tensorflow/python/training/checkpointable/data_structures.py189
-rw-r--r--tensorflow/python/training/checkpointable/data_structures_test.py144
-rw-r--r--tensorflow/python/training/checkpointable/layer_utils.py13
-rw-r--r--tensorflow/python/training/checkpointable/tracking_test.py37
-rw-r--r--tensorflow/python/training/checkpointable/util.py40
-rw-r--r--tensorflow/python/training/learning_rate_decay.py14
-rw-r--r--tensorflow/python/training/quantize_training.i2
-rw-r--r--tensorflow/python/training/saver.py6
-rw-r--r--tensorflow/python/training/saver_test.py18
-rw-r--r--tensorflow/python/training/server_lib.py9
-rw-r--r--tensorflow/python/training/warm_starting_util.py18
-rw-r--r--tensorflow/python/util/deprecation.py34
-rw-r--r--tensorflow/python/util/deprecation_test.py22
-rw-r--r--tensorflow/python/util/nest.py11
-rw-r--r--tensorflow/python/util/nest_test.py68
-rw-r--r--tensorflow/python/util/py_checkpoint_reader.i1
-rw-r--r--tensorflow/python/util/stat_summarizer.i25
-rw-r--r--tensorflow/python/util/tf_export.py87
-rw-r--r--tensorflow/python/util/tf_export_test.py2
-rw-r--r--tensorflow/python/util/tf_stack.py103
-rw-r--r--tensorflow/python/util/util.cc308
-rw-r--r--tensorflow/python/util/util.h4
-rw-r--r--tensorflow/python/util/util.i3
-rw-r--r--tensorflow/security/advisory/tfsa-2018-001.md2
-rw-r--r--tensorflow/security/index.md2
-rw-r--r--tensorflow/stream_executor/cuda/cuda_dnn.cc29
-rw-r--r--tensorflow/stream_executor/cuda/cuda_dnn.h21
-rw-r--r--tensorflow/stream_executor/cuda/cuda_gpu_executor.cc181
-rw-r--r--tensorflow/stream_executor/cuda/cuda_gpu_executor.h18
-rw-r--r--tensorflow/stream_executor/cuda/cuda_stream.h4
-rw-r--r--tensorflow/stream_executor/dnn.cc2
-rw-r--r--tensorflow/stream_executor/dnn.h21
-rw-r--r--tensorflow/stream_executor/host/host_gpu_executor.cc2
-rw-r--r--tensorflow/stream_executor/host/host_gpu_executor.h2
-rw-r--r--tensorflow/stream_executor/host/host_stream.h4
-rw-r--r--tensorflow/stream_executor/module_spec.h65
-rw-r--r--tensorflow/stream_executor/stream.cc69
-rw-r--r--tensorflow/stream_executor/stream.h21
-rw-r--r--tensorflow/stream_executor/stream_executor_internal.h66
-rw-r--r--tensorflow/stream_executor/stream_executor_pimpl.cc38
-rw-r--r--tensorflow/stream_executor/stream_executor_pimpl.h76
-rw-r--r--tensorflow/tensorflow.bzl26
-rw-r--r--tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt6
-rw-r--r--tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt6
-rw-r--r--tensorflow/tools/api/golden/tensorflow.-variable.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.compat.pbtxt8
-rw-r--r--tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt6
-rw-r--r--tensorflow/tools/api/golden/tensorflow.initializers.pbtxt24
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt4
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.activations.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.backend.pbtxt6
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.layers.-re-l-u.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.metrics.pbtxt2
-rw-r--r--tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt4
-rw-r--r--tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-zeros.__metaclass__.pbtxt14
-rw-r--r--tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-zeros.pbtxt130
-rw-r--r--tensorflow/tools/api/golden/tensorflow.linalg.pbtxt4
-rw-r--r--tensorflow/tools/api/golden/tensorflow.pbtxt2
-rw-r--r--tensorflow/tools/api/tests/api_compatibility_test.py42
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le2
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.gpu10
-rw-r--r--tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le2
-rwxr-xr-xtensorflow/tools/ci_build/ci_parameterized_build.sh31
-rwxr-xr-xtensorflow/tools/ci_build/ci_sanity.sh2
-rwxr-xr-xtensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh28
-rwxr-xr-xtensorflow/tools/ci_build/install/install_bazel.sh2
-rwxr-xr-xtensorflow/tools/ci_build/install/install_bazel_from_source.sh2
-rwxr-xr-xtensorflow/tools/ci_build/install/install_openblas_ppc64le.sh29
-rwxr-xr-xtensorflow/tools/ci_build/linux/cpu/run_py3_contrib.sh33
-rwxr-xr-xtensorflow/tools/ci_build/linux/mkl/build-dev-container.sh29
-rw-r--r--tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh8
-rw-r--r--tensorflow/tools/ci_build/windows/bazel/common_env.sh3
-rw-r--r--tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh50
-rw-r--r--tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh56
-rw-r--r--tensorflow/tools/compatibility/BUILD57
-rw-r--r--tensorflow/tools/compatibility/renames_v2.py134
-rw-r--r--tensorflow/tools/compatibility/testdata/test_file_v1_10.py (renamed from tensorflow/contrib/autograph/utils/type_hints.py)33
-rw-r--r--tensorflow/tools/compatibility/tf_upgrade.py486
-rw-r--r--tensorflow/tools/compatibility/tf_upgrade_test.py5
-rw-r--r--tensorflow/tools/compatibility/tf_upgrade_v2.py115
-rw-r--r--tensorflow/tools/compatibility/tf_upgrade_v2_test.py83
-rw-r--r--tensorflow/tools/compatibility/update/BUILD15
-rw-r--r--tensorflow/tools/compatibility/update/generate_v2_renames_map.py103
-rw-r--r--tensorflow/tools/docker/Dockerfile.devel2
-rw-r--r--tensorflow/tools/docker/Dockerfile.devel-gpu14
-rw-r--r--tensorflow/tools/docker/Dockerfile.devel-gpu-cuda9-cudnn72
-rw-r--r--tensorflow/tools/docker/Dockerfile.gpu1
-rw-r--r--tensorflow/tools/docker/notebooks/1_hello_tensorflow.ipynb2
-rw-r--r--tensorflow/tools/docs/doc_generator_visitor.py15
-rw-r--r--tensorflow/tools/docs/generate.py5
-rw-r--r--tensorflow/tools/docs/generate_lib.py30
-rw-r--r--tensorflow/tools/docs/generate_lib_test.py13
-rw-r--r--tensorflow/tools/graph_transforms/fold_old_batch_norms.cc2
-rw-r--r--tensorflow/tools/graph_transforms/transform_utils.cc13
-rw-r--r--tensorflow/tools/lib_package/BUILD2
-rw-r--r--tensorflow/tools/pip_package/BUILD22
-rwxr-xr-xtensorflow/tools/pip_package/build_pip_package.sh8
-rw-r--r--tensorflow/tools/pip_package/setup.py4
-rw-r--r--tensorflow/workspace.bzl40
-rw-r--r--third_party/examples/eager/spinn/spinn.py4
-rw-r--r--third_party/llvm/llvm.autogenerated.BUILD26
-rw-r--r--third_party/mkl_dnn/BUILD10
-rw-r--r--third_party/mkl_dnn/LICENSE201
-rw-r--r--third_party/mkl_dnn/build_defs.bzl13
-rw-r--r--third_party/mkl_dnn/mkldnn.BUILD29
-rw-r--r--third_party/nccl/nccl_configure.bzl2
-rw-r--r--third_party/repo.bzl48
-rw-r--r--third_party/systemlibs/BUILD0
-rw-r--r--third_party/systemlibs/BUILD.tpl0
-rw-r--r--third_party/systemlibs/astor.BUILD12
-rw-r--r--third_party/systemlibs/build_defs.bzl.tpl32
-rw-r--r--third_party/systemlibs/curl.BUILD12
-rw-r--r--third_party/systemlibs/cython.BUILD13
-rw-r--r--third_party/systemlibs/flatbuffers.BUILD38
-rw-r--r--third_party/systemlibs/gif.BUILD12
-rw-r--r--third_party/systemlibs/grpc.BUILD54
-rw-r--r--third_party/systemlibs/jemalloc.BUILD30
-rw-r--r--third_party/systemlibs/jpeg.BUILD12
-rw-r--r--third_party/systemlibs/jsoncpp.BUILD37
-rw-r--r--third_party/systemlibs/lmdb.BUILD12
-rw-r--r--third_party/systemlibs/nasm.BUILD12
-rw-r--r--third_party/systemlibs/pcre.BUILD12
-rw-r--r--third_party/systemlibs/png.BUILD12
-rw-r--r--third_party/systemlibs/re2.BUILD12
-rw-r--r--third_party/systemlibs/six.BUILD11
-rw-r--r--third_party/systemlibs/snappy.BUILD12
-rw-r--r--third_party/systemlibs/sqlite.BUILD15
-rw-r--r--third_party/systemlibs/swig.BUILD23
-rw-r--r--third_party/systemlibs/syslibs_configure.bzl160
-rw-r--r--third_party/systemlibs/termcolor.BUILD12
-rw-r--r--third_party/systemlibs/zlib.BUILD12
-rw-r--r--third_party/toolchains/cpus/py/BUILD242
-rw-r--r--third_party/toolchains/cpus/py3/BUILD234
-rw-r--r--tools/bazel.rc4
1235 files changed, 54099 insertions, 24674 deletions
diff --git a/.gitignore b/.gitignore
index b5306b8b79..5afe375f46 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,6 +29,7 @@ Podfile.lock
/tensorflow/contrib/lite/examples/ios/simple/data/*.tflite
xcuserdata/**
/api_init_files_list.txt
+/estimator_api_init_files_list.txt
# Android
.gradle
diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md
index 2f3df7cda9..52faed9297 100644
--- a/ISSUE_TEMPLATE.md
+++ b/ISSUE_TEMPLATE.md
@@ -15,9 +15,10 @@ If you open a GitHub issue, here is our policy:
### System information
- **Have I written custom code (as opposed to using a stock example script provided in TensorFlow)**:
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:
+- **Mobile device (e.g. iPhone 8, Pixel 2, Samsung Galaxy) if the issue happens on a mobile device**:
- **TensorFlow installed from (source or binary)**:
- **TensorFlow version (use command below)**:
-- **Python version**:
+- **Python version**:
- **Bazel version (if compiling from source)**:
- **GCC/Compiler version (if compiling from source)**:
- **CUDA/cuDNN version**:
diff --git a/RELEASE.md b/RELEASE.md
index 7bb1e3e1c8..6b67072f8e 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -34,18 +34,22 @@
* Using `tf.layers` in a subclassed `tf.keras.Model` class. See
[here](https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/layers) for more details
* `tf.data`:
- * The `DatasetBase::DebugString()` method is now `const`.
- * Added the `tf.contrib.data.sample_from_datasets()` API for randomly sampling from multiple datasets.
+ * `Dataset.from_generator()` now accepts an `args` list, in order to create nested generators.
+    * `Dataset.list_files()` now produces deterministic results when `shuffle=False` or a `seed` is passed.
+    * `tf.contrib.data.sample_from_datasets()` and `tf.contrib.data.choose_from_datasets()` make it easier to sample or deterministically choose elements from multiple datasets.
+    * `tf.contrib.data.make_csv_dataset()` now supports line breaks in quoted strings, and two infrequently used arguments have been removed.
+ * (C++) `DatasetBase::DebugString()` is now `const`.
+ * (C++) `DatasetBase::MakeIterator()` has been renamed to `DatasetBase::MakeIteratorInternal()`.
+ * (C++) `IteratorBase::Initialize()` method was added to support raising errors during iterator construction.
* Eager Execution:
+ * Added the ability to pause recording operations for gradient computation via `tf.GradientTape.stop_recording`.
+ * Updated documentation, introductory notebooks.
* `tf.keras`:
* Move Keras code out of _impl folder and remove API files.
* `tf.keras.Model.save_weights` now saves in TensorFlow format by default.
* Enable dataset iterators to be passed to `tf.keras.Model` training/eval methods.
-* Accelerated Linear Algebra (XLA):
-* TensorFlow Debugger (tfdbg): fix an issue in which the TensorBoard Debugger Plugin could not handle total source file size exceeding gRPC message size limit (4 MB).
+* TensorFlow Debugger (tfdbg) CLI: fix an issue in which the TensorBoard Debugger Plugin could not handle total source file size exceeding gRPC message size limit (4 MB).
* `tf.contrib`:
- * Add `tf.contrib.data.choose_from_datasets()`.
- * `tf.contrib.data.make_csv_dataset()` now supports line breaks in quoted strings. Two arguments were removed from `make_csv_dataset`.
* `tf.contrib.framework.zero_initializer` supports ResourceVariable.
* Adding "constrained_optimization" to tensorflow/contrib.
* Other:
@@ -55,7 +59,6 @@
* More consistent GcsFileSystem behavior for certain reads past EOF.
* Update benchmark for tf.scan to match ranges across eager and graph modes.
* Fixed bug in `tf.reduce_prod gradient` for complex dtypes.
- * Add optional `args` argument to `Dataset.from_generator()`.
* Allow the use of '.' in variables (e.g. "hparams.parse('a.b=1.0')"), which would previously raise an error. This will correspond to an attribute name with an embedded '.' symbol (e.g. 'a.b'), which can only be accessed indirectly (e.g. through getattr and setattr). To set this up the user will first need to explicitly add the variable to the hparam object (e.g. "hparams.add_hparam(name='a.b', value=0.0)").
* Benchmark for tf.scan in graph and eager modes.
* Added complex128 support to FFT, FFT2D, FFT3D, IFFT, IFFT2D, and IFFT3D.
@@ -65,7 +68,6 @@
* LinearOperator[1D,2D,3D]Circulant added to `tensorflow.linalg`.
* Conv3D, Conv3DBackpropInput, Conv3DBackpropFilter now supports arbitrary.
* Added `tf.train.Checkpoint` for reading/writing object-based checkpoints.
- * `Dataset.list_files()` now produces determinstic results when `shuffle=False` or a `seed` is passed.
* Added LinearOperatorKronecker, a dense-free implementation of the Kronecker Product.
* Allow LinearOperator to broadcast.
* SavedModelBuilder will now deduplicate asset names that point to files with the same basename and the same contents. Note that this may result in new asset files included in SavedModels in cases where assets with the same name but different contents were previously overwriting each other.
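
The `tf.data` and eager-execution items above can be exercised with a short snippet. This is a minimal sketch assuming a TensorFlow build that includes the changes listed in these notes; the generator and tensor names are illustrative only:

    import tensorflow as tf

    tf.enable_eager_execution()

    def gen(limit):
        # Values in `args` are passed to the generator on each invocation.
        for i in range(int(limit)):
            yield i

    # New `args` list for Dataset.from_generator(), per the notes above.
    # Elements can then be consumed with an eager iterator.
    ds = tf.data.Dataset.from_generator(gen, output_types=tf.int64, args=(5,))

    # Pausing gradient recording via tf.GradientTape.stop_recording.
    x = tf.constant(3.0)
    with tf.GradientTape() as tape:
        tape.watch(x)
        y = x * x
        with tape.stop_recording():
            _ = y + 1.0  # not traced; excluded from the gradient
    print(tape.gradient(y, x))  # -> 6.0
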
diff --git a/WORKSPACE b/WORKSPACE
index 1c00b3fc7e..f1d0ed565d 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -18,7 +18,7 @@ closure_repositories()
# files, in case the parsing of those build files depends on the bazel
# version we require here.
load("//tensorflow:version_check.bzl", "check_bazel_version_at_least")
-check_bazel_version_at_least("0.10.0")
+check_bazel_version_at_least("0.15.0")
load("//tensorflow:workspace.bzl", "tf_workspace")
diff --git a/configure.py b/configure.py
index e4495fb684..6d0c077406 100644
--- a/configure.py
+++ b/configure.py
@@ -35,8 +35,8 @@ except ImportError:
_DEFAULT_CUDA_VERSION = '9.0'
_DEFAULT_CUDNN_VERSION = '7'
-_DEFAULT_NCCL_VERSION = '1.3'
-_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,5.2'
+_DEFAULT_NCCL_VERSION = '2.2'
+_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_DEFAULT_CUDA_PATH = '/usr/local/cuda'
_DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
_DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
@@ -93,10 +93,10 @@ def get_input(question):
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
-Args:
- target: items to link to.
- link_name: name of the link.
-"""
+ Args:
+ target: items to link to.
+ link_name: name of the link.
+ """
try:
os.symlink(target, link_name)
except OSError as e:
@@ -110,11 +110,11 @@ Args:
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
-Args:
- filename: string for filename.
- old: string to replace.
- new: new string to replace to.
-"""
+ Args:
+ filename: string for filename.
+ old: string to replace.
+ new: new string to replace to.
+ """
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
@@ -153,18 +153,14 @@ def get_python_path(environ_cp, python_bin_path):
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
- library_paths = run_shell([
- python_bin_path, '-c',
- 'import site; print("\\n".join(site.getsitepackages()))'
- ]).split('\n')
+ library_paths = run_shell(
+ [python_bin_path, '-c',
+ 'import site; print("\\n".join(site.getsitepackages()))']).split('\n')
except subprocess.CalledProcessError:
- library_paths = [
- run_shell([
- python_bin_path, '-c',
- 'from distutils.sysconfig import get_python_lib;'
- 'print(get_python_lib())'
- ])
- ]
+ library_paths = [run_shell(
+ [python_bin_path, '-c',
+ 'from distutils.sysconfig import get_python_lib;'
+ 'print(get_python_lib())'])]
all_paths = set(python_paths + library_paths)
@@ -177,30 +173,27 @@ def get_python_path(environ_cp, python_bin_path):
def get_python_major_version(python_bin_path):
"""Get the python major version."""
- return run_shell(
- [python_bin_path, '-c', 'import sys; print(sys.version[0])'])
+ return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
- ask_python_bin_path = (
- 'Please specify the location of python. [Default is '
- '%s]: ') % default_python_bin_path
+ ask_python_bin_path = ('Please specify the location of python. [Default is '
+ '%s]: ') % default_python_bin_path
while True:
python_bin_path = get_from_env_or_user_or_default(
environ_cp, 'PYTHON_BIN_PATH', ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(
- python_bin_path, os.X_OK):
+ python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
else:
- print('%s is not executable. Is it the python binary?' %
- python_bin_path)
+ print('%s is not executable. Is it the python binary?' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
@@ -237,9 +230,8 @@ def setup_python(environ_cp):
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
# Write tools/python_bin_path.sh
- with open(
- os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
- 'w') as f:
+ with open(os.path.join(
+ _TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'), 'w') as f:
f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
@@ -267,10 +259,10 @@ def reset_tf_configure_bazelrc(workspace_path):
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
-These files could interfere with Bazel parsing.
-"""
- makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
- 'contrib', 'makefile', 'downloads')
+ These files could interfere with Bazel parsing.
+ """
+ makefile_download_dir = os.path.join(
+ _TF_WORKSPACE_ROOT, 'tensorflow', 'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
@@ -287,28 +279,28 @@ def get_var(environ_cp,
no_reply=None):
"""Get boolean input from user.
-If var_name is not set in env, ask user to enable query_item or not. If the
-response is empty, use the default.
-
-Args:
- environ_cp: copy of the os.environ.
- var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
- query_item: string for feature related to the variable, e.g. "Hadoop File
- System".
- enabled_by_default: boolean for default behavior.
- question: optional string for how to ask for user input.
- yes_reply: optional string for reply when feature is enabled.
- no_reply: optional string for reply when feature is disabled.
-
-Returns:
- boolean value of the variable.
-
-Raises:
- UserInputError: if an environment variable is set, but it cannot be
- interpreted as a boolean indicator, assume that the user has made a
- scripting error, and will continue to provide invalid input.
- Raise the error to avoid infinitely looping.
-"""
+ If var_name is not set in env, ask user to enable query_item or not. If the
+ response is empty, use the default.
+
+ Args:
+ environ_cp: copy of the os.environ.
+ var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
+ query_item: string for feature related to the variable, e.g. "Hadoop File
+ System".
+ enabled_by_default: boolean for default behavior.
+ question: optional string for how to ask for user input.
+ yes_reply: optional string for reply when feature is enabled.
+ no_reply: optional string for reply when feature is disabled.
+
+ Returns:
+ boolean value of the variable.
+
+ Raises:
+ UserInputError: if an environment variable is set, but it cannot be
+ interpreted as a boolean indicator, assume that the user has made a
+ scripting error, and will continue to provide invalid input.
+ Raise the error to avoid infinitely looping.
+ """
if not question:
question = 'Do you wish to build TensorFlow with %s support?' % query_item
if not yes_reply:
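
The docstring above describes how a boolean feature flag is read from the environment. The sketch below illustrates that interpretation; the accepted strings are an assumption for illustration, not necessarily the exact lists configure.py uses:

    _TRUE_STRINGS = ('1', 't', 'true', 'y', 'yes')
    _FALSE_STRINGS = ('0', 'f', 'false', 'n', 'no')

    def parse_bool_flag(value, enabled_by_default):
        # Empty/unset falls back to the default; anything else must be a boolean string.
        if not value:
            return enabled_by_default
        v = value.strip().lower()
        if v in _TRUE_STRINGS:
            return True
        if v in _FALSE_STRINGS:
            return False
        raise ValueError(
            'Environment variable must be set as a boolean indicator, got %r' % value)
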
@@ -338,8 +330,9 @@ Raises:
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
- 'Current value is %s.' % (var_name, ', '.join(true_strings),
- ', '.join(false_strings), var))
+ 'Current value is %s.' % (
+ var_name, ', '.join(true_strings), ', '.join(false_strings),
+ var))
while var is None:
user_input_origin = get_input(question)
@@ -362,37 +355,32 @@ Raises:
return var
-def set_build_var(environ_cp,
- var_name,
- query_item,
- option_name,
- enabled_by_default,
- bazel_config_name=None):
+def set_build_var(environ_cp, var_name, query_item, option_name,
+ enabled_by_default, bazel_config_name=None):
"""Set if query_item will be enabled for the build.
-Ask user if query_item will be enabled. Default is used if no input is given.
-Set subprocess environment variable and write to .bazelrc if enabled.
-
-Args:
- environ_cp: copy of the os.environ.
- var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
- query_item: string for feature related to the variable, e.g. "Hadoop File
- System".
- option_name: string for option to define in .bazelrc.
- enabled_by_default: boolean for default behavior.
- bazel_config_name: Name for Bazel --config argument to enable build feature.
-"""
-
- var = str(
- int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
+ Ask user if query_item will be enabled. Default is used if no input is given.
+ Set subprocess environment variable and write to .bazelrc if enabled.
+
+ Args:
+ environ_cp: copy of the os.environ.
+ var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
+ query_item: string for feature related to the variable, e.g. "Hadoop File
+ System".
+ option_name: string for option to define in .bazelrc.
+ enabled_by_default: boolean for default behavior.
+ bazel_config_name: Name for Bazel --config argument to enable build feature.
+ """
+
+ var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc('build --define %s=true' % option_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
- write_to_bazelrc(
- 'build:%s --define %s=true' % (bazel_config_name, option_name))
+ write_to_bazelrc('build:%s --define %s=true'
+ % (bazel_config_name, option_name))
def set_action_env_var(environ_cp,
@@ -404,19 +392,19 @@ def set_action_env_var(environ_cp,
no_reply=None):
"""Set boolean action_env variable.
-Ask user if query_item will be enabled. Default is used if no input is given.
-Set environment variable and write to .bazelrc.
-
-Args:
- environ_cp: copy of the os.environ.
- var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
- query_item: string for feature related to the variable, e.g. "Hadoop File
- System".
- enabled_by_default: boolean for default behavior.
- question: optional string for how to ask for user input.
- yes_reply: optional string for reply when feature is enabled.
- no_reply: optional string for reply when feature is disabled.
-"""
+ Ask user if query_item will be enabled. Default is used if no input is given.
+ Set environment variable and write to .bazelrc.
+
+ Args:
+ environ_cp: copy of the os.environ.
+ var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
+ query_item: string for feature related to the variable, e.g. "Hadoop File
+ System".
+ enabled_by_default: boolean for default behavior.
+ question: optional string for how to ask for user input.
+ yes_reply: optional string for reply when feature is enabled.
+ no_reply: optional string for reply when feature is disabled.
+ """
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
@@ -428,15 +416,15 @@ Args:
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
-Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
-'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
+ Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
+ 'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
-Args:
- version: a version to be converted
+ Args:
+ version: a version to be converted
-Returns:
- An integer if converted successfully, otherwise return None.
-"""
+ Returns:
+ An integer if converted successfully, otherwise return None.
+ """
version = version.split('-')[0]
version_segments = version.split('.')
for seg in version_segments:
@@ -450,17 +438,16 @@ Returns:
def check_bazel_version(min_version):
"""Check installed bazel version is at least min_version.
-Args:
- min_version: string for minimum bazel version.
+ Args:
+ min_version: string for minimum bazel version.
-Returns:
- The bazel version detected.
-"""
+ Returns:
+ The bazel version detected.
+ """
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(0)
- curr_version = run_shell(
- ['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
+ curr_version = run_shell(['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
for line in curr_version.split('\n'):
if 'Build label: ' in line:
@@ -479,9 +466,8 @@ Returns:
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
- print(
- 'Please upgrade your bazel installation to version %s or higher to '
- 'build TensorFlow!' % min_version)
+ print('Please upgrade your bazel installation to version %s or higher to '
+ 'build TensorFlow!' % min_version)
sys.exit(0)
return curr_version
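
As a rough illustration of the version check performed by the two functions above, here is a simplified stand-in; it is not the exact packing scheme configure.py uses:

    def version_to_int(version, width=3):
        # 'X.Y.Z-suffix' -> comparable integer; suffixes such as '-homebrew' are ignored.
        segments = version.split('-')[0].split('.')
        segments = (segments + ['0'] * width)[:width]
        if not all(s.isdigit() for s in segments):
            return None
        return int(''.join(s.zfill(3) for s in segments))

    assert version_to_int('0.15.0') >= version_to_int('0.15.0')  # minimum required by WORKSPACE
    assert version_to_int('0.10.0') < version_to_int('0.15.0')   # would trigger the upgrade message
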
@@ -489,11 +475,11 @@ Returns:
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
-Also append CC optimization flags to bazel.rc..
+  Also append CC optimization flags to bazel.rc.
-Args:
- environ_cp: copy of the os.environ.
-"""
+ Args:
+ environ_cp: copy of the os.environ.
+ """
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
@@ -501,12 +487,11 @@ Args:
default_cc_opt_flags = '/arch:AVX'
else:
default_cc_opt_flags = '-march=native'
- question = (
- 'Please specify optimization flags to use during compilation when'
- ' bazel option "--config=opt" is specified [Default is %s]: '
- ) % default_cc_opt_flags
- cc_opt_flags = get_from_env_or_user_or_default(
- environ_cp, 'CC_OPT_FLAGS', question, default_cc_opt_flags)
+ question = ('Please specify optimization flags to use during compilation when'
+ ' bazel option "--config=opt" is specified [Default is %s]: '
+ ) % default_cc_opt_flags
+ cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
+ question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
# It should be safe on the same build host.
@@ -514,13 +499,12 @@ Args:
write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
-
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
-Args:
- environ_cp: copy of the os.environ.
-"""
+ Args:
+ environ_cp: copy of the os.environ.
+ """
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
@@ -553,18 +537,18 @@ def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
-If var_name has been set as environment variable, use the preset value, else
-ask for user input. If no input is provided, the default is used.
+ If var_name has been set as environment variable, use the preset value, else
+ ask for user input. If no input is provided, the default is used.
-Args:
- environ_cp: copy of the os.environ.
- var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
- ask_for_var: string for how to ask for user input.
- var_default: default value string.
+ Args:
+ environ_cp: copy of the os.environ.
+ var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
+ ask_for_var: string for how to ask for user input.
+ var_default: default value string.
-Returns:
- string value for var_name
-"""
+ Returns:
+ string value for var_name
+ """
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
@@ -577,9 +561,8 @@ Returns:
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
- ask_clang_path = (
- 'Please specify which clang should be used as device and '
- 'host compiler. [Default is %s]: ') % default_clang_path
+ ask_clang_path = ('Please specify which clang should be used as device and '
+ 'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
@@ -589,8 +572,7 @@ def set_clang_cuda_compiler_path(environ_cp):
break
# Reset and retry
- print('Invalid clang path: %s cannot be found.' %
- clang_cuda_compiler_path)
+ print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
@@ -599,45 +581,47 @@ def set_clang_cuda_compiler_path(environ_cp):
clang_cuda_compiler_path)
-def prompt_loop_or_load_from_env(environ_cp,
- var_name,
- var_default,
- ask_for_var,
- check_success,
- error_msg,
- suppress_default_error=False,
- n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
+def prompt_loop_or_load_from_env(
+ environ_cp,
+ var_name,
+ var_default,
+ ask_for_var,
+ check_success,
+ error_msg,
+ suppress_default_error=False,
+ n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS
+):
"""Loop over user prompts for an ENV param until receiving a valid response.
-For the env param var_name, read from the environment or verify user input
-until receiving valid input. When done, set var_name in the environ_cp to its
-new value.
-
-Args:
- environ_cp: (Dict) copy of the os.environ.
- var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
- var_default: (String) default value string.
- ask_for_var: (String) string for how to ask for user input.
- check_success: (Function) function that takes one argument and returns a
- boolean. Should return True if the value provided is considered valid. May
- contain a complex error message if error_msg does not provide enough
- information. In that case, set suppress_default_error to True.
- error_msg: (String) String with one and only one '%s'. Formatted with each
- invalid response upon check_success(input) failure.
- suppress_default_error: (Bool) Suppress the above error message in favor of
- one from the check_success function.
- n_ask_attempts: (Integer) Number of times to query for valid input before
- raising an error and quitting.
-
-Returns:
- [String] The value of var_name after querying for input.
-
-Raises:
- UserInputError: if a query has been attempted n_ask_attempts times without
- success, assume that the user has made a scripting error, and will
- continue to provide invalid input. Raise the error to avoid infinitely
- looping.
-"""
+ For the env param var_name, read from the environment or verify user input
+ until receiving valid input. When done, set var_name in the environ_cp to its
+ new value.
+
+ Args:
+ environ_cp: (Dict) copy of the os.environ.
+ var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
+ var_default: (String) default value string.
+ ask_for_var: (String) string for how to ask for user input.
+ check_success: (Function) function that takes one argument and returns a
+ boolean. Should return True if the value provided is considered valid. May
+ contain a complex error message if error_msg does not provide enough
+ information. In that case, set suppress_default_error to True.
+ error_msg: (String) String with one and only one '%s'. Formatted with each
+ invalid response upon check_success(input) failure.
+ suppress_default_error: (Bool) Suppress the above error message in favor of
+ one from the check_success function.
+ n_ask_attempts: (Integer) Number of times to query for valid input before
+ raising an error and quitting.
+
+ Returns:
+ [String] The value of var_name after querying for input.
+
+ Raises:
+ UserInputError: if a query has been attempted n_ask_attempts times without
+ success, assume that the user has made a scripting error, and will
+ continue to provide invalid input. Raise the error to avoid infinitely
+ looping.
+ """
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
@@ -645,7 +629,9 @@ Raises:
)
for _ in range(n_ask_attempts):
- val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
+ val = get_from_env_or_user_or_default(environ_cp,
+ var_name,
+ full_query,
default)
if check_success(val):
break
@@ -653,9 +639,9 @@ Raises:
print(error_msg % val)
environ_cp[var_name] = ''
else:
- raise UserInputError(
- 'Invalid %s setting was provided %d times in a row. '
- 'Assuming to be a scripting mistake.' % (var_name, n_ask_attempts))
+ raise UserInputError('Invalid %s setting was provided %d times in a row. '
+ 'Assuming to be a scripting mistake.' %
+ (var_name, n_ask_attempts))
environ_cp[var_name] = val
return val
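
Simplified sketch of the prompt/retry behaviour the docstring above describes; names and messages are shortened and this is not the actual configure.py implementation:

    def ask_until_valid(ask_for_var, default, check_success, n_ask_attempts=10):
        for _ in range(n_ask_attempts):
            val = input('%s [Default is %s]: ' % (ask_for_var, default)) or default
            if check_success(val):
                return val
            print('Invalid value: %s' % val)
        raise RuntimeError('Invalid setting was provided %d times in a row. '
                           'Assuming to be a scripting mistake.' % n_ask_attempts)
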
@@ -664,16 +650,16 @@ Raises:
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
- default_ndk_path = cygpath(
- '%s/Android/Sdk/ndk-bundle' % environ_cp['APPDATA'])
+ default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
+ environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
- return (os.path.exists(path)
- and os.path.exists(os.path.join(path, 'source.properties')))
+ return (os.path.exists(path) and
+ os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
@@ -682,7 +668,8 @@ def create_android_ndk_rule(environ_cp):
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
- 'does not exist.'))
+ 'does not exist.')
+ )
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc('ANDROID_NDK_API_LEVEL',
check_ndk_level(android_ndk_home_path))
@@ -693,14 +680,14 @@ def create_android_sdk_rule(environ_cp):
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
- default_sdk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
+ default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
- return (os.path.exists(path)
- and os.path.exists(os.path.join(path, 'platforms'))
- and os.path.exists(os.path.join(path, 'build-tools')))
+ return (os.path.exists(path) and
+ os.path.exists(os.path.join(path, 'platforms')) and
+ os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
@@ -716,9 +703,9 @@ def create_android_sdk_rule(environ_cp):
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
- return os.path.exists(
- os.path.join(android_sdk_home_path, 'platforms',
- 'android-' + api_level))
+ return os.path.exists(os.path.join(android_sdk_home_path,
+ 'platforms',
+ 'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
@@ -733,8 +720,9 @@ def create_android_sdk_rule(environ_cp):
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
- return os.path.exists(
- os.path.join(android_sdk_home_path, 'build-tools', version))
+ return os.path.exists(os.path.join(android_sdk_home_path,
+ 'build-tools',
+ version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
@@ -748,8 +736,10 @@ def create_android_sdk_rule(environ_cp):
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
- write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
- write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
+ write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL',
+ android_api_level)
+ write_action_env_to_bazelrc('ANDROID_SDK_HOME',
+ android_sdk_home_path)
def check_ndk_level(android_ndk_home_path):
@@ -766,12 +756,11 @@ def check_ndk_level(android_ndk_home_path):
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_api_level) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
- print(
- 'WARNING: The API level of the NDK in %s is %s, which is not '
- 'supported by Bazel (officially supported versions: %s). Please use '
- 'another version. Compiling Android targets may result in confusing '
- 'errors.\n' % (android_ndk_home_path, ndk_api_level,
- _SUPPORTED_ANDROID_NDK_VERSIONS))
+ print('WARNING: The API level of the NDK in %s is %s, which is not '
+ 'supported by Bazel (officially supported versions: %s). Please use '
+ 'another version. Compiling Android targets may result in confusing '
+ 'errors.\n' % (android_ndk_home_path, ndk_api_level,
+ _SUPPORTED_ANDROID_NDK_VERSIONS))
return ndk_api_level
@@ -788,30 +777,30 @@ def set_gcc_host_compiler_path(environ_cp):
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
- ask_for_var='Please specify which gcc should be used by nvcc as the host compiler.',
+ ask_for_var=
+ 'Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
error_msg='Invalid gcc path. %s cannot be found.',
)
- write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH',
- gcc_host_compiler_path)
+ write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
-For example:
-Given (7, 2) -> 7.0
- (7.0.1, 2) -> 7.0
- (5, 1) -> 5
- (5.0.3.2, 1) -> 5
-
-Args:
- version_str: String, the version string.
- sequence_count: int, an integer.
-Returns:
- string, reformatted version string.
-"""
+ For example:
+ Given (7, 2) -> 7.0
+ (7.0.1, 2) -> 7.0
+ (5, 1) -> 5
+ (5.0.3.2, 1) -> 5
+
+ Args:
+ version_str: String, the version string.
+ sequence_count: int, an integer.
+ Returns:
+ string, reformatted version string.
+ """
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
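
The examples in the docstring above map directly onto a small helper like the sketch below, which pads or truncates to the requested number of fields; the real function may differ in details:

    def reformat_version(version_str, sequence_count):
        v = version_str.split('.')
        v += ['0'] * (sequence_count - len(v))
        return '.'.join(v[:sequence_count])

    assert reformat_version('7', 2) == '7.0'
    assert reformat_version('7.0.1', 2) == '7.0'
    assert reformat_version('5', 1) == '5'
    assert reformat_version('5.0.3.2', 1) == '5'
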
@@ -828,8 +817,7 @@ def set_tf_cuda_version(environ_cp):
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
# Configure the Cuda SDK version to use.
tf_cuda_version = get_from_env_or_user_or_default(
- environ_cp, 'TF_CUDA_VERSION', ask_cuda_version,
- _DEFAULT_CUDA_VERSION)
+ environ_cp, 'TF_CUDA_VERSION', ask_cuda_version, _DEFAULT_CUDA_VERSION)
tf_cuda_version = reformat_version_sequence(str(tf_cuda_version), 2)
# Find out where the CUDA toolkit is installed
@@ -840,12 +828,11 @@ def set_tf_cuda_version(environ_cp):
elif is_linux():
# If the default doesn't exist, try an alternative default.
if (not os.path.exists(default_cuda_path)
- ) and os.path.exists(_DEFAULT_CUDA_PATH_LINUX):
+ ) and os.path.exists(_DEFAULT_CUDA_PATH_LINUX):
default_cuda_path = _DEFAULT_CUDA_PATH_LINUX
ask_cuda_path = ('Please specify the location where CUDA %s toolkit is'
' installed. Refer to README.md for more details. '
- '[Default is %s]: ') % (tf_cuda_version,
- default_cuda_path)
+ '[Default is %s]: ') % (tf_cuda_version, default_cuda_path)
cuda_toolkit_path = get_from_env_or_user_or_default(
environ_cp, 'CUDA_TOOLKIT_PATH', ask_cuda_path, default_cuda_path)
if is_windows() or is_cygwin():
@@ -858,8 +845,7 @@ def set_tf_cuda_version(environ_cp):
elif is_macos():
cuda_rt_lib_path = 'lib/libcudart.%s.dylib' % tf_cuda_version
- cuda_toolkit_path_full = os.path.join(cuda_toolkit_path,
- cuda_rt_lib_path)
+ cuda_toolkit_path_full = os.path.join(cuda_toolkit_path, cuda_rt_lib_path)
if os.path.exists(cuda_toolkit_path_full):
break
@@ -870,10 +856,9 @@ def set_tf_cuda_version(environ_cp):
environ_cp['CUDA_TOOLKIT_PATH'] = ''
else:
- raise UserInputError(
- 'Invalid TF_CUDA_SETTING setting was provided %d '
- 'times in a row. Assuming to be a scripting mistake.' %
- _DEFAULT_PROMPT_ASK_ATTEMPTS)
+ raise UserInputError('Invalid TF_CUDA_SETTING setting was provided %d '
+ 'times in a row. Assuming to be a scripting mistake.' %
+ _DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION
environ_cp['CUDA_TOOLKIT_PATH'] = cuda_toolkit_path
@@ -895,13 +880,11 @@ def set_tf_cudnn_version(environ_cp):
tf_cudnn_version = reformat_version_sequence(str(tf_cudnn_version), 1)
default_cudnn_path = environ_cp.get('CUDA_TOOLKIT_PATH')
- ask_cudnn_path = (
- r'Please specify the location where cuDNN %s library is '
- 'installed. Refer to README.md for more details. [Default'
- ' is %s]:') % (tf_cudnn_version, default_cudnn_path)
+ ask_cudnn_path = (r'Please specify the location where cuDNN %s library is '
+ 'installed. Refer to README.md for more details. [Default'
+ ' is %s]: ') % (tf_cudnn_version, default_cudnn_path)
cudnn_install_path = get_from_env_or_user_or_default(
- environ_cp, 'CUDNN_INSTALL_PATH', ask_cudnn_path,
- default_cudnn_path)
+ environ_cp, 'CUDNN_INSTALL_PATH', ask_cudnn_path, default_cudnn_path)
# Result returned from "read" will be used unexpanded. That make "~"
# unusable. Going through one more level of expansion to handle that.
@@ -920,12 +903,11 @@ def set_tf_cudnn_version(environ_cp):
cuda_dnn_lib_path = 'lib/libcudnn.%s.dylib' % tf_cudnn_version
cuda_dnn_lib_alt_path = 'libcudnn.%s.dylib' % tf_cudnn_version
- cuda_dnn_lib_path_full = os.path.join(cudnn_install_path,
- cuda_dnn_lib_path)
+ cuda_dnn_lib_path_full = os.path.join(cudnn_install_path, cuda_dnn_lib_path)
cuda_dnn_lib_alt_path_full = os.path.join(cudnn_install_path,
cuda_dnn_lib_alt_path)
if os.path.exists(cuda_dnn_lib_path_full) or os.path.exists(
- cuda_dnn_lib_alt_path_full):
+ cuda_dnn_lib_alt_path_full):
break
# Try another alternative for Linux
@@ -938,8 +920,7 @@ def set_tf_cudnn_version(environ_cp):
cudnn_path_from_ldconfig = cudnn_path_from_ldconfig.group(1)
if os.path.exists('%s.%s' % (cudnn_path_from_ldconfig,
tf_cudnn_version)):
- cudnn_install_path = os.path.dirname(
- cudnn_path_from_ldconfig)
+ cudnn_install_path = os.path.dirname(cudnn_path_from_ldconfig)
break
# Reset and Retry
@@ -953,10 +934,9 @@ def set_tf_cudnn_version(environ_cp):
environ_cp['TF_CUDNN_VERSION'] = ''
else:
- raise UserInputError(
- 'Invalid TF_CUDNN setting was provided %d '
- 'times in a row. Assuming to be a scripting mistake.' %
- _DEFAULT_PROMPT_ASK_ATTEMPTS)
+ raise UserInputError('Invalid TF_CUDNN setting was provided %d '
+ 'times in a row. Assuming to be a scripting mistake.' %
+ _DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION
environ_cp['CUDNN_INSTALL_PATH'] = cudnn_install_path
@@ -997,18 +977,17 @@ def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
def set_tf_tensorrt_install_path(environ_cp):
"""Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION.
-Adapted from code contributed by Sami Kama (https://github.com/samikama).
+ Adapted from code contributed by Sami Kama (https://github.com/samikama).
-Args:
- environ_cp: copy of the os.environ.
+ Args:
+ environ_cp: copy of the os.environ.
-Raises:
- ValueError: if this method was called under non-Linux platform.
- UserInputError: if user has provided invalid input multiple times.
-"""
+ Raises:
+ ValueError: if this method was called under non-Linux platform.
+ UserInputError: if user has provided invalid input multiple times.
+ """
if not is_linux():
- raise ValueError(
- 'Currently TensorRT is only supported on Linux platform.')
+ raise ValueError('Currently TensorRT is only supported on Linux platform.')
# Ask user whether to add TensorRT support.
if str(int(get_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT',
@@ -1025,8 +1004,7 @@ Raises:
# Result returned from "read" will be used unexpanded. That makes "~"
# unusable. Going through one more level of expansion to handle that.
- trt_install_path = os.path.realpath(
- os.path.expanduser(trt_install_path))
+ trt_install_path = os.path.realpath(os.path.expanduser(trt_install_path))
def find_libs(search_path):
"""Search for libnvinfer.so in "search_path"."""
@@ -1034,14 +1012,14 @@ Raises:
if os.path.exists(search_path) and os.path.isdir(search_path):
fl.update([
os.path.realpath(os.path.join(search_path, x))
- for x in os.listdir(search_path) if 'libnvinfer.so' in x
+ for x in os.listdir(search_path)
+ if 'libnvinfer.so' in x
])
return fl
possible_files = find_libs(trt_install_path)
possible_files.update(find_libs(os.path.join(trt_install_path, 'lib')))
- possible_files.update(
- find_libs(os.path.join(trt_install_path, 'lib64')))
+ possible_files.update(find_libs(os.path.join(trt_install_path, 'lib64')))
cuda_ver = convert_version_to_int(environ_cp['TF_CUDA_VERSION'])
cudnn_ver = convert_version_to_int(environ_cp['TF_CUDNN_VERSION'])
nvinfer_pattern = re.compile('.*libnvinfer.so.?(.*)$')
@@ -1071,17 +1049,14 @@ Raises:
if os.path.exists(libnvinfer_path_from_ldconfig):
if is_cuda_compatible(libnvinfer_path_from_ldconfig, cuda_ver,
cudnn_ver):
- trt_install_path = os.path.dirname(
- libnvinfer_path_from_ldconfig)
+ trt_install_path = os.path.dirname(libnvinfer_path_from_ldconfig)
tf_tensorrt_version = search_result.group(1)
break
# Reset and Retry
if possible_files:
- print(
- 'TensorRT libraries found in one the following directories',
- 'are not compatible with selected cuda and cudnn installations'
- )
+ print('TensorRT libraries found in one of the following directories',
+ 'are not compatible with selected cuda and cudnn installations')
print(trt_install_path)
print(os.path.join(trt_install_path, 'lib'))
print(os.path.join(trt_install_path, 'lib64'))
@@ -1089,8 +1064,7 @@ Raises:
print(libnvinfer_path_from_ldconfig)
else:
print(
- 'Invalid path to TensorRT. None of the following files can be found:'
- )
+ 'Invalid path to TensorRT. None of the following files can be found:')
print(trt_install_path)
print(os.path.join(trt_install_path, 'lib'))
print(os.path.join(trt_install_path, 'lib64'))
@@ -1098,10 +1072,9 @@ Raises:
print(libnvinfer_path_from_ldconfig)
else:
- raise UserInputError(
- 'Invalid TF_TENSORRT setting was provided %d '
- 'times in a row. Assuming to be a scripting mistake.' %
- _DEFAULT_PROMPT_ASK_ATTEMPTS)
+ raise UserInputError('Invalid TF_TENSORRT setting was provided %d '
+ 'times in a row. Assuming to be a scripting mistake.' %
+ _DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION
environ_cp['TENSORRT_INSTALL_PATH'] = trt_install_path
@@ -1113,25 +1086,25 @@ Raises:
def set_tf_nccl_install_path(environ_cp):
"""Set NCCL_INSTALL_PATH and TF_NCCL_VERSION.
-Args:
- environ_cp: copy of the os.environ.
+ Args:
+ environ_cp: copy of the os.environ.
-Raises:
- ValueError: if this method was called under non-Linux platform.
- UserInputError: if user has provided invalid input multiple times.
-"""
+ Raises:
+ ValueError: if this method was called under non-Linux platform.
+ UserInputError: if user has provided invalid input multiple times.
+ """
if not is_linux():
- raise ValueError(
- 'Currently NCCL is only supported on Linux platforms.')
+ raise ValueError('Currently NCCL is only supported on Linux platforms.')
ask_nccl_version = (
- 'Please specify the NCCL version you want to use. '
- '[Leave empty to default to NCCL %s]: ') % _DEFAULT_NCCL_VERSION
+ 'Please specify the NCCL version you want to use. If NCCL %s is not '
+ 'installed, then you can use version 1.3, which can be fetched '
+ 'automatically, but it may have worse performance with multiple GPUs. '
+ '[Default is %s]: ') % (_DEFAULT_NCCL_VERSION, _DEFAULT_NCCL_VERSION)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
tf_nccl_version = get_from_env_or_user_or_default(
- environ_cp, 'TF_NCCL_VERSION', ask_nccl_version,
- _DEFAULT_NCCL_VERSION)
+ environ_cp, 'TF_NCCL_VERSION', ask_nccl_version, _DEFAULT_NCCL_VERSION)
tf_nccl_version = reformat_version_sequence(str(tf_nccl_version), 1)
if tf_nccl_version == '1':
@@ -1144,17 +1117,15 @@ Raises:
# NCCL_INSTALL_PATH, pass separate NCCL_LIB_PATH and NCCL_HDR_PATH to
# nccl_configure.bzl
default_nccl_path = environ_cp.get('CUDA_TOOLKIT_PATH')
- ask_nccl_path = (
- r'Please specify the location where NCCL %s library is '
- 'installed. Refer to README.md for more details. [Default '
- 'is %s]:') % (tf_nccl_version, default_nccl_path)
+ ask_nccl_path = (r'Please specify the location where NCCL %s library is '
+ 'installed. Refer to README.md for more details. [Default '
+ 'is %s]:') % (tf_nccl_version, default_nccl_path)
nccl_install_path = get_from_env_or_user_or_default(
environ_cp, 'NCCL_INSTALL_PATH', ask_nccl_path, default_nccl_path)
# Result returned from "read" will be used unexpanded. That makes "~"
# unusable. Going through one more level of expansion to handle that.
- nccl_install_path = os.path.realpath(
- os.path.expanduser(nccl_install_path))
+ nccl_install_path = os.path.realpath(os.path.expanduser(nccl_install_path))
if is_windows() or is_cygwin():
nccl_install_path = cygpath(nccl_install_path)
@@ -1167,26 +1138,22 @@ Raises:
nccl_lib_path = os.path.join(nccl_install_path, nccl_lib_path)
nccl_hdr_path = os.path.join(nccl_install_path, 'include/nccl.h')
- nccl_license_path = os.path.join(nccl_install_path, 'NCCL-SLA.txt')
- if os.path.exists(nccl_lib_path) and os.path.exists(
- nccl_hdr_path) and os.path.exists(nccl_license_path):
+ if os.path.exists(nccl_lib_path) and os.path.exists(nccl_hdr_path):
# Set NCCL_INSTALL_PATH
environ_cp['NCCL_INSTALL_PATH'] = nccl_install_path
write_action_env_to_bazelrc('NCCL_INSTALL_PATH', nccl_install_path)
break
# Reset and Retry
- print(
- 'Invalid path to NCCL %s toolkit, %s or %s not found. Please use the '
- 'O/S agnostic package of NCCL 2' % (tf_nccl_version, nccl_lib_path,
- nccl_hdr_path))
+ print('Invalid path to NCCL %s toolkit, %s or %s not found. Please use the '
+ 'O/S agnostic package of NCCL 2' % (tf_nccl_version, nccl_lib_path,
+ nccl_hdr_path))
environ_cp['TF_NCCL_VERSION'] = ''
else:
- raise UserInputError(
- 'Invalid TF_NCCL setting was provided %d '
- 'times in a row. Assuming to be a scripting mistake.' %
- _DEFAULT_PROMPT_ASK_ATTEMPTS)
+ raise UserInputError('Invalid TF_NCCL setting was provided %d '
+ 'times in a row. Assuming to be a scripting mistake.' %
+ _DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set TF_NCCL_VERSION
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
@@ -1196,15 +1163,14 @@ Raises:
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
-Args:
- environ_cp: copy of the os.environ.
-Returns:
- string of native cuda compute capabilities, separated by comma.
-"""
+ Args:
+ environ_cp: copy of the os.environ.
+ Returns:
+ string of native cuda compute capabilities, separated by comma.
+ """
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
- if os.path.isfile(device_query_bin) and os.access(device_query_bin,
- os.X_OK):
+ if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
@@ -1235,7 +1201,7 @@ def set_tf_cuda_compute_capabilities(environ_cp):
'https://developer.nvidia.com/cuda-gpus.\nPlease'
' note that each additional compute '
'capability significantly increases your '
- 'build time and binary size. [Default is: %s]' %
+ 'build time and binary size. [Default is: %s]: ' %
default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
@@ -1244,8 +1210,7 @@ def set_tf_cuda_compute_capabilities(environ_cp):
all_valid = True
# Remove all whitespace characters before splitting the string
# that users may insert by accident, as this will result in error
- tf_cuda_compute_capabilities = ''.join(
- tf_cuda_compute_capabilities.split())
+ tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
for compute_capability in tf_cuda_compute_capabilities.split(','):
m = re.match('[0-9]+.[0-9]+', compute_capability)
if not m:
@@ -1254,9 +1219,7 @@ def set_tf_cuda_compute_capabilities(environ_cp):
else:
ver = int(m.group(0).split('.')[0])
if ver < 3:
- print(
- 'Only compute capabilities 3.0 or higher are supported.'
- )
+ print('Only compute capabilities 3.0 or higher are supported.')
all_valid = False
if all_valid:
@@ -1307,9 +1270,8 @@ def set_host_c_compiler(environ_cp):
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
- ask_for_var=(
- 'Please specify which C compiler should be used as the host '
- 'C compiler.'),
+ ask_for_var=('Please specify which C compiler should be used as the host '
+ 'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
@@ -1327,7 +1289,8 @@ def set_computecpp_toolkit_path(environ_cp):
else:
sycl_rt_lib_path = ''
- sycl_rt_lib_path_full = os.path.join(toolkit_path, sycl_rt_lib_path)
+ sycl_rt_lib_path_full = os.path.join(toolkit_path,
+ sycl_rt_lib_path)
exists = os.path.exists(sycl_rt_lib_path_full)
if not exists:
print('Invalid SYCL %s library path. %s cannot be found' %
@@ -1355,8 +1318,8 @@ def set_trisycl_include_dir(environ_cp):
ask_trisycl_include_dir = ('Please specify the location of the triSYCL '
'include directory. (Use --config=sycl_trisycl '
'when building with Bazel) '
- '[Default is %s]: ') % (
- _DEFAULT_TRISYCL_INCLUDE_DIR)
+ '[Default is %s]: '
+ ) % (_DEFAULT_TRISYCL_INCLUDE_DIR)
while True:
trisycl_include_dir = get_from_env_or_user_or_default(
@@ -1365,12 +1328,13 @@ def set_trisycl_include_dir(environ_cp):
if os.path.exists(trisycl_include_dir):
break
- print('Invalid triSYCL include directory, %s cannot be found' %
- (trisycl_include_dir))
+ print('Invalid triSYCL include directory, %s cannot be found'
+ % (trisycl_include_dir))
# Set TRISYCL_INCLUDE_DIR
environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
- write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir)
+ write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR',
+ trisycl_include_dir)
def set_mpi_home(environ_cp):
@@ -1380,8 +1344,8 @@ def set_mpi_home(environ_cp):
default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))
def valid_mpi_path(mpi_home):
- exists = (os.path.exists(os.path.join(mpi_home, 'include'))
- and os.path.exists(os.path.join(mpi_home, 'lib')))
+ exists = (os.path.exists(os.path.join(mpi_home, 'include')) and
+ os.path.exists(os.path.join(mpi_home, 'lib')))
if not exists:
print('Invalid path to the MPI Toolkit. %s or %s cannot be found' %
(os.path.join(mpi_home, 'include'),
@@ -1406,8 +1370,7 @@ def set_other_mpi_vars(environ_cp):
# Determine if we use OpenMPI or MVAPICH, these require different header files
# to be included here to make bazel dependency checker happy
- if os.path.exists(
- os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
+ if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
symlink_force(
os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
'third_party/mpi/mpi_portable_platform.h')
@@ -1419,19 +1382,16 @@ def set_other_mpi_vars(environ_cp):
symlink_force(
os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
symlink_force(
- os.path.join(mpi_home, 'include/mpicxx.h'),
- 'third_party/mpi/mpicxx.h')
+ os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True',
'MPI_LIB_IS_OPENMPI=False')
if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')):
symlink_force(
- os.path.join(mpi_home, 'lib/libmpi.so'),
- 'third_party/mpi/libmpi.so')
+ os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so')
else:
- raise ValueError(
- 'Cannot find the MPI library file in %s/lib' % mpi_home)
+ raise ValueError('Cannot find the MPI library file in %s/lib' % mpi_home)
def set_grpc_build_flags():
@@ -1442,14 +1402,36 @@ def set_build_strip_flag():
write_to_bazelrc('build --strip=always')
-def set_windows_build_flags():
- if is_windows():
- # The non-monolithic build is not supported yet
- write_to_bazelrc('build --config monolithic')
- # Suppress warning messages
- write_to_bazelrc('build --copt=-w --host_copt=-w')
- # Output more verbose information when something goes wrong
- write_to_bazelrc('build --verbose_failures')
+def set_windows_build_flags(environ_cp):
+ """Set Windows specific build options."""
+ # The non-monolithic build is not supported yet
+ write_to_bazelrc('build --config monolithic')
+ # Suppress warning messages
+ write_to_bazelrc('build --copt=-w --host_copt=-w')
+ # Output more verbose information when something goes wrong
+ write_to_bazelrc('build --verbose_failures')
+ # The host and target platforms are the same in the Windows build, so we
+ # don't have to distinguish between them. This avoids building the same
+ # targets twice.
+ write_to_bazelrc('build --distinct_host_configuration=false')
+ # Enable short object file paths to avoid long-path issues on Windows.
+ # TODO(pcloudy): Remove this flag when upgrading Bazel to 0.16.0;
+ # short object file paths will be enabled by default there.
+ write_to_bazelrc('build --experimental_shortened_obj_file_path=true')
+
+ if get_var(
+ environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
+ True,
+ ('Would you like to override eigen strong inline for some C++ '
+ 'compilation to reduce the compilation time?'),
+ 'Eigen strong inline overridden.',
+ 'Not overriding eigen strong inline, '
+ 'some compilations could take more than 20 mins.'):
+ # Due to a known MSVC compiler issue
+ # https://github.com/tensorflow/tensorflow/issues/10521
+ # Overriding eigen strong inline speeds up the compiling of
+ # conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
+ # but this also hurts the performance. Let users decide what they want.
+ write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
@@ -1459,18 +1441,17 @@ def config_info_line(name, help_text):
def main():
parser = argparse.ArgumentParser()
- parser.add_argument(
- "--workspace",
- type=str,
- default=_TF_WORKSPACE_ROOT,
- help="The absolute path to your active Bazel workspace.")
+ parser.add_argument("--workspace",
+ type=str,
+ default=_TF_WORKSPACE_ROOT,
+ help="The absolute path to your active Bazel workspace.")
args = parser.parse_args()
# Make a copy of os.environ to be clear when functions are getting and setting
# environment variables.
environ_cp = dict(os.environ)
- check_bazel_version('0.10.0')
+ check_bazel_version('0.15.0')
reset_tf_configure_bazelrc(args.workspace)
cleanup_makefile()
@@ -1490,11 +1471,23 @@ def main():
# TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
# Windows.
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
+ environ_cp['TF_ENABLE_XLA'] = '0'
+ environ_cp['TF_NEED_GDR'] = '0'
+ environ_cp['TF_NEED_VERBS'] = '0'
+ environ_cp['TF_NEED_MPI'] = '0'
+ environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'
if is_macos():
environ_cp['TF_NEED_JEMALLOC'] = '0'
environ_cp['TF_NEED_TENSORRT'] = '0'
+ # The numpy package on ppc64le uses OpenBLAS, which has multi-threading
+ # issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at runtime
+ # so that the TensorFlow test cases that compare numpy results to
+ # TensorFlow results succeed.
+ if is_ppc64le():
+ write_action_env_to_bazelrc("OMP_NUM_THREADS", 1)
+
set_build_var(environ_cp, 'TF_NEED_JEMALLOC', 'jemalloc as malloc',
'with_jemalloc', True)
set_build_var(environ_cp, 'TF_NEED_GCP', 'Google Cloud Platform',
@@ -1507,8 +1500,8 @@ def main():
'with_kafka_support', True, 'kafka')
set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
False, 'xla')
- set_build_var(environ_cp, 'TF_NEED_GDR', 'GDR', 'with_gdr_support', False,
- 'gdr')
+ set_build_var(environ_cp, 'TF_NEED_GDR', 'GDR', 'with_gdr_support',
+ False, 'gdr')
set_build_var(environ_cp, 'TF_NEED_VERBS', 'VERBS', 'with_verbs_support',
False, 'verbs')
set_build_var(environ_cp, 'TF_NEED_NGRAPH', 'nGraph',
@@ -1518,16 +1511,15 @@ def main():
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
set_host_cxx_compiler(environ_cp)
set_host_c_compiler(environ_cp)
- set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP',
- True)
+ set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
set_computecpp_toolkit_path(environ_cp)
else:
set_trisycl_include_dir(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
- if (environ_cp.get('TF_NEED_CUDA') == '1'
- and 'TF_CUDA_CONFIG_REPO' not in environ_cp):
+ if (environ_cp.get('TF_NEED_CUDA') == '1' and
+ 'TF_CUDA_CONFIG_REPO' not in environ_cp):
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
if is_linux():
@@ -1536,7 +1528,7 @@ def main():
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
- 'LD_LIBRARY_PATH') != '1':
+ 'LD_LIBRARY_PATH') != '1':
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
@@ -1569,23 +1561,28 @@ def main():
set_grpc_build_flags()
set_cc_opt_flags(environ_cp)
set_build_strip_flag()
- set_windows_build_flags()
+ if is_windows():
+ set_windows_build_flags(environ_cp)
- if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace',
- False,
- ('Would you like to interactively configure ./WORKSPACE for '
- 'Android builds?'), 'Searching for NDK and SDK installations.',
- 'Not configuring the WORKSPACE for Android builds.'):
+ if get_var(
+ environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace',
+ False,
+ ('Would you like to interactively configure ./WORKSPACE for '
+ 'Android builds?'),
+ 'Searching for NDK and SDK installations.',
+ 'Not configuring the WORKSPACE for Android builds.'):
create_android_ndk_rule(environ_cp)
create_android_sdk_rule(environ_cp)
- print('Preconfigured Bazel build configs. You can use any of the below by '
- 'adding "--config=<>" to your build command. See tools/bazel.rc for '
- 'more details.')
- config_info_line('mkl', 'Build with MKL support.')
- config_info_line('monolithic',
- 'Config for mostly static monolithic build.')
-
+ # On Windows, we don't have MKL support and the build is always monolithic.
+ # So no need to print the following message.
+ # TODO(pcloudy): remove the following check once these configs make sense on Windows
+ if not is_windows():
+ print('Preconfigured Bazel build configs. You can use any of the below by '
+ 'adding "--config=<>" to your build command. See tools/bazel.rc for '
+ 'more details.')
+ config_info_line('mkl', 'Build with MKL support.')
+ config_info_line('monolithic', 'Config for mostly static monolithic build.')
if __name__ == '__main__':
main()
diff --git a/tensorflow/BUILD b/tensorflow/BUILD
index 6d443eb9f2..cee7114d3e 100644
--- a/tensorflow/BUILD
+++ b/tensorflow/BUILD
@@ -20,11 +20,17 @@ load(
"tf_additional_binary_deps",
)
load(
- "//tensorflow/tools/api/generator:api_gen.bzl",
+ "//tensorflow/python/tools/api/generator:api_gen.bzl",
"gen_api_init_files", # @unused
)
-load("//third_party/ngraph:build_defs.bzl", "if_ngraph")
+# Config setting used when building for products
+# which require restricted licenses to be avoided.
+config_setting(
+ name = "no_lgpl_deps",
+ values = {"define": "__TENSORFLOW_NO_LGPL_DEPS__=1"},
+ visibility = ["//visibility:public"],
+)
# Config setting for determining if we are building for Android.
config_setting(
@@ -434,6 +440,8 @@ load(
"if_mkl",
)
+load("//third_party/ngraph:build_defs.bzl", "if_ngraph")
+
filegroup(
name = "intel_binary_blob",
data = if_mkl(
@@ -550,7 +558,7 @@ tf_cc_shared_object(
"//tensorflow/c:version_script.lds",
"//tensorflow/c/eager:c_api",
"//tensorflow/core:tensorflow",
- ]
+ ],
)
tf_cc_shared_object(
diff --git a/tensorflow/c/c_api.cc b/tensorflow/c/c_api.cc
index 5c218d3f25..10bc8cdbee 100644
--- a/tensorflow/c/c_api.cc
+++ b/tensorflow/c/c_api.cc
@@ -33,6 +33,7 @@ limitations under the License.
#include "tensorflow/core/common_runtime/eval_const_tensor.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
+#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -327,6 +328,7 @@ TF_Buffer* TF_NewBufferFromString(const void* proto, size_t proto_len) {
}
void TF_DeleteBuffer(TF_Buffer* buffer) {
+ if (buffer == nullptr) return;
if (buffer->data_deallocator != nullptr) {
(*buffer->data_deallocator)(const_cast<void*>(buffer->data),
buffer->length);
@@ -356,6 +358,7 @@ void TF_CloseDeprecatedSession(TF_DeprecatedSession* s, TF_Status* status) {
void TF_DeleteDeprecatedSession(TF_DeprecatedSession* s, TF_Status* status) {
status->status = Status::OK();
+ if (s == nullptr) return;
delete s->session;
delete s;
}
@@ -906,6 +909,7 @@ TF_Library* TF_LoadLibrary(const char* library_filename, TF_Status* status) {
TF_Buffer TF_GetOpList(TF_Library* lib_handle) { return lib_handle->op_list; }
void TF_DeleteLibraryHandle(TF_Library* lib_handle) {
+ if (lib_handle == nullptr) return;
tensorflow::port::Free(const_cast<void*>(lib_handle->op_list.data));
delete lib_handle;
}
@@ -963,6 +967,7 @@ TF_DEVICELIST_METHOD(const char*, TF_DeviceListName, name().c_str(), nullptr);
TF_DEVICELIST_METHOD(const char*, TF_DeviceListType, device_type().c_str(),
nullptr);
TF_DEVICELIST_METHOD(int64_t, TF_DeviceListMemoryBytes, memory_limit(), -1);
+TF_DEVICELIST_METHOD(uint64_t, TF_DeviceListIncarnation, incarnation(), 0);
#undef TF_DEVICELIST_METHOD
@@ -1852,6 +1857,7 @@ TF_Graph::TF_Graph()
TF_Graph* TF_NewGraph() { return new TF_Graph; }
void TF_DeleteGraph(TF_Graph* g) {
+ if (g == nullptr) return;
g->mu.lock();
g->delete_requested = true;
const bool del = g->sessions.empty();
@@ -2527,6 +2533,7 @@ void TF_CloseSession(TF_Session* s, TF_Status* status) {
void TF_DeleteSession(TF_Session* s, TF_Status* status) {
status->status = Status::OK();
+ if (s == nullptr) return;
TF_Graph* const graph = s->graph;
if (graph != nullptr) {
graph->mu.lock();
@@ -2725,7 +2732,34 @@ TF_Buffer* TF_ApiDefMapGet(TF_ApiDefMap* api_def_map, const char* name,
TF_Buffer* ret = TF_NewBuffer();
status->status = MessageToBuffer(*api_def, ret);
+ if (!status->status.ok()) {
+ TF_DeleteBuffer(ret);
+ return nullptr;
+ }
return ret;
#endif // __ANDROID__
}
+
+TF_Buffer* TF_GetAllRegisteredKernels(TF_Status* status) {
+ tensorflow::KernelList kernel_list = tensorflow::GetAllRegisteredKernels();
+ TF_Buffer* ret = TF_NewBuffer();
+ status->status = MessageToBuffer(kernel_list, ret);
+ if (!status->status.ok()) {
+ TF_DeleteBuffer(ret);
+ return nullptr;
+ }
+ return ret;
+}
+
+TF_Buffer* TF_GetRegisteredKernelsForOp(const char* name, TF_Status* status) {
+ tensorflow::KernelList kernel_list =
+ tensorflow::GetRegisteredKernelsForOp(name);
+ TF_Buffer* ret = TF_NewBuffer();
+ status->status = MessageToBuffer(kernel_list, ret);
+ if (!status->status.ok()) {
+ TF_DeleteBuffer(ret);
+ return nullptr;
+ }
+ return ret;
+}
} // end extern "C"
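For orientation, here is a minimal standalone sketch (not part of the patch) of calling the two new kernel-registry entry points. The op name "MatMul", the main() scaffolding, and parsing the returned buffer with the C++ KernelList proto are illustrative assumptions; the c_api_test.cc tests further below are the authoritative usage.

// Sketch: list every registered kernel, then the kernels registered for one op.
#include <cstdio>

#include "tensorflow/c/c_api.h"
#include "tensorflow/core/framework/kernel_def.pb.h"

int main() {
  TF_Status* status = TF_NewStatus();

  // All registered kernels, returned as a serialized KernelList proto.
  TF_Buffer* buf = TF_GetAllRegisteredKernels(status);
  if (TF_GetCode(status) == TF_OK) {
    tensorflow::KernelList kernels;
    kernels.ParseFromArray(buf->data, buf->length);
    std::printf("total kernels: %d\n", kernels.kernel_size());
    TF_DeleteBuffer(buf);
  }

  // Kernels registered for a single op; "MatMul" is just an illustrative name.
  buf = TF_GetRegisteredKernelsForOp("MatMul", status);
  if (TF_GetCode(status) == TF_OK) {
    tensorflow::KernelList kernels;
    kernels.ParseFromArray(buf->data, buf->length);
    for (const auto& k : kernels.kernel()) {
      std::printf("%s on %s\n", k.op().c_str(), k.device_type().c_str());
    }
    TF_DeleteBuffer(buf);
  }

  TF_DeleteStatus(status);
  return 0;
}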
diff --git a/tensorflow/c/c_api.h b/tensorflow/c/c_api.h
index 1eb75ef11f..c8ae6f2dd1 100644
--- a/tensorflow/c/c_api.h
+++ b/tensorflow/c/c_api.h
@@ -44,6 +44,7 @@ limitations under the License.
// * size_t is used to represent byte sizes of objects that are
// materialized in the address space of the calling process.
// * int is used as an index into arrays.
+// * Deletion functions are safe to call on nullptr.
//
// Questions left to address:
// * Might at some point need a way for callers to provide their own Env.
@@ -1521,6 +1522,13 @@ TF_CAPI_EXPORT extern const char* TF_DeviceListType(const TF_DeviceList* list,
TF_CAPI_EXPORT extern int64_t TF_DeviceListMemoryBytes(
const TF_DeviceList* list, int index, TF_Status* status);
+// Retrieve the incarnation number of a given device.
+//
+// If index is out of bounds, an error code will be set in the status object,
+// and 0 will be returned.
+TF_CAPI_EXPORT extern uint64_t TF_DeviceListIncarnation(
+ const TF_DeviceList* list, int index, TF_Status* status);
+
// --------------------------------------------------------------------------
// Load plugins containing custom ops and kernels
@@ -1603,6 +1611,18 @@ TF_CAPI_EXPORT extern TF_Buffer* TF_ApiDefMapGet(TF_ApiDefMap* api_def_map,
size_t name_len,
TF_Status* status);
+// --------------------------------------------------------------------------
+// Kernel definition information.
+
+// Returns a serialized KernelList protocol buffer containing KernelDefs for all
+// registered kernels.
+TF_CAPI_EXPORT extern TF_Buffer* TF_GetAllRegisteredKernels(TF_Status* status);
+
+// Returns a serialized KernelList protocol buffer containing KernelDefs for all
+// kernels registered for the operation named `name`.
+TF_CAPI_EXPORT extern TF_Buffer* TF_GetRegisteredKernelsForOp(
+ const char* name, TF_Status* status);
+
#ifdef __cplusplus
} /* end extern "C" */
#endif
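A hedged usage sketch for the new TF_DeviceListIncarnation accessor (not part of the patch): it assumes an already-created TF_Session* (session construction is omitted) and otherwise follows the same pattern as the existing name/type/memory accessors.

// Sketch: print the incarnation number of each device visible to a session.
#include <cstdio>

#include "tensorflow/c/c_api.h"

void PrintDeviceIncarnations(TF_Session* session) {
  TF_Status* status = TF_NewStatus();
  TF_DeviceList* devices = TF_SessionListDevices(session, status);
  if (TF_GetCode(status) == TF_OK) {
    const int n = TF_DeviceListCount(devices);
    for (int i = 0; i < n; ++i) {
      const char* name = TF_DeviceListName(devices, i, status);
      uint64_t incarnation = TF_DeviceListIncarnation(devices, i, status);
      if (TF_GetCode(status) == TF_OK) {
        std::printf("%s incarnation=%llu\n", name,
                    static_cast<unsigned long long>(incarnation));
      }
    }
  }
  // Both deletion functions are now safe to call on nullptr.
  TF_DeleteDeviceList(devices);
  TF_DeleteStatus(status);
}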
diff --git a/tensorflow/c/c_api_function_test.cc b/tensorflow/c/c_api_function_test.cc
index 610274696f..f7ca219c89 100644
--- a/tensorflow/c/c_api_function_test.cc
+++ b/tensorflow/c/c_api_function_test.cc
@@ -1516,7 +1516,8 @@ void DefineStatefulFunction(const char* name, TF_Function** func) {
TF_Output inputs[] = {};
TF_Output outputs[] = {{random, 0}};
- *func = TF_GraphToFunction(func_graph.get(), name, /*append_hash=*/false, -1,
+ *func = TF_GraphToFunction(func_graph.get(), name,
+ /*append_hash_to_fn_name=*/false, -1,
/*opers=*/nullptr, 0, inputs, 1, outputs,
/*output_names=*/nullptr,
/*opts=*/nullptr, "", s.get());
diff --git a/tensorflow/c/c_api_test.cc b/tensorflow/c/c_api_test.cc
index bc04b53fbb..e674b1623c 100644
--- a/tensorflow/c/c_api_test.cc
+++ b/tensorflow/c/c_api_test.cc
@@ -29,9 +29,11 @@ limitations under the License.
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/graph.pb_text.h"
+#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/framework/node_def.pb_text.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
+#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
@@ -1424,6 +1426,29 @@ TEST(CAPI, SavedModelNullArgsAreValid) {
TF_DeleteStatus(s);
}
+TEST(CAPI, DeletingNullPointerIsSafe) {
+ TF_Status* status = TF_NewStatus();
+
+ TF_DeleteStatus(nullptr);
+ TF_DeleteBuffer(nullptr);
+ TF_DeleteTensor(nullptr);
+ TF_DeleteSessionOptions(nullptr);
+ TF_DeleteGraph(nullptr);
+ TF_DeleteImportGraphDefOptions(nullptr);
+ TF_DeleteImportGraphDefResults(nullptr);
+ TF_DeleteFunction(nullptr);
+ TF_DeleteSession(nullptr, status);
+ EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ TF_DeletePRunHandle(nullptr);
+ TF_DeleteDeprecatedSession(nullptr, status);
+ EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ TF_DeleteDeviceList(nullptr);
+ TF_DeleteLibraryHandle(nullptr);
+ TF_DeleteApiDefMap(nullptr);
+
+ TF_DeleteStatus(status);
+}
+
REGISTER_OP("TestOpWithNoGradient")
.Input("x: T")
.Output("y: T")
@@ -2312,6 +2337,57 @@ TEST(TestApiDef, TestCreateApiDefWithOverwrites) {
TF_DeleteLibraryHandle(lib);
}
+class DummyKernel : public tensorflow::OpKernel {
+ public:
+ explicit DummyKernel(tensorflow::OpKernelConstruction* context)
+ : OpKernel(context) {}
+ void Compute(tensorflow::OpKernelContext* context) override {}
+};
+
+// Test we can query kernels
+REGISTER_OP("TestOpWithSingleKernel")
+ .Input("a: float")
+ .Input("b: float")
+ .Output("o: float");
+REGISTER_KERNEL_BUILDER(
+ Name("TestOpWithSingleKernel").Device(tensorflow::DEVICE_CPU), DummyKernel);
+
+TEST(TestKernel, TestGetAllRegisteredKernels) {
+ TF_Status* status = TF_NewStatus();
+ TF_Buffer* kernel_list_buf = TF_GetAllRegisteredKernels(status);
+ EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ KernelList kernel_list;
+ kernel_list.ParseFromArray(kernel_list_buf->data, kernel_list_buf->length);
+ ASSERT_GT(kernel_list.kernel_size(), 0);
+ TF_DeleteBuffer(kernel_list_buf);
+ TF_DeleteStatus(status);
+}
+
+TEST(TestKernel, TestGetRegisteredKernelsForOp) {
+ TF_Status* status = TF_NewStatus();
+ TF_Buffer* kernel_list_buf =
+ TF_GetRegisteredKernelsForOp("TestOpWithSingleKernel", status);
+ EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ KernelList kernel_list;
+ kernel_list.ParseFromArray(kernel_list_buf->data, kernel_list_buf->length);
+ ASSERT_EQ(kernel_list.kernel_size(), 1);
+ EXPECT_EQ(kernel_list.kernel(0).op(), "TestOpWithSingleKernel");
+ EXPECT_EQ(kernel_list.kernel(0).device_type(), "CPU");
+ TF_DeleteBuffer(kernel_list_buf);
+ TF_DeleteStatus(status);
+}
+
+TEST(TestKernel, TestGetRegisteredKernelsForOpNoKernels) {
+ TF_Status* status = TF_NewStatus();
+ TF_Buffer* kernel_list_buf = TF_GetRegisteredKernelsForOp("Unknown", status);
+ EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
+ KernelList kernel_list;
+ kernel_list.ParseFromArray(kernel_list_buf->data, kernel_list_buf->length);
+ ASSERT_EQ(kernel_list.kernel_size(), 0);
+ TF_DeleteBuffer(kernel_list_buf);
+ TF_DeleteStatus(status);
+}
+
#undef EXPECT_TF_META
} // namespace
diff --git a/tensorflow/c/eager/c_api.cc b/tensorflow/c/eager/c_api.cc
index 82ca2be2cf..6c510536d6 100644
--- a/tensorflow/c/eager/c_api.cc
+++ b/tensorflow/c/eager/c_api.cc
@@ -664,17 +664,17 @@ TFE_TensorHandle* TFE_NewTensorHandle(const tensorflow::Tensor& t) {
const tensorflow::Tensor* TFE_TensorHandleUnderlyingTensorInHostMemory(
TFE_TensorHandle* h, TF_Status* status) {
- tensorflow::Device* d = nullptr;
- tensorflow::Device* op_device = nullptr;
- const tensorflow::Tensor* t = nullptr;
- status->status = h->handle->TensorAndDevice(&t, &d, &op_device);
- if (!status->status.ok()) return nullptr;
- if (d != nullptr) {
+ if (!h->handle->OnHostCPU()) {
status->status = tensorflow::errors::FailedPrecondition(
"TFE_TensorHandle is placed in device (not host) memory. Cannot return "
"a tensorflow::Tensor");
return nullptr;
}
+ tensorflow::Device* d = nullptr;
+ tensorflow::Device* op_device = nullptr;
+ const tensorflow::Tensor* t = nullptr;
+ status->status = h->handle->TensorAndDevice(&t, &d, &op_device);
+ if (!status->status.ok()) return nullptr;
return t;
}
diff --git a/tensorflow/c/python_api.cc b/tensorflow/c/python_api.cc
index e18fdf6c57..8486b585c8 100644
--- a/tensorflow/c/python_api.cc
+++ b/tensorflow/c/python_api.cc
@@ -155,7 +155,7 @@ void SetResourceHandleShapeAndType(TF_Graph* graph, TF_Output output,
tensorflow::shape_inference::ShapeHandle shape;
status->status =
ic->MakeShapeFromShapeProto(shape_and_type_proto.shape(), &shape);
- if (status->status.ok()) return;
+ if (!status->status.ok()) return;
shapes_and_types.emplace_back(shape, shape_and_type_proto.dtype());
}
ic->set_output_handle_shapes_and_types(output.index, shapes_and_types);
diff --git a/tensorflow/cc/gradients/nn_grad.cc b/tensorflow/cc/gradients/nn_grad.cc
index c73482d5f4..588e96cb19 100644
--- a/tensorflow/cc/gradients/nn_grad.cc
+++ b/tensorflow/cc/gradients/nn_grad.cc
@@ -47,6 +47,72 @@ Status SoftmaxGrad(const Scope& scope, const Operation& op,
}
REGISTER_GRADIENT_OP("Softmax", SoftmaxGrad);
+bool IsZero(const Scope& scope, const Output& grad) {
+ string op_type_name = grad.op().node()->type_string();
+ if (op_type_name == "ZerosLike" || op_type_name == "Zeros") {
+ return true;
+ }
+ // The Operation we were provided is not named something obvious so
+ // we need to actually look at its contents.
+ // The original python code did this by calling a utility function called
+ // tensor_util.constant_value.
+ // There is no C++ equivalent to tensor_util.constant_value so we do nothing
+ // for the moment.
+ return false;
+}
+
+// Multiply after broadcasting vec to match dimensions of mat.
+// Args:
+// vec: A 1-D tensor of dimension [D0]
+// mat: A 2-D tensor of dimension [D0, D1]
+//
+// Returns:
+// A tensor of dimension [D0, D1], the result of vec * mat.
+Output BroadcastMul(const Scope& scope, const Output& vec, const Output& mat) {
+ auto reshaped = ExpandDims(scope, vec, -1);
+ return Multiply(scope, reshaped, mat);
+}
+
+Status SoftmaxCrossEntropyWithLogitsGrad(const Scope& scope,
+ const Operation& op,
+ const std::vector<Output>& grad_inputs,
+ std::vector<Output>* grad_outputs) {
+ // Softmax gradient with cross entropy logits function.
+ // We multiply the backprop for the cost with the gradients -- op.output(1).
+ // A gradient is also produced for the labels, from -log(softmax(logits)).
+
+ // The logits produced by the network are at input index 0.
+ auto logits = op.input(0);
+ // The precomputed backprop (softmax(logits) - labels) is at output index 1.
+ auto softmax_grad = op.output(1);
+
+ // The loss is the output at index 0, and backprop is the output at index 1.
+ auto grad_loss = grad_inputs[0];
+ auto grad_grad = grad_inputs[1];
+
+ auto grad = BroadcastMul(scope, grad_loss, softmax_grad);
+ if (!IsZero(scope, grad_grad)) {
+ std::vector<int> axis;
+ auto logits_softmax = Softmax(scope, logits);
+
+ auto grad_grad_expand = ExpandDims(scope, grad_grad, 1);
+ auto logits_softmax_expand = ExpandDims(scope, logits_softmax, 2);
+ auto matmul_result =
+ BatchMatMul(scope, grad_grad_expand, logits_softmax_expand);
+ axis.push_back(1);
+ auto squeeze_result = Squeeze(scope, matmul_result, Squeeze::Axis(axis));
+ auto subtraction_result = Subtract(scope, grad_grad, squeeze_result);
+ auto multiply_result = Multiply(scope, subtraction_result, logits_softmax);
+ grad = Add(scope, grad, multiply_result);
+ }
+ auto minus_log_softmax = Multiply(scope, LogSoftmax(scope, logits), -1.0f);
+ grad_outputs->push_back(grad);
+ grad_outputs->push_back(BroadcastMul(scope, grad_loss, minus_log_softmax));
+ return scope.status();
+}
+REGISTER_GRADIENT_OP("SoftmaxCrossEntropyWithLogits",
+ SoftmaxCrossEntropyWithLogitsGrad);
+
Status LogSoftmaxGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
@@ -195,9 +261,9 @@ Status MaxPool3DGradHelper(const Scope& scope, const Operation& op,
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
MaxPool3DGrad::Attrs grad_attrs;
- auto dx = MaxPool3DGrad(scope, op.input(0), op.output(0), grad_inputs[0],
- ksize, strides, padding,
- grad_attrs.DataFormat(data_format));
+ auto dx =
+ MaxPool3DGrad(scope, op.input(0), op.output(0), grad_inputs[0], ksize,
+ strides, padding, grad_attrs.DataFormat(data_format));
grad_outputs->push_back(dx);
return scope.status();
}
@@ -216,10 +282,9 @@ Status AvgPoolGradHelper(const Scope& scope, const Operation& op,
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
internal::AvgPoolGrad::Attrs grad_attrs;
- auto dx =
- internal::AvgPoolGrad(scope, Shape(scope, op.input(0)), grad_inputs[0],
- ksize, strides, padding,
- grad_attrs.DataFormat(data_format));
+ auto dx = internal::AvgPoolGrad(scope, Shape(scope, op.input(0)),
+ grad_inputs[0], ksize, strides, padding,
+ grad_attrs.DataFormat(data_format));
grad_outputs->push_back(dx);
return scope.status();
}
@@ -238,9 +303,9 @@ Status AvgPool3DGradHelper(const Scope& scope, const Operation& op,
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
AvgPool3DGrad::Attrs grad_attrs;
- auto dx = AvgPool3DGrad(scope, Shape(scope, op.input(0)), grad_inputs[0],
- ksize, strides, padding,
- grad_attrs.DataFormat(data_format));
+ auto dx =
+ AvgPool3DGrad(scope, Shape(scope, op.input(0)), grad_inputs[0], ksize,
+ strides, padding, grad_attrs.DataFormat(data_format));
grad_outputs->push_back(dx);
return scope.status();
}
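As a rough usage sketch (not part of the patch), the newly registered gradient is picked up automatically by the C++ symbolic-gradient machinery. The shapes and the AddSymbolicGradients call below are illustrative assumptions; the nn_grad_test.cc change further below is the authoritative exercise of this path.

// Sketch: request d(loss)/d(logits) for a SoftmaxCrossEntropyWithLogits node;
// the gradient registered above is what AddSymbolicGradients dispatches to.
#include <vector>

#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/lib/core/status.h"

int main() {
  using namespace tensorflow;       // NOLINT
  using namespace tensorflow::ops;  // NOLINT

  Scope scope = Scope::NewRootScope();
  auto logits = Placeholder(scope, DT_FLOAT, Placeholder::Shape({5, 3}));
  auto labels = Placeholder(scope, DT_FLOAT, Placeholder::Shape({5, 3}));
  auto xent = SoftmaxCrossEntropyWithLogits(scope, logits, labels);

  // Gradient of the per-example loss (output 0) with respect to the logits.
  std::vector<Output> grads;
  TF_CHECK_OK(
      AddSymbolicGradients(scope, {xent.loss}, {Output(logits)}, &grads));
  return 0;
}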
diff --git a/tensorflow/cc/gradients/nn_grad_test.cc b/tensorflow/cc/gradients/nn_grad_test.cc
index b4d457a9d1..aa72cf7ba2 100644
--- a/tensorflow/cc/gradients/nn_grad_test.cc
+++ b/tensorflow/cc/gradients/nn_grad_test.cc
@@ -25,6 +25,8 @@ limitations under the License.
namespace tensorflow {
namespace {
+using ops::AvgPool;
+using ops::AvgPool3D;
using ops::BiasAdd;
using ops::Conv2D;
using ops::Elu;
@@ -33,11 +35,9 @@ using ops::FractionalMaxPool;
using ops::L2Loss;
using ops::LogSoftmax;
using ops::LRN;
-using ops::AvgPool;
-using ops::AvgPool3D;
using ops::MaxPool;
-using ops::MaxPoolV2;
using ops::MaxPool3D;
+using ops::MaxPoolV2;
using ops::Placeholder;
using ops::Relu;
using ops::Relu6;
@@ -111,6 +111,20 @@ TEST_F(NNGradTest, SoftmaxGrad) {
RunTest(x, shape, y, shape);
}
+TEST_F(NNGradTest, SoftmaxCrossEntropyWithLogitsGrad) {
+ TensorShape logits_shape({5, 3});
+ TensorShape loss_shape({5});
+
+ auto logits = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(logits_shape));
+ auto labels = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(logits_shape));
+ auto y =
+ tensorflow::ops::SoftmaxCrossEntropyWithLogits(scope_, logits, labels);
+ // Note the reversal of the backprop and loss orders. Issue #18734 has been
+ // opened for this.
+ RunTest({logits, labels}, {logits_shape, logits_shape}, {y.backprop, y.loss},
+ {logits_shape, loss_shape});
+}
+
TEST_F(NNGradTest, LogSoftmaxGrad) {
TensorShape shape({5, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
@@ -253,7 +267,7 @@ TEST_F(NNGradTest, AvgPool3DGradHelper) {
RunTest(x, x_shape, y, y_shape);
}
-TEST_F(NNGradTest, LRN){
+TEST_F(NNGradTest, LRN) {
TensorShape x_shape({1, 1, 2, 1});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = LRN(scope_, x);
diff --git a/tensorflow/cc/saved_model/BUILD b/tensorflow/cc/saved_model/BUILD
index 06a3be18e0..3d3895c8fa 100644
--- a/tensorflow/cc/saved_model/BUILD
+++ b/tensorflow/cc/saved_model/BUILD
@@ -34,6 +34,46 @@ cc_library(
)
cc_library(
+ name = "reader",
+ srcs = ["reader.cc"],
+ hdrs = ["reader.h"],
+ deps = [
+ ":constants",
+ ] + if_not_mobile([
+ # TODO(b/111634734): :lib and :protos_all contain dependencies that
+ # cannot be built on mobile platforms. Instead, include the appropriate
+ # tf_lib depending on the build platform.
+ "//tensorflow/core:lib",
+ "//tensorflow/core:protos_all_cc",
+ ]) + if_mobile([
+ # Mobile-friendly SavedModel proto. See go/portable-proto for more info.
+ "//tensorflow/core:saved_model_portable_proto",
+ ]) + if_android([
+ "//tensorflow/core:android_tensorflow_lib",
+ ]) + if_ios([
+ "//tensorflow/core:ios_tensorflow_lib",
+ ]),
+)
+
+tf_cc_test(
+ name = "reader_test",
+ srcs = ["reader_test.cc"],
+ data = [
+ ":saved_model_half_plus_two",
+ ],
+ linkstatic = 1,
+ deps = [
+ ":constants",
+ ":reader",
+ ":tag_constants",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ "//tensorflow/core:testlib",
+ ],
+)
+
+cc_library(
name = "loader",
hdrs = ["loader.h"],
deps = [
@@ -54,6 +94,7 @@ cc_library(
hdrs = ["loader.h"],
deps = [
":constants",
+ ":reader",
] + if_not_mobile([
"//tensorflow/core:core_cpu",
"//tensorflow/core:framework",
diff --git a/tensorflow/cc/saved_model/loader.cc b/tensorflow/cc/saved_model/loader.cc
index faa1e378d0..07807ed2f3 100644
--- a/tensorflow/cc/saved_model/loader.cc
+++ b/tensorflow/cc/saved_model/loader.cc
@@ -18,8 +18,10 @@ limitations under the License.
#include <unordered_set>
#include "tensorflow/cc/saved_model/constants.h"
+#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/monitoring/counter.h"
+#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf_internal.h"
@@ -43,56 +45,6 @@ auto* load_latency = monitoring::Counter<1>::New(
constexpr char kLoadAttemptFail[] = "fail";
constexpr char kLoadAttemptSuccess[] = "success";
-Status ReadSavedModel(const string& export_dir, SavedModel* saved_model_proto) {
- const string saved_model_pb_path =
- io::JoinPath(export_dir, kSavedModelFilenamePb);
- if (Env::Default()->FileExists(saved_model_pb_path).ok()) {
- return ReadBinaryProto(Env::Default(), saved_model_pb_path,
- saved_model_proto);
- }
- const string saved_model_pbtxt_path =
- io::JoinPath(export_dir, kSavedModelFilenamePbTxt);
- if (Env::Default()->FileExists(saved_model_pbtxt_path).ok()) {
- return ReadTextProto(Env::Default(), saved_model_pbtxt_path,
- saved_model_proto);
- }
- return Status(error::Code::NOT_FOUND,
- "Could not find SavedModel .pb or .pbtxt at supplied export "
- "directory path: " +
- export_dir);
-}
-
-string GetTagsAsString(const std::unordered_set<string>& tags) {
- string tags_as_string = "{ ";
- for (const string& tag : tags) {
- tags_as_string = strings::StrCat(tags_as_string, tag, " ");
- }
- tags_as_string = strings::StrCat(tags_as_string, "}");
- return tags_as_string;
-}
-
-Status FindMetaGraphDefToLoad(const SavedModel& saved_model_proto,
- const std::unordered_set<string>& tags,
- MetaGraphDef* meta_graph_def_to_load) {
- for (const MetaGraphDef& meta_graph_def : saved_model_proto.meta_graphs()) {
- // Get tags from the meta_graph_def.
- std::unordered_set<string> graph_tags;
- for (const string& tag : meta_graph_def.meta_info_def().tags()) {
- graph_tags.insert(tag);
- }
- // Match with the set of tags provided.
- if (graph_tags == tags) {
- *meta_graph_def_to_load = meta_graph_def;
- return Status::OK();
- }
- }
- return Status(error::Code::NOT_FOUND,
- "Could not find meta graph def matching supplied tags: " +
- GetTagsAsString(tags) +
- ". To inspect available tag-sets in the SavedModel, please "
- "use the SavedModel CLI: `saved_model_cli`");
-}
-
Status LoadMetaGraphIntoSession(const MetaGraphDef& meta_graph_def,
const SessionOptions& session_options,
std::unique_ptr<Session>* session) {
@@ -235,18 +187,8 @@ Status LoadSavedModelInternal(const SessionOptions& session_options,
const string& export_dir,
const std::unordered_set<string>& tags,
SavedModelBundle* const bundle) {
- if (!MaybeSavedModelDirectory(export_dir)) {
- return Status(error::Code::NOT_FOUND,
- "SavedModel not found in export directory: " + export_dir);
- }
- LOG(INFO) << "Loading SavedModel with tags: " << GetTagsAsString(tags)
- << "; from: " << export_dir;
-
- SavedModel saved_model_proto;
- TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));
-
- TF_RETURN_IF_ERROR(
- FindMetaGraphDefToLoad(saved_model_proto, tags, &bundle->meta_graph_def));
+ TF_RETURN_IF_ERROR(ReadMetaGraphDefFromSavedModel(export_dir, tags,
+ &bundle->meta_graph_def));
TF_RETURN_IF_ERROR(LoadMetaGraphIntoSession(
bundle->meta_graph_def, session_options, &bundle->session));
@@ -288,8 +230,8 @@ Status LoadSavedModel(const SessionOptions& session_options,
return end_microseconds - start_microseconds;
}();
auto log_and_count = [&](const string& status_str) {
- LOG(INFO) << "SavedModel load for tags " << GetTagsAsString(tags)
- << "; Status: " << status_str << ". Took "
+ LOG(INFO) << "SavedModel load for tags { " << str_util::Join(tags, " ")
+ << " }; Status: " << status_str << ". Took "
<< load_latency_microsecs << " microseconds.";
load_attempt_count->GetCell(export_dir, status_str)->IncrementBy(1);
};
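The public loading API is unchanged by this refactoring; callers still go through LoadSavedModel roughly as in the sketch below, which now delegates proto reading and tag matching to the new reader internally. The export directory path is a placeholder and the sketch is not part of the patch.

// Sketch: loading a SavedModel bundle through the unchanged public entry point.
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/cc/saved_model/tag_constants.h"

int main() {
  tensorflow::SavedModelBundle bundle;
  tensorflow::SessionOptions session_options;
  tensorflow::RunOptions run_options;
  TF_CHECK_OK(tensorflow::LoadSavedModel(session_options, run_options,
                                         "/tmp/half_plus_two/00000123",
                                         {tensorflow::kSavedModelTagServe},
                                         &bundle));
  // bundle.meta_graph_def and bundle.session are now ready to use.
  return 0;
}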
diff --git a/tensorflow/cc/saved_model/reader.cc b/tensorflow/cc/saved_model/reader.cc
new file mode 100644
index 0000000000..2146c8a197
--- /dev/null
+++ b/tensorflow/cc/saved_model/reader.cc
@@ -0,0 +1,88 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/cc/saved_model/reader.h"
+
+#include <unordered_set>
+
+#include "tensorflow/cc/saved_model/constants.h"
+#include "tensorflow/core/lib/io/path.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/lib/strings/strcat.h"
+#include "tensorflow/core/platform/env.h"
+#include "tensorflow/core/protobuf/saved_model.pb.h"
+
+namespace tensorflow {
+namespace {
+
+Status ReadSavedModel(const string& export_dir, SavedModel* saved_model_proto) {
+ LOG(INFO) << "Reading SavedModel from: " << export_dir;
+
+ const string saved_model_pb_path =
+ io::JoinPath(export_dir, kSavedModelFilenamePb);
+ if (Env::Default()->FileExists(saved_model_pb_path).ok()) {
+ return ReadBinaryProto(Env::Default(), saved_model_pb_path,
+ saved_model_proto);
+ }
+ const string saved_model_pbtxt_path =
+ io::JoinPath(export_dir, kSavedModelFilenamePbTxt);
+ if (Env::Default()->FileExists(saved_model_pbtxt_path).ok()) {
+ return ReadTextProto(Env::Default(), saved_model_pbtxt_path,
+ saved_model_proto);
+ }
+ return Status(error::Code::NOT_FOUND,
+ "Could not find SavedModel .pb or .pbtxt at supplied export "
+ "directory path: " +
+ export_dir);
+}
+
+Status FindMetaGraphDef(const SavedModel& saved_model_proto,
+ const std::unordered_set<string>& tags,
+ MetaGraphDef* meta_graph_def) {
+ LOG(INFO) << "Reading meta graph with tags { " << str_util::Join(tags, " ")
+ << " }";
+ for (const MetaGraphDef& graph_def : saved_model_proto.meta_graphs()) {
+ // Get tags from the graph_def.
+ std::unordered_set<string> graph_tags;
+ for (const string& tag : graph_def.meta_info_def().tags()) {
+ graph_tags.insert(tag);
+ }
+ // Match with the set of tags provided.
+ if (graph_tags == tags) {
+ *meta_graph_def = graph_def;
+ return Status::OK();
+ }
+ }
+ return Status(
+ error::Code::NOT_FOUND,
+ strings::StrCat(
+ "Could not find meta graph def matching supplied tags: { ",
+ str_util::Join(tags, " "),
+ " }. To inspect available tag-sets in the SavedModel, please "
+ "use the SavedModel CLI: `saved_model_cli`"));
+}
+
+} // namespace
+
+Status ReadMetaGraphDefFromSavedModel(const string& export_dir,
+ const std::unordered_set<string>& tags,
+ MetaGraphDef* const meta_graph_def) {
+ SavedModel saved_model_proto;
+ TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));
+ TF_RETURN_IF_ERROR(FindMetaGraphDef(saved_model_proto, tags, meta_graph_def));
+ return Status::OK();
+}
+
+} // namespace tensorflow
diff --git a/tensorflow/cc/saved_model/reader.h b/tensorflow/cc/saved_model/reader.h
new file mode 100644
index 0000000000..5815108df2
--- /dev/null
+++ b/tensorflow/cc/saved_model/reader.h
@@ -0,0 +1,39 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+/// Functions to read the SavedModel proto, or parts of it.
+
+#ifndef TENSORFLOW_CC_SAVED_MODEL_READER_H_
+#define TENSORFLOW_CC_SAVED_MODEL_READER_H_
+
+#include <string>
+#include <unordered_set>
+
+#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/protobuf/meta_graph.pb.h"
+
+namespace tensorflow {
+
+// Reads the SavedModel proto from saved_model.pb(txt) in the given directory,
+// finds the MetaGraphDef that matches the given set of tags and writes it to
+// the `meta_graph_def` parameter. Returns a failure status when the SavedModel
+// file does not exist or no MetaGraphDef matches the tags.
+Status ReadMetaGraphDefFromSavedModel(const string& export_dir,
+ const std::unordered_set<string>& tags,
+ MetaGraphDef* const meta_graph_def);
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_CC_SAVED_MODEL_READER_H_
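A hedged sketch (not part of the patch) of using the new reader on its own: it inspects which signatures a SavedModel exports without constructing a session, which is the main motivation for splitting this helper out of loader.cc. The export directory path is a placeholder.

// Sketch: read only the MetaGraphDef of a SavedModel, no session required.
#include <iostream>

#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/cc/saved_model/tag_constants.h"

int main() {
  tensorflow::MetaGraphDef meta_graph_def;
  tensorflow::Status status = tensorflow::ReadMetaGraphDefFromSavedModel(
      "/tmp/half_plus_two/00000123", {tensorflow::kSavedModelTagServe},
      &meta_graph_def);
  if (!status.ok()) {
    std::cerr << status.error_message() << "\n";
    return 1;
  }
  for (const auto& signature : meta_graph_def.signature_def()) {
    std::cout << "signature: " << signature.first << "\n";
  }
  return 0;
}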
diff --git a/tensorflow/cc/saved_model/reader_test.cc b/tensorflow/cc/saved_model/reader_test.cc
new file mode 100644
index 0000000000..620e9c2eec
--- /dev/null
+++ b/tensorflow/cc/saved_model/reader_test.cc
@@ -0,0 +1,108 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/cc/saved_model/reader.h"
+
+#include "tensorflow/cc/saved_model/constants.h"
+#include "tensorflow/cc/saved_model/tag_constants.h"
+#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/lib/io/path.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+namespace {
+
+constexpr char kTestDataPbTxt[] =
+ "cc/saved_model/testdata/half_plus_two_pbtxt/00000123";
+constexpr char kTestDataSharded[] =
+ "cc/saved_model/testdata/half_plus_two/00000123";
+
+class ReaderTest : public ::testing::Test {
+ protected:
+ ReaderTest() {}
+
+ void CheckMetaGraphDef(const MetaGraphDef& meta_graph_def) {
+ const auto& tags = meta_graph_def.meta_info_def().tags();
+ EXPECT_TRUE(std::find(tags.begin(), tags.end(), kSavedModelTagServe) !=
+ tags.end());
+ EXPECT_NE(meta_graph_def.meta_info_def().tensorflow_version(), "");
+ EXPECT_EQ(
+ meta_graph_def.signature_def().at("serving_default").method_name(),
+ "tensorflow/serving/predict");
+ }
+};
+
+TEST_F(ReaderTest, TagMatch) {
+ MetaGraphDef meta_graph_def;
+
+ const string export_dir =
+ io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataSharded);
+ TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
+ &meta_graph_def));
+ CheckMetaGraphDef(meta_graph_def);
+}
+
+TEST_F(ReaderTest, NoTagMatch) {
+ MetaGraphDef meta_graph_def;
+
+ const string export_dir =
+ io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataSharded);
+ Status st = ReadMetaGraphDefFromSavedModel(export_dir, {"missing-tag"},
+ &meta_graph_def);
+ EXPECT_FALSE(st.ok());
+ EXPECT_TRUE(str_util::StrContains(
+ st.error_message(),
+ "Could not find meta graph def matching supplied tags: { missing-tag }"))
+ << st.error_message();
+}
+
+TEST_F(ReaderTest, NoTagMatchMultiple) {
+ MetaGraphDef meta_graph_def;
+
+ const string export_dir =
+ io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataSharded);
+ Status st = ReadMetaGraphDefFromSavedModel(
+ export_dir, {kSavedModelTagServe, "missing-tag"}, &meta_graph_def);
+ EXPECT_FALSE(st.ok());
+ EXPECT_TRUE(str_util::StrContains(
+ st.error_message(),
+ "Could not find meta graph def matching supplied tags: "))
+ << st.error_message();
+}
+
+TEST_F(ReaderTest, PbtxtFormat) {
+ MetaGraphDef meta_graph_def;
+
+ const string export_dir =
+ io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPbTxt);
+ TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
+ &meta_graph_def));
+ CheckMetaGraphDef(meta_graph_def);
+}
+
+TEST_F(ReaderTest, InvalidExportPath) {
+ MetaGraphDef meta_graph_def;
+
+ const string export_dir =
+ io::JoinPath(testing::TensorFlowSrcRoot(), "missing-path");
+ Status st = ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
+ &meta_graph_def);
+ EXPECT_FALSE(st.ok());
+}
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/compiler/jit/BUILD b/tensorflow/compiler/jit/BUILD
index c2245b8eae..9174a67cc6 100644
--- a/tensorflow/compiler/jit/BUILD
+++ b/tensorflow/compiler/jit/BUILD
@@ -304,11 +304,13 @@ cc_library(
name = "compilation_passes",
srcs = [
"build_xla_launch_ops_pass.cc",
+ "deadness_analysis.cc",
"encapsulate_subgraphs_pass.cc",
"mark_for_compilation_pass.cc",
],
hdrs = [
"build_xla_launch_ops_pass.h",
+ "deadness_analysis.h",
"encapsulate_subgraphs_pass.h",
"mark_for_compilation_pass.h",
],
@@ -325,6 +327,7 @@ cc_library(
"//tensorflow/compiler/tf2xla:dump_graph",
"//tensorflow/compiler/tf2xla:xla_compiler",
"//tensorflow/compiler/xla:status_macros",
+ "//tensorflow/compiler/xla:util",
"//tensorflow/core:core_cpu",
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:framework",
@@ -377,6 +380,7 @@ tf_cc_test(
name = "compilation_passes_test",
size = "small",
srcs = [
+ "deadness_analysis_test.cc",
"encapsulate_subgraphs_pass_test.cc",
"mark_for_compilation_pass_test.cc",
],
@@ -387,6 +391,7 @@ tf_cc_test(
"//tensorflow/cc:cc_ops_internal",
"//tensorflow/cc:function_ops",
"//tensorflow/cc:ops",
+ "//tensorflow/cc:sendrecv_ops",
"//tensorflow/compiler/jit/kernels:xla_launch_op",
"//tensorflow/compiler/tf2xla:xla_compiler",
"//tensorflow/compiler/tf2xla/kernels:xla_ops",
@@ -458,6 +463,7 @@ cc_library(
visibility = ["//visibility:public"],
deps = [
":common",
+ ":compilation_passes",
":union_find",
":xla_cluster_util",
"//tensorflow/compiler/jit/graphcycles",
diff --git a/tensorflow/compiler/jit/deadness_analysis.cc b/tensorflow/compiler/jit/deadness_analysis.cc
new file mode 100644
index 0000000000..d81e5fe900
--- /dev/null
+++ b/tensorflow/compiler/jit/deadness_analysis.cc
@@ -0,0 +1,566 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/jit/deadness_analysis.h"
+#include "tensorflow/core/graph/algorithm.h"
+#include "tensorflow/core/graph/tensor_id.h"
+#include "tensorflow/core/lib/gtl/flatset.h"
+#include "tensorflow/core/lib/hash/hash.h"
+
+// ALGORITHM OVERVIEW
+//
+// We map every output produced by each node in the TensorFlow graph (including
+// control dependence) into an instance of the Predicate class. Instances of
+// Predicate denote logical formulas and mapping a node `n` to a predicate
+// `pred` implies that `n` is executed whenever `pred` is true. Then we can
+// deduce mismatching liveness in the inputs to a node by comparing the
+// predicates those inputs are mapped to.
+//
+// Loops are handled pessimistically -- we map Merge nodes with backedges to
+// uninterpreted symbols (the same kind we use to represent Switch and _Recv).
+// Predicate equality has to hold over all possible assignments to these
+// uninterpreted symbols.
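+//
+// ILLUSTRATIVE EXAMPLE (a sketch, not part of the algorithm itself): a Switch
+// node driven by a predicate tensor `p` has its true output mapped (roughly)
+// to the symbol `p` and its false output to `~p`.  A Merge of those two
+// outputs is then mapped to `p | ~p`, which simplifies to #true (the Merge is
+// always live), whereas an Add of the two outputs has inputs mapped to `p`
+// and `~p` and is therefore reported as having mismatching deadness.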
+
+namespace tensorflow {
+
+namespace {
+
+// Represents a logical predicate, used as described in the algorithm overview
+// above.
+class Predicate {
+ public:
+ enum class Kind { kAnd, kOr, kNot, kSymbol };
+
+ virtual string ToString() const = 0;
+ int64 hash() const { return hash_; }
+
+ virtual Kind kind() const = 0;
+ virtual ~Predicate() {}
+
+ protected:
+ explicit Predicate(int64 hash) : hash_(hash) {}
+
+ private:
+ const int64 hash_;
+
+ TF_DISALLOW_COPY_AND_ASSIGN(Predicate);
+};
+
+int64 HashPredicateSequence(Predicate::Kind kind,
+ gtl::ArraySlice<Predicate*> preds) {
+ int64 hash = ::tensorflow::hash<Predicate::Kind>()(kind);
+ for (Predicate* pred : preds) {
+ hash = Hash64Combine(hash, pred->hash());
+ }
+ return hash;
+}
+
+// Represents a logical conjunction of a set of predicates.
+class AndPredicate : public Predicate {
+ public:
+ explicit AndPredicate(std::vector<Predicate*> operands)
+ : Predicate(HashPredicateSequence(Kind::kAnd, operands)),
+ operands_(std::move(operands)) {}
+
+ string ToString() const override {
+ if (operands().empty()) {
+ return "#true";
+ }
+
+ std::vector<string> operands_str;
+ std::transform(operands().begin(), operands().end(),
+ std::back_inserter(operands_str),
+ [](Predicate* pred) { return pred->ToString(); });
+
+ return strings::StrCat("(", str_util::Join(operands_str, " & "), ")");
+ }
+
+ Kind kind() const override { return Kind::kAnd; }
+
+ const gtl::ArraySlice<Predicate*> operands() const { return operands_; }
+
+ private:
+ std::vector<Predicate*> operands_;
+};
+
+// Represents a logical disjunction of a set of predicates.
+class OrPredicate : public Predicate {
+ public:
+ explicit OrPredicate(std::vector<Predicate*> operands)
+ : Predicate(HashPredicateSequence(Kind::kOr, operands)),
+ operands_(std::move(operands)) {}
+
+ string ToString() const override {
+ if (operands().empty()) {
+ return "#false";
+ }
+
+ std::vector<string> operands_str;
+ std::transform(operands().begin(), operands().end(),
+ std::back_inserter(operands_str),
+ [](Predicate* pred) { return pred->ToString(); });
+
+ return strings::StrCat("(", str_util::Join(operands_str, " | "), ")");
+ }
+
+ Kind kind() const override { return Kind::kOr; }
+ const gtl::ArraySlice<Predicate*> operands() const { return operands_; }
+
+ private:
+ std::vector<Predicate*> operands_;
+};
+
+// Represents the logical negation of a single predicate.
+class NotPredicate : public Predicate {
+ public:
+ explicit NotPredicate(Predicate* operand)
+ : Predicate(HashPredicateSequence(Kind::kNot, {operand})),
+ operand_(operand) {}
+
+ string ToString() const override {
+ return strings::StrCat("~", operand()->ToString());
+ }
+
+ Kind kind() const override { return Kind::kNot; }
+ Predicate* operand() const { return operand_; }
+
+ private:
+ Predicate* operand_;
+};
+
+// Represents an uninterpreted symbol in a logical predicate.
+//
+// Two predicates are equivalent iff they are equivalent for all assignments to
+// the symbols contained in them.
+class SymbolPredicate : public Predicate {
+ public:
+ explicit SymbolPredicate(TensorId tensor_id, bool must_be_true)
+ : Predicate(Hash(tensor_id, must_be_true)),
+ tensor_id_(std::move(tensor_id)),
+ must_be_true_(must_be_true) {}
+
+ string ToString() const override { return tensor_id_.ToString(); }
+ Kind kind() const override { return Kind::kSymbol; }
+
+ // If `must_be_true()` is true this SymbolPredicate represents the proposition
+ // "tensor_id() is live and evaluates to true".
+ //
+ // If `must_be_true()` is false then this SymbolPredicate represents the
+ // proposition "tensor_id() is live (and may evalutate to any value)"
+ TensorId tensor_id() const { return tensor_id_; }
+ bool must_be_true() const { return must_be_true_; }
+
+ private:
+ TensorId tensor_id_;
+ bool must_be_true_;
+
+ static int64 Hash(const TensorId tensor_id, bool must_be_true) {
+ return Hash64Combine(
+ ::tensorflow::hash<bool>()(must_be_true),
+ Hash64Combine(::tensorflow::hash<Predicate::Kind>()(Kind::kSymbol),
+ TensorId::Hasher{}(tensor_id)));
+ }
+};
+
+// Creates and owns Predicate instances. Simplifies predicates as it creates
+// them.
+class PredicateFactory {
+ public:
+ Predicate* MakeAndPredicate(gtl::ArraySlice<Predicate*> operands) {
+ return MakeAndOrImpl(operands, /*is_and=*/true);
+ }
+
+ Predicate* MakeOrPredicate(gtl::ArraySlice<Predicate*> operands) {
+ return MakeAndOrImpl(operands, /*is_and=*/false);
+ }
+
+ Predicate* MakeNotPredicate(Predicate* pred) {
+ SignatureForNot signature = pred;
+ auto it = interned_not_instances_.find(signature);
+ if (it == interned_not_instances_.end()) {
+ std::unique_ptr<Predicate> new_pred = Make<NotPredicate>(pred);
+ Predicate* new_pred_ptr = new_pred.get();
+ interned_not_instances_.emplace(signature, std::move(new_pred));
+ return new_pred_ptr;
+ } else {
+ return it->second.get();
+ }
+ }
+
+ Predicate* MakeSymbolPredicate(TensorId tensor_id, bool must_be_true) {
+ SignatureForSymbol signature = {tensor_id, must_be_true};
+ auto it = interned_symbol_instances_.find(signature);
+ if (it == interned_symbol_instances_.end()) {
+ std::unique_ptr<Predicate> new_pred =
+ Make<SymbolPredicate>(tensor_id, must_be_true);
+ Predicate* new_pred_ptr = new_pred.get();
+ interned_symbol_instances_.emplace(std::move(signature),
+ std::move(new_pred));
+ return new_pred_ptr;
+ } else {
+ return it->second.get();
+ }
+ }
+
+ Predicate* MakeTrue() { return MakeAndPredicate({}); }
+ Predicate* MakeFalse() { return MakeOrPredicate({}); }
+
+ private:
+ template <typename PredicateT, typename... Args>
+ std::unique_ptr<Predicate> Make(Args&&... args) {
+ return std::unique_ptr<PredicateT>(
+ new PredicateT(std::forward<Args>(args)...));
+ }
+
+ Predicate* MakeAndOrImpl(gtl::ArraySlice<Predicate*> operands, bool is_and);
+
+ // Predicate instances are interned, meaning that there is only a single
+ // instance of a Predicate object with a given content. This makes checking
+ // for structural equality super-cheap -- we can just compare pointers.
+ //
+ // We intern predicates by maintaining a map from the content of a Predicate
+ // to the only instance of said predicate we allow to exist in the
+ // interned_and_or_instances_, interned_not_instances_ and
+ // interned_symbol_instances_ fields. These maps also double up as storage
+ // for the owning pointers to predicate instances.
+
+ using SignatureForAndOr =
+ std::pair<Predicate::Kind, gtl::ArraySlice<Predicate*>>;
+ using SignatureForNot = Predicate*;
+ using SignatureForSymbol = std::pair<SafeTensorId, bool>;
+
+ struct HashSignatureForAndOr {
+ size_t operator()(const SignatureForAndOr& signature) const {
+ size_t hash = ::tensorflow::hash<Predicate::Kind>()(signature.first);
+ for (Predicate* p : signature.second) {
+ hash = Hash64Combine(hash, ::tensorflow::hash<Predicate*>()(p));
+ }
+ return hash;
+ }
+ };
+
+ struct HashSignatureForSymbol {
+ size_t operator()(const SignatureForSymbol& signature) const {
+ return Hash64Combine(SafeTensorId::Hasher()(signature.first),
+ ::tensorflow::hash<bool>()(signature.second));
+ }
+ };
+
+ gtl::FlatMap<SignatureForAndOr, std::unique_ptr<Predicate>,
+ HashSignatureForAndOr>
+ interned_and_or_instances_;
+ gtl::FlatMap<SignatureForNot, std::unique_ptr<Predicate>>
+ interned_not_instances_;
+ gtl::FlatMap<SignatureForSymbol, std::unique_ptr<Predicate>,
+ HashSignatureForSymbol>
+ interned_symbol_instances_;
+};
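+
+// A minimal usage sketch of the factory (illustrative only; `a` and `b` stand
+// for arbitrary TensorIds).  Because instances are interned and And/Or
+// operands are deduplicated and canonically ordered, structurally equal
+// formulas come back as the same pointer:
+//
+//   PredicateFactory factory;
+//   Predicate* x = factory.MakeSymbolPredicate(a, /*must_be_true=*/false);
+//   Predicate* y = factory.MakeSymbolPredicate(b, /*must_be_true=*/false);
+//   CHECK_EQ(factory.MakeAndPredicate({x, y}),
+//            factory.MakeAndPredicate({y, x, y}));  // dedup + reorder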
+
+// Common code to create AndPredicate or OrPredicate instances.
+Predicate* PredicateFactory::MakeAndOrImpl(gtl::ArraySlice<Predicate*> operands,
+ bool is_and) {
+ Predicate::Kind pred_kind =
+ is_and ? Predicate::Kind::kAnd : Predicate::Kind::kOr;
+ gtl::FlatSet<Predicate*> simplified_ops_set;
+ std::vector<Predicate*> simplified_ops;
+ for (Predicate* op : operands) {
+ // Simplify A&A => A and A|A => A.
+ if (!simplified_ops_set.insert(op).second) {
+ continue;
+ }
+
+ if (op->kind() == pred_kind) {
+ // "Inline" the operands of an inner And/Or into the parent And/Or.
+ gtl::ArraySlice<Predicate*> operands =
+ is_and ? dynamic_cast<AndPredicate*>(op)->operands()
+ : dynamic_cast<OrPredicate*>(op)->operands();
+ for (Predicate* subop : operands) {
+ if (simplified_ops_set.insert(subop).second) {
+ simplified_ops.push_back(subop);
+ }
+ }
+ } else {
+ simplified_ops.push_back(op);
+ }
+ }
+
+ if (simplified_ops.size() == 1) {
+ return simplified_ops[0];
+ }
+
+ // Simplify "A&~A=>False" and "A|~A=>True".
+ gtl::FlatSet<Predicate*> negated_ops;
+ for (Predicate* op : simplified_ops) {
+ if (op->kind() == Predicate::Kind::kNot) {
+ negated_ops.insert(dynamic_cast<NotPredicate&>(*op).operand());
+ }
+ }
+
+ for (Predicate* op : simplified_ops) {
+ if (negated_ops.count(op)) {
+ return is_and ? MakeFalse() : MakeTrue();
+ }
+ }
+
+ std::stable_sort(
+ simplified_ops.begin(), simplified_ops.end(),
+ [](Predicate* a, Predicate* b) { return a->hash() < b->hash(); });
+
+ auto it = interned_and_or_instances_.find({pred_kind, simplified_ops});
+ if (it == interned_and_or_instances_.end()) {
+ simplified_ops.shrink_to_fit();
+ // NB! Because we'll use a non-owning reference to simplified_ops in the
+ // key for interned_and_or_instances_ we need to be careful to std::move()
+ // it all the way through.
+ gtl::ArraySlice<Predicate*> operands_slice = simplified_ops;
+ std::unique_ptr<Predicate> new_pred =
+ is_and ? Make<AndPredicate>(std::move(simplified_ops))
+ : Make<OrPredicate>(std::move(simplified_ops));
+
+ Predicate* new_pred_ptr = new_pred.get();
+ CHECK(interned_and_or_instances_
+ .emplace(SignatureForAndOr(pred_kind, operands_slice),
+ std::move(new_pred))
+ .second);
+ return new_pred_ptr;
+ } else {
+ return it->second.get();
+ }
+}
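+
+// For instance (assuming distinct symbols A and B): MakeAndPredicate({A, A})
+// returns A, MakeAndPredicate({A, ~A, B}) returns #false, and
+// MakeOrPredicate({A, ~A}) returns #true, per the simplifications above.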
+
+class DeadnessAnalysisImpl : public DeadnessAnalysis {
+ public:
+ explicit DeadnessAnalysisImpl(const Graph* graph)
+ : graph_(*graph), vlog_(VLOG_IS_ON(2)) {}
+
+ Status Populate();
+ bool HasInputsWithMismatchingDeadness(const Node& node) override;
+ void Print() const override;
+
+ private:
+ enum class EdgeKind { kDataAndControl, kDataOnly, kControlOnly };
+
+ std::vector<Predicate*> GetIncomingPreds(Node* n, EdgeKind edge_kind);
+ void SetPred(Node* n, int output_idx, Predicate* pred) {
+ CHECK(
+ predicate_map_.insert({TensorId(n->name(), output_idx), pred}).second);
+ }
+ void SetPred(Node* n, gtl::ArraySlice<int> output_idxs, Predicate* pred) {
+ for (int output_idx : output_idxs) {
+ SetPred(n, output_idx, pred);
+ }
+ }
+
+ Status HandleSwitch(Node* n);
+ Status HandleMerge(Node* n);
+ Status HandleRecv(Node* n);
+ Status HandleGeneric(Node* n);
+
+ const Graph& graph_;
+ gtl::FlatMap<TensorId, Predicate*, TensorId::Hasher> predicate_map_;
+ PredicateFactory predicate_factory_;
+ bool vlog_;
+};
+
+TensorId InputEdgeToTensorId(const Edge* e) {
+ return TensorId(e->src()->name(), e->src_output());
+}
+
+std::vector<Predicate*> DeadnessAnalysisImpl::GetIncomingPreds(
+ Node* n, DeadnessAnalysisImpl::EdgeKind edge_kind) {
+ std::vector<Predicate*> incoming_preds;
+ for (const Edge* in_edge : n->in_edges()) {
+ bool should_process =
+ edge_kind == EdgeKind::kDataAndControl ||
+ (in_edge->IsControlEdge() && edge_kind == EdgeKind::kControlOnly) ||
+ (!in_edge->IsControlEdge() && edge_kind == EdgeKind::kDataOnly);
+
+ if (should_process) {
+ auto it = predicate_map_.find(InputEdgeToTensorId(in_edge));
+ CHECK(it != predicate_map_.end());
+ incoming_preds.push_back(it->second);
+ }
+ }
+ return incoming_preds;
+}
+
+Status DeadnessAnalysisImpl::HandleSwitch(Node* n) {
+ std::vector<Predicate*> input_preds =
+ GetIncomingPreds(n, EdgeKind::kDataAndControl);
+ const Edge* pred_edge;
+ TF_RETURN_IF_ERROR(n->input_edge(1, &pred_edge));
+ Predicate* true_switch = predicate_factory_.MakeSymbolPredicate(
+ TensorId(pred_edge->src()->name(), pred_edge->src_output()),
+ /*must_be_true=*/true);
+ Predicate* false_switch = predicate_factory_.MakeNotPredicate(true_switch);
+
+ // Output 0 is alive iff all inputs are alive and the condition is false.
+ input_preds.push_back(false_switch);
+ SetPred(n, 0, predicate_factory_.MakeAndPredicate(input_preds));
+ input_preds.pop_back();
+
+ // Output 1 is alive iff all inputs are alive and the condition is true.
+ input_preds.push_back(true_switch);
+ SetPred(n, 1, predicate_factory_.MakeAndPredicate(input_preds));
+ input_preds.pop_back();
+
+  // The control output is alive iff all inputs are alive.
+ SetPred(n, Graph::kControlSlot,
+ predicate_factory_.MakeAndPredicate(input_preds));
+
+ return Status::OK();
+}
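+
+// Concretely (a sketch): if the conjunction of this Switch's input predicates
+// is P and the predicate tensor is mapped to the symbol `pred`, then output 0
+// ends up as (P & ~pred), output 1 as (P & pred), and the control output as P
+// alone.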
+
+Status DeadnessAnalysisImpl::HandleMerge(Node* n) {
+ // Merge ignores deadness of its control inputs. A merge that isn't the
+  // target of a backedge is alive iff any of its data inputs are. We treat
+ // the liveness of a merge that is the target of a backedge symbolically.
+
+ bool has_backedge = std::any_of(
+ n->in_edges().begin(), n->in_edges().end(), [](const Edge* e) {
+ return !e->IsControlEdge() && e->src()->IsNextIteration();
+ });
+
+ Predicate* input_data_pred =
+ has_backedge ? predicate_factory_.MakeSymbolPredicate(
+ TensorId(n->name(), 0), /*must_be_true=*/false)
+ : predicate_factory_.MakeOrPredicate(
+ GetIncomingPreds(n, EdgeKind::kDataOnly));
+
+ SetPred(n, {0, 1, Graph::kControlSlot}, input_data_pred);
+ return Status::OK();
+}
+
+Status DeadnessAnalysisImpl::HandleRecv(Node* n) {
+ // In addition to being alive or dead based on the inputs, a _Recv can also
+ // acquire a dead signal from a _Send.
+ std::vector<Predicate*> input_preds =
+ GetIncomingPreds(n, EdgeKind::kDataAndControl);
+ input_preds.push_back(predicate_factory_.MakeSymbolPredicate(
+ TensorId(n->name(), 0), /*must_be_true=*/false));
+ SetPred(n, {0, Graph::kControlSlot},
+ predicate_factory_.MakeAndPredicate(input_preds));
+ return Status::OK();
+}
+
+Status DeadnessAnalysisImpl::HandleGeneric(Node* n) {
+ // Generally nodes are alive iff all their inputs are alive.
+ Predicate* pred = predicate_factory_.MakeAndPredicate(
+ GetIncomingPreds(n, EdgeKind::kDataAndControl));
+ for (int output_idx = 0; output_idx < n->num_outputs(); output_idx++) {
+ SetPred(n, output_idx, pred);
+ }
+ SetPred(n, Graph::kControlSlot, pred);
+ return Status::OK();
+}
+
+Status DeadnessAnalysisImpl::Populate() {
+ std::vector<Node*> rpo;
+ GetReversePostOrder(graph_, &rpo, /*stable_comparator=*/{},
+ /*edge_filter=*/[](const Edge& edge) {
+ return !edge.src()->IsNextIteration();
+ });
+
+  // This is an abstract interpretation over the deadness propagation
+  // semantics of the graph executor.
+ for (Node* n : rpo) {
+ if (n->IsSwitch()) {
+ TF_RETURN_IF_ERROR(HandleSwitch(n));
+ } else if (n->IsMerge()) {
+ TF_RETURN_IF_ERROR(HandleMerge(n));
+ } else if (n->IsControlTrigger()) {
+ SetPred(n, Graph::kControlSlot, predicate_factory_.MakeTrue());
+ } else if (n->IsRecv() || n->IsHostRecv()) {
+ TF_RETURN_IF_ERROR(HandleRecv(n));
+ } else {
+ TF_RETURN_IF_ERROR(HandleGeneric(n));
+ }
+ }
+
+ return Status::OK();
+}
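+
+// Note: visiting nodes in reverse post order with backedges filtered out means
+// that, by the time a node is handled, every non-backedge input already has an
+// entry in predicate_map_, which is what the CHECK in GetIncomingPreds relies
+// on.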
+
+bool DeadnessAnalysisImpl::HasInputsWithMismatchingDeadness(const Node& node) {
+ CHECK(!node.IsMerge());
+
+ if (vlog_) {
+ VLOG(2) << "HasInputsWithMismatchingDeadness(" << node.name() << ")";
+ }
+
+ Predicate* pred = nullptr;
+ for (const Edge* edge : node.in_edges()) {
+ auto it = predicate_map_.find(InputEdgeToTensorId(edge));
+ CHECK(it != predicate_map_.end());
+ if (vlog_) {
+ VLOG(2) << " " << InputEdgeToTensorId(edge).ToString() << ": "
+ << it->second->ToString();
+ }
+
+ // Today we just compare the predicates for equality (with some
+ // canonicalization/simplification happening before) but we could be more
+ // sophisticated here if need be. Comparing pointers is sufficient because
+ // we intern Predicate instances by their content.
+ if (pred != nullptr && pred != it->second) {
+ if (vlog_) {
+ VLOG(2) << "HasInputsWithMismatchingDeadness(" << node.name()
+ << ") -> true";
+ }
+ return true;
+ }
+ pred = it->second;
+ }
+
+ if (vlog_) {
+ VLOG(2) << "HasInputsWithMismatchingDeadness(" << node.name()
+ << ") -> false";
+ }
+
+ return false;
+}
+
+void DeadnessAnalysisImpl::Print() const {
+ std::vector<TensorId> tensor_ids;
+ for (const auto& kv_pair : predicate_map_) {
+ tensor_ids.push_back(kv_pair.first);
+ }
+
+ std::sort(tensor_ids.begin(), tensor_ids.end());
+
+ for (TensorId tensor_id : tensor_ids) {
+ auto it = predicate_map_.find(tensor_id);
+ CHECK(it != predicate_map_.end()) << tensor_id.ToString();
+ VLOG(2) << tensor_id.ToString() << " -> " << it->second->ToString();
+ }
+}
+
+} // namespace
+
+DeadnessAnalysis::~DeadnessAnalysis() {}
+
+/*static*/ Status DeadnessAnalysis::Run(
+ const Graph& graph, std::unique_ptr<DeadnessAnalysis>* result) {
+ std::unique_ptr<DeadnessAnalysisImpl> analysis(
+ new DeadnessAnalysisImpl(&graph));
+ TF_RETURN_IF_ERROR(analysis->Populate());
+
+ if (VLOG_IS_ON(2)) {
+ analysis->Print();
+ }
+
+ *result = std::move(analysis);
+ return Status::OK();
+}
+
+} // namespace tensorflow
diff --git a/tensorflow/compiler/jit/deadness_analysis.h b/tensorflow/compiler/jit/deadness_analysis.h
new file mode 100644
index 0000000000..6e7ab41161
--- /dev/null
+++ b/tensorflow/compiler/jit/deadness_analysis.h
@@ -0,0 +1,68 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_JIT_DEADNESS_ANALYSIS_H_
+#define TENSORFLOW_COMPILER_JIT_DEADNESS_ANALYSIS_H_
+
+#include "tensorflow/core/graph/graph.h"
+
+namespace tensorflow {
+
+// This analyzes a TensorFlow graph to identify nodes which may have partially
+// dead inputs (i.e. these nodes may have some dead inputs and some alive
+// inputs).
+//
+// For example, the ADD node in the following graph
+//
+//            V0   PRED0    V1   PRED1
+//            |      |      |      |
+//            v      v      v      v
+//              SWITCH        SWITCH
+//                 |             |
+//                 +----+   +----+
+//                      |   |
+//                      v   v
+//                       ADD
+//
+// can have its inputs independently dead or alive based on the runtime values
+// of PRED0 and PRED1.
+//
+// It is tempting to call this a liveness analysis but I avoided that because
+// "liveness" already has other connotations.
+class DeadnessAnalysis {
+ public:
+ // Returns true if `node` may have some live inputs and some dead inputs.
+ //
+  // This is a conservatively correct routine -- if it returns false then
+  // `node` is guaranteed not to have inputs with mismatching liveness, but a
+  // true result does not guarantee that it does.
+ //
+ // REQUIRES: node is not a Merge operation.
+ virtual bool HasInputsWithMismatchingDeadness(const Node& node) = 0;
+
+ // Prints out the internal state of this instance. For debugging purposes
+ // only.
+ virtual void Print() const = 0;
+ virtual ~DeadnessAnalysis();
+
+  // Runs the deadness analysis over `graph` and returns an error or a
+  // populated instance of DeadnessAnalysis in `result`.
+ static Status Run(const Graph& graph,
+ std::unique_ptr<DeadnessAnalysis>* result);
+};
+
+} // namespace tensorflow
+
+#endif // TENSORFLOW_COMPILER_JIT_DEADNESS_ANALYSIS_H_
diff --git a/tensorflow/compiler/jit/deadness_analysis_test.cc b/tensorflow/compiler/jit/deadness_analysis_test.cc
new file mode 100644
index 0000000000..584385cab7
--- /dev/null
+++ b/tensorflow/compiler/jit/deadness_analysis_test.cc
@@ -0,0 +1,443 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/jit/deadness_analysis.h"
+
+#include "tensorflow/cc/framework/ops.h"
+#include "tensorflow/cc/ops/array_ops.h"
+#include "tensorflow/cc/ops/control_flow_ops_internal.h"
+#include "tensorflow/cc/ops/function_ops.h"
+#include "tensorflow/cc/ops/sendrecv_ops.h"
+#include "tensorflow/cc/ops/standard_ops.h"
+#include "tensorflow/compiler/jit/defs.h"
+#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
+#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/core/framework/node_def_util.h"
+#include "tensorflow/core/framework/op.h"
+#include "tensorflow/core/graph/algorithm.h"
+#include "tensorflow/core/graph/graph_constructor.h"
+#include "tensorflow/core/graph/graph_def_builder.h"
+#include "tensorflow/core/graph/graph_def_builder_util.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+namespace {
+
+Status AnalyzeDeadness(Graph* graph,
+ std::unique_ptr<DeadnessAnalysis>* result) {
+ FixupSourceAndSinkEdges(graph);
+ return DeadnessAnalysis::Run(*graph, result);
+}
+
+ops::Switch CreateSwitch(const Scope& root, const string& prefix) {
+ Output value = ops::Placeholder(root.WithOpName(prefix + "/value"), DT_FLOAT);
+ Output predicate =
+ ops::Placeholder(root.WithOpName(prefix + "/pred"), DT_BOOL);
+ return ops::Switch(root.WithOpName(prefix + "/switch"), value, predicate);
+}
+
+Output CreateInductionVariable(const Scope& root, const string& prefix,
+ const string& frame_name, int32 init) {
+ Output initial_value = ops::Const(root.WithOpName(prefix + "/init"), init);
+ Output enter_initial_value = ops::internal::Enter(
+ root.WithOpName(prefix + "/enter"), initial_value, frame_name);
+
+ ops::Merge iv(root.WithOpName(prefix + "/iv"), {enter_initial_value});
+ Output increment_by = ops::Const(root.WithOpName(prefix + "/incr"), 1);
+ Output final_value = ops::Const(root.WithOpName(prefix + "/final"), 10);
+ Output loop_cond_expr =
+ ops::Less(root.WithOpName(prefix + "/less"), iv.output, final_value);
+ Output loop_cond =
+ ops::LoopCond(root.WithOpName(prefix + "/cond"), loop_cond_expr);
+ ops::Switch latch(root.WithOpName(prefix + "/latch"), iv.output, loop_cond);
+ ops::internal::Exit exit(root.WithOpName(prefix + "/exit"), iv.output);
+ Output iv_next =
+ ops::Add(root.WithOpName(prefix + "/ivnext"), iv.output, increment_by);
+ Output next_iteration =
+      ops::NextIteration(root.WithOpName(prefix + "/next_iteration"), iv_next);
+
+ root.graph()->AddEdge(next_iteration.node(), 0, iv.output.node(), 1);
+ root.graph()->AddControlEdge(iv.output.node(), increment_by.node());
+ root.graph()->AddControlEdge(iv.output.node(), final_value.node());
+
+ return iv.output;
+}
+
+TEST(DeadnessAnalysisTest, BasicPositive) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ ops::Switch sw = CreateSwitch(root, "0");
+ Output add =
+ ops::Add(root.WithOpName("add"), sw.output_true, sw.output_false);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_TRUE(result->HasInputsWithMismatchingDeadness(*add.node()));
+}
+
+TEST(DeadnessAnalysisTest, BasicNegative) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT);
+ Output b = ops::Placeholder(root.WithOpName("b"), DT_FLOAT);
+ Output add = ops::Add(root.WithOpName("add"), a, b);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_FALSE(result->HasInputsWithMismatchingDeadness(*add.node()));
+}
+
+TEST(DeadnessAnalysisTest, AndIsCommutative) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ ops::Switch sw_0 = CreateSwitch(root, "0");
+ ops::Switch sw_1 = CreateSwitch(root, "1");
+
+ Output a0 =
+ ops::Add(root.WithOpName("a0"), sw_0.output_false, sw_1.output_false);
+ Output a1 =
+ ops::Add(root.WithOpName("a1"), sw_1.output_false, sw_0.output_false);
+
+ Output b0 =
+ ops::Add(root.WithOpName("b0"), sw_0.output_false, sw_1.output_true);
+ Output b1 =
+ ops::Add(root.WithOpName("b1"), sw_1.output_true, sw_0.output_false);
+
+ Output live0 = ops::Add(root.WithOpName("live0"), a0, a1);
+ Output live1 = ops::Add(root.WithOpName("live1"), b0, b1);
+
+ Output halfdead0 = ops::Add(root.WithOpName("halfdead0"), a0, b0);
+ Output halfdead1 = ops::Add(root.WithOpName("halfdead1"), a1, b1);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_FALSE(result->HasInputsWithMismatchingDeadness(*live0.node()));
+ EXPECT_FALSE(result->HasInputsWithMismatchingDeadness(*live1.node()));
+
+ EXPECT_TRUE(result->HasInputsWithMismatchingDeadness(*halfdead0.node()));
+ EXPECT_TRUE(result->HasInputsWithMismatchingDeadness(*halfdead1.node()));
+}
+
+TEST(DeadnessAnalysisTest, AndIsAssociative) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ ops::Switch sw_0 = CreateSwitch(root, "0");
+ ops::Switch sw_1 = CreateSwitch(root, "1");
+ ops::Switch sw_2 = CreateSwitch(root, "2");
+
+ Output a0 =
+ ops::Add(root.WithOpName("a0"), sw_0.output_false, sw_1.output_false);
+ Output a1 = ops::Add(root.WithOpName("a1"), a0, sw_2.output_false);
+
+ Output b0 =
+ ops::Add(root.WithOpName("b0"), sw_1.output_false, sw_2.output_false);
+ Output b1 = ops::Add(root.WithOpName("b1"), sw_0.output_false, b0);
+
+ Output add = ops::Add(root.WithOpName("add"), a1, b1);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_FALSE(result->HasInputsWithMismatchingDeadness(*add.node()));
+}
+
+TEST(DeadnessAnalysisTest, OrIsCommutative) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ ops::Switch sw_0 = CreateSwitch(root, "0");
+ ops::Switch sw_1 = CreateSwitch(root, "1");
+
+ ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false});
+ ops::Merge m1(root.WithOpName("m1"), {sw_1.output_false, sw_0.output_false});
+ ops::Merge m2(root.WithOpName("m2"), {sw_0.output_false, sw_1.output_true});
+ ops::Merge m3(root.WithOpName("m3"), {sw_1.output_true, sw_0.output_false});
+
+ Output live0 = ops::Add(root.WithOpName("live0"), m0.output, m1.output);
+ Output live1 = ops::Add(root.WithOpName("live1"), m2.output, m3.output);
+
+ Output halfdead0 =
+ ops::Add(root.WithOpName("halfdead0"), m0.output, m2.output);
+ Output halfdead1 =
+ ops::Add(root.WithOpName("halfdead1"), m1.output, m3.output);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_FALSE(result->HasInputsWithMismatchingDeadness(*live0.node()));
+ EXPECT_FALSE(result->HasInputsWithMismatchingDeadness(*live1.node()));
+
+ EXPECT_TRUE(result->HasInputsWithMismatchingDeadness(*halfdead0.node()));
+ EXPECT_TRUE(result->HasInputsWithMismatchingDeadness(*halfdead1.node()));
+}
+
+TEST(DeadnessAnalysisTest, OrIsAssociative) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ ops::Switch sw_0 = CreateSwitch(root, "0");
+ ops::Switch sw_1 = CreateSwitch(root, "1");
+ ops::Switch sw_2 = CreateSwitch(root, "2");
+
+ ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false});
+ ops::Merge m1(root.WithOpName("m1"), {m0.output, sw_2.output_false});
+ ops::Merge m2(root.WithOpName("m2"), {sw_1.output_false, sw_2.output_false});
+ ops::Merge m3(root.WithOpName("m3"), {sw_0.output_false, m2.output});
+
+ Output add = ops::Add(root.WithOpName("add"), m1.output, m3.output);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_FALSE(result->HasInputsWithMismatchingDeadness(*add.node()));
+}
+
+TEST(DeadnessAnalysisTest, AndOfOr) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ ops::Switch sw_0 = CreateSwitch(root, "0");
+ ops::Switch sw_1 = CreateSwitch(root, "1");
+ ops::Switch sw_2 = CreateSwitch(root, "2");
+ ops::Switch sw_3 = CreateSwitch(root, "3");
+
+ ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false});
+ ops::Merge m1(root.WithOpName("m1"), {sw_2.output_false, sw_3.output_false});
+
+ Output add0 = ops::Add(root.WithOpName("add0"), m0.output, m1.output);
+ Output add1 = ops::Add(root.WithOpName("add1"), m0.output, m1.output);
+
+ Output add2 = ops::Add(root.WithOpName("add2"), add0, add1);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_FALSE(result->HasInputsWithMismatchingDeadness(*add2.node()));
+}
+
+TEST(DeadnessAnalysisTest, OrOfAnd) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ ops::Switch sw_0 = CreateSwitch(root, "0");
+ ops::Switch sw_1 = CreateSwitch(root, "1");
+ ops::Switch sw_2 = CreateSwitch(root, "2");
+ ops::Switch sw_3 = CreateSwitch(root, "3");
+
+ Output add0 =
+ ops::Add(root.WithOpName("add0"), sw_0.output_false, sw_1.output_false);
+ Output add1 =
+ ops::Add(root.WithOpName("add1"), sw_2.output_false, sw_3.output_false);
+
+ ops::Merge m0(root.WithOpName("m0"), {add0, add1});
+ ops::Merge m1(root.WithOpName("m1"), {add0, add1});
+
+ Output add2 = ops::Add(root.WithOpName("add2"), m0.output, m1.output);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_FALSE(result->HasInputsWithMismatchingDeadness(*add2.node()));
+}
+
+TEST(DeadnessAnalysisTest, NEGATIVE_AndOrDistributive) {
+  // This demonstrates one of the weaknesses in the current approach -- since
+  // we only do some basic simplifications we can't see that "(A|B)&C" ==
+  // "(A&C)|(B&C)".
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ ops::Switch sw_0 = CreateSwitch(root, "0");
+ ops::Switch sw_1 = CreateSwitch(root, "1");
+ ops::Switch sw_2 = CreateSwitch(root, "2");
+
+ ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false});
+ Output add0 = ops::Add(root.WithOpName("add0"), m0.output, sw_2.output_false);
+
+ Output add1 =
+ ops::Add(root.WithOpName("add1"), sw_0.output_false, sw_2.output_false);
+ Output add2 =
+ ops::Add(root.WithOpName("add2"), sw_1.output_false, sw_2.output_false);
+ ops::Merge m1(root.WithOpName("m1"), {add1, add2});
+
+ Output add3 = ops::Add(root.WithOpName("add3"), add0, m1.output);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+  EXPECT_TRUE(result->HasInputsWithMismatchingDeadness(*add3.node()));
+}
+
+TEST(DeadnessAnalysisTest, Ternary) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ Output predicate = ops::Placeholder(root.WithOpName("predicate"), DT_BOOL);
+ Output true_value = ops::Placeholder(root.WithOpName("true_value"), DT_FLOAT);
+ Output false_value =
+ ops::Placeholder(root.WithOpName("false_value"), DT_FLOAT);
+
+ ops::Switch predicated_true(root.WithOpName("predicated_true"), true_value,
+ predicate);
+
+  ops::Switch predicated_false(root.WithOpName("predicated_false"),
+                               false_value, predicate);
+ ops::Merge merge(root.WithOpName("ternary"), {predicated_true.output_true,
+ predicated_false.output_false});
+ Output addend = ops::Placeholder(root.WithOpName("addend"), DT_FLOAT);
+ Output add = ops::Add(root.WithOpName("add"), merge.output, addend);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_FALSE(result->HasInputsWithMismatchingDeadness(*add.node()));
+}
+
+TEST(DeadnessAnalysisTest, Recv) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ Output recv_a = ops::_Recv(root.WithOpName("recv_a"), DT_FLOAT, "tensor_a",
+ "sender", 0, "receiver");
+ Output recv_b = ops::_Recv(root.WithOpName("recv_b"), DT_FLOAT, "tensor_b",
+ "sender", 0, "receiver");
+ Output add = ops::Add(root.WithOpName("add"), recv_a, recv_b);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_TRUE(result->HasInputsWithMismatchingDeadness(*add.node()));
+}
+
+TEST(DeadnessAnalysisTest, HostRecv) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ Output recv_a = ops::_HostRecv(root.WithOpName("recv_a"), DT_FLOAT,
+ "tensor_a", "sender", 0, "receiver");
+ Output recv_b = ops::_HostRecv(root.WithOpName("recv_b"), DT_FLOAT,
+ "tensor_b", "sender", 0, "receiver");
+ Output add = ops::Add(root.WithOpName("add"), recv_a, recv_b);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_TRUE(result->HasInputsWithMismatchingDeadness(*add.node()));
+}
+
+TEST(DeadnessAnalysisTest, Loop) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+ Output iv0 = CreateInductionVariable(root, "iv0", "fr0", 0);
+ Output iv1 = CreateInductionVariable(root, "iv1", "fr0", 0);
+ Output iv2 = CreateInductionVariable(root, "iv2", "fr0", 1);
+ Output add0 = ops::Add(root.WithOpName("add0"), iv0, iv1);
+ Output add1 = ops::Add(root.WithOpName("add1"), iv1, iv2);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ // NB! iv0 and iv1 are equivalent and a smarter deadness analysis would have
+ // noticed that. Today we are pessimistic here because we assign an
+ // uninterpreted symbol to merges with backedges.
+
+ EXPECT_TRUE(result->HasInputsWithMismatchingDeadness(*add0.node()));
+ EXPECT_TRUE(result->HasInputsWithMismatchingDeadness(*add1.node()));
+}
+
+TEST(DeadnessAnalysisTest, ControlInputs) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+ ops::Switch sw = CreateSwitch(root, "0");
+
+ Output id0 = ops::Identity(root.WithOpName("id0"), sw.output_false);
+ Output id1 = ops::Identity(root.WithOpName("id1"), sw.output_true);
+
+ Output const0 = ops::Const(root.WithOpName("const0"), 1);
+ Output const1 = ops::Const(root.WithOpName("const1"), 2);
+
+ Output add = ops::Add(root.WithOpName("add"), const0, const1);
+
+ root.graph()->AddControlEdge(id0.node(), const0.node());
+ root.graph()->AddControlEdge(id1.node(), const1.node());
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_TRUE(result->HasInputsWithMismatchingDeadness(*add.node()));
+}
+
+TEST(DeadnessAnalysisTest, ControlTrigger) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+ ops::Switch sw = CreateSwitch(root, "0");
+
+ Output id0 = ops::Identity(root.WithOpName("id0"), sw.output_false);
+ Output id1 = ops::Identity(root.WithOpName("id1"), sw.output_true);
+
+ ops::ControlTrigger ctrl_trigger0(root.WithOpName("ctrl_trigger0"));
+ ops::ControlTrigger ctrl_trigger1(root.WithOpName("ctrl_trigger1"));
+
+ Output const0 = ops::Const(root.WithOpName("const0"), 1);
+ Output const1 = ops::Const(root.WithOpName("const1"), 2);
+
+ Output add = ops::Add(root.WithOpName("add"), const0, const1);
+
+ root.graph()->AddControlEdge(id0.node(), ctrl_trigger0.operation.node());
+ root.graph()->AddControlEdge(ctrl_trigger0.operation.node(), const0.node());
+
+ root.graph()->AddControlEdge(id1.node(), ctrl_trigger1.operation.node());
+ root.graph()->AddControlEdge(ctrl_trigger1.operation.node(), const1.node());
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_FALSE(result->HasInputsWithMismatchingDeadness(*add.node()));
+}
+
+TEST(DeadnessAnalysisTest, ControlInputsToMerge) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+ ops::Switch sw = CreateSwitch(root, "0");
+
+ Output id0 = ops::Identity(root.WithOpName("id0"), sw.output_false);
+ Output id1 = ops::Identity(root.WithOpName("id1"), sw.output_true);
+
+ Output constant = ops::Const(root.WithOpName("constant"), 5);
+ ops::Merge m0(root.WithOpName("m0"), {constant});
+ ops::Merge m1(root.WithOpName("m0"), {constant});
+ Output add = ops::Add(root.WithOpName("add"), m0.output, m1.output);
+
+ root.graph()->AddControlEdge(id0.node(), m0.output.node());
+ root.graph()->AddControlEdge(id1.node(), m1.output.node());
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_FALSE(result->HasInputsWithMismatchingDeadness(*add.node()));
+}
+
+TEST(DeadnessAnalysisTest, RecvVsSwitch) {
+  // Demonstrates why we need the must_be_true bit on SymbolPredicate.
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ Output recv = ops::_Recv(root.WithOpName("recv"), DT_BOOL, "tensor", "sender",
+ 0, "receiver");
+ Output value = ops::Placeholder(root.WithOpName("value"), DT_BOOL);
+ ops::Switch sw(root.WithOpName("switch"), value, recv);
+ Output logical_and =
+ ops::LogicalAnd(root.WithOpName("and"), recv, sw.output_true);
+
+ std::unique_ptr<DeadnessAnalysis> result;
+ TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
+
+ EXPECT_TRUE(result->HasInputsWithMismatchingDeadness(*logical_and.node()));
+}
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc b/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
index 9c424b201e..fdd71c6a58 100644
--- a/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
+++ b/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
@@ -138,7 +138,7 @@ class Encapsulator {
// Find subgraphs marked with 'group_attribute', and build a new
// subgraph, one for each value of 'group_attribute'.
- Status SplitIntoSubgraphs();
+ Status SplitIntoSubgraphs(FunctionLibraryDefinition* library);
// Build a FunctionDef for each subgraph, and add it 'library'. The values of
// the 'group_attribute' annotations become the function names.
@@ -1478,7 +1478,7 @@ Status Encapsulator::CopySubgraphEdges(
return Status::OK();
}
-Status Encapsulator::SplitIntoSubgraphs() {
+Status Encapsulator::SplitIntoSubgraphs(FunctionLibraryDefinition* library) {
Status s;
// Map from input graph nodes to subgraph nodes.
@@ -1513,6 +1513,15 @@ Status Encapsulator::SplitIntoSubgraphs() {
TF_RETURN_IF_ERROR(BuildControlFlowInfo(subgraph.GetGraph(), &dummy));
}
+ if (VLOG_IS_ON(1)) {
+ // Dump subgraphs.
+ for (auto& entry : subgraphs_) {
+ dump_graph::DumpGraphToFile(
+ strings::StrCat("encapsulate_subgraphs_subgraph_", entry.first),
+ *entry.second.GetGraph(), library);
+ }
+ }
+
return s;
}
@@ -1936,6 +1945,8 @@ Status Encapsulator::DoStaticShapeInferenceForOutsideCompilationSend(
// continue.
TensorShapeProto proto;
context->ShapeHandleToProto(shape, &proto);
+ VLOG(2) << "Node " << src_node->name()
+ << " has known shape: " << proto.DebugString();
if (dummy_node_images.find(src_node) == dummy_node_images.end()) {
dummy_node_images[src_node] =
AddDummyShapedNode(src_node, src_port, control_flow_info,
@@ -1953,6 +1964,8 @@ Status Encapsulator::DoStaticShapeInferenceForOutsideCompilationSend(
if (VLOG_IS_ON(2)) {
TensorShapeProto proto;
context->ShapeHandleToProto(shape, &proto);
+ VLOG(2) << "Node " << src_node->name()
+ << " has unknown shape: " << proto.DebugString();
}
stack.push_back({src_node, false});
}
@@ -2195,6 +2208,23 @@ Status Encapsulator::FindClusterDependencies() {
}
}
}
+ if (VLOG_IS_ON(2)) {
+ // Print debug information.
+ VLOG(2) << "node_ancestors_map:";
+ for (const auto& node_iter : node_ancestors_map) {
+ VLOG(2) << "\t" << node_iter.first->name() << ": subgraph = '"
+ << node_iter.second.subgraph
+ << "', outside_compilation_cluster = '"
+ << node_iter.second.outside_compilation_cluster
+ << "', ancestor_clusters: "
+ << (node_iter.second.ancestor_clusters.empty() ? "(empty)" : "");
+ for (const auto& cluster_iter : node_iter.second.ancestor_clusters) {
+ VLOG(2) << "\t\tsubgraph = '" << cluster_iter.subgraph
+ << "', outside_compilation_cluster = '"
+ << cluster_iter.outside_compilation_cluster << "'";
+ }
+ }
+ }
return Status::OK();
}
@@ -2402,7 +2432,7 @@ Status EncapsulateSubgraphsInFunctions(
std::move(outside_compilation_attribute),
&graph_in);
TF_RETURN_IF_ERROR(encapsulator.FindClusterDependencies());
- TF_RETURN_IF_ERROR(encapsulator.SplitIntoSubgraphs());
+ TF_RETURN_IF_ERROR(encapsulator.SplitIntoSubgraphs(library));
TF_RETURN_IF_ERROR(encapsulator.BuildFunctionDefs(
rewrite_subgraph_fn, reuse_existing_functions, library));
@@ -2451,7 +2481,7 @@ Status EncapsulateSubgraphsPass::Run(
const GraphOptimizationPassOptions& options) {
VLOG(1) << "EncapsulateSubgraphsPass::Run";
if (VLOG_IS_ON(1)) {
- dump_graph::DumpGraphToFile("before_encapsulate_subgraphs", **options.graph,
+ dump_graph::DumpGraphToFile("encapsulate_subgraphs_before", **options.graph,
options.flib_def);
}
@@ -2534,7 +2564,7 @@ Status EncapsulateSubgraphsPass::Run(
"EncapsulateSubgraphsPass failed");
if (VLOG_IS_ON(1)) {
- dump_graph::DumpGraphToFile("after_encapsulate_subgraphs", *graph_out,
+ dump_graph::DumpGraphToFile("encapsulate_subgraphs_after", *graph_out,
options.flib_def);
}
diff --git a/tensorflow/compiler/jit/kernels/xla_launch_op.cc b/tensorflow/compiler/jit/kernels/xla_launch_op.cc
index 338fb5a6f0..c5d0e4f8fb 100644
--- a/tensorflow/compiler/jit/kernels/xla_launch_op.cc
+++ b/tensorflow/compiler/jit/kernels/xla_launch_op.cc
@@ -51,7 +51,11 @@ XlaLocalLaunchBase::XlaLocalLaunchBase(OpKernelConstruction* ctx,
if (device_type_ == DeviceType(DEVICE_CPU)) {
platform_id_ = se::host::kHostPlatformId;
} else if (device_type_ == DeviceType(DEVICE_GPU)) {
- platform_id_ = se::cuda::kCudaPlatformId;
+ platform_id_ = ctx->device()
+ ->tensorflow_gpu_device_info()
+ ->stream->parent()
+ ->platform()
+ ->id();
} else {
platform_id_ = nullptr;
}
diff --git a/tensorflow/compiler/jit/mark_for_compilation_pass.cc b/tensorflow/compiler/jit/mark_for_compilation_pass.cc
index 8c3882116d..38eb6d830f 100644
--- a/tensorflow/compiler/jit/mark_for_compilation_pass.cc
+++ b/tensorflow/compiler/jit/mark_for_compilation_pass.cc
@@ -21,6 +21,7 @@ limitations under the License.
#include <unordered_map>
#include <unordered_set>
+#include "tensorflow/compiler/jit/deadness_analysis.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/graphcycles/graphcycles.h"
#include "tensorflow/compiler/jit/legacy_flags/mark_for_compilation_pass_flags.h"
@@ -28,6 +29,7 @@ limitations under the License.
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/dump_graph.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
+#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/memory_types.h"
@@ -462,18 +464,19 @@ Status MarkForCompilationPass::Run(
VLOG(1) << "flags->tf_xla_fusion_only = " << flags->tf_xla_fusion_only;
const FunctionLibraryDefinition* fld = options.flib_def;
- auto is_compilable = [global_jit_level, cpu_global_jit, fusion_only, fld](
- const Node* node, const DeviceType& device_type) {
+ std::unique_ptr<DeadnessAnalysis> deadness;
+ {
+ XLA_SCOPED_LOGGING_TIMER_LEVEL("DeadnessAnalysis", 1);
+ TF_RETURN_IF_ERROR(DeadnessAnalysis::Run(**options.graph, &deadness));
+ }
+
+ auto is_compilable = [&](const Node* node, const DeviceType& device_type) {
const XlaOpRegistry::DeviceRegistration* registration;
if (!XlaOpRegistry::GetCompilationDevice(device_type.type(),
&registration)) {
return false;
}
- // Don't compile control trigger nodes. We won't preserve their deadness
- // semantics correctly, so it's safest not to compile them.
- if (node->IsControlTrigger()) return false;
-
// If this device requires a JIT, we must say yes.
if (registration->requires_compilation) return true;
@@ -485,6 +488,14 @@ Status MarkForCompilationPass::Run(
status = fld->GetAttr(*node, kXlaCompileAttr, &compile);
if (status.ok()) return compile;
+ // If inputs to `node` can have conflicting deadness (i.e. some are alive
+ // and some are dead) then don't compile it. XLA cannot represent the
+ // deadness semantics of these nodes correctly and auto-clustering these
+ // nodes can cause deadness to propagate to nodes that should be live.
+ if (node->IsMerge() || deadness->HasInputsWithMismatchingDeadness(*node)) {
+ return false;
+ }
+
// Check for fusable ops only if requested.
if (global_jit_level > 0 && fusion_only && !IsXlaFusable(node->def())) {
return false;
diff --git a/tensorflow/compiler/jit/mark_for_compilation_pass_test.cc b/tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
index 772c92d369..2c5f4fb774 100644
--- a/tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
+++ b/tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
+#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
@@ -680,5 +681,37 @@ TEST(XlaCompilationTest, ClusterIdentityWithNonRefInput) {
EXPECT_EQ(clusters, expected_clusters);
}
+TEST(XlaCompilationTest, ClusterControlTrigger) {
+ Scope root = Scope::NewRootScope().ExitOnError();
+
+ Output recv_a = ops::_Recv(root.WithOpName("recv_a"), DT_BOOL, "tensor_a",
+ "sender", 0, "receiver");
+ Output recv_b = ops::_Recv(root.WithOpName("recv_b"), DT_BOOL, "tensor_b",
+ "sender", 0, "receiver");
+ Output const_a = ops::Const(root.WithOpName("const_a"), 42);
+
+ ops::ControlTrigger ctrl_trigger_a(root.WithOpName("ctrl_trigger_a"));
+ ops::ControlTrigger ctrl_trigger_b(root.WithOpName("ctrl_trigger_b"));
+ root.graph()->AddControlEdge(recv_a.node(), ctrl_trigger_a.operation.node());
+ root.graph()->AddControlEdge(recv_b.node(), ctrl_trigger_a.operation.node());
+ root.graph()->AddControlEdge(ctrl_trigger_b.operation.node(), const_a.node());
+
+ std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
+
+ TF_ASSERT_OK(root.ToGraph(graph.get()));
+ TF_ASSERT_OK(MarkForCompilation(&graph));
+
+ std::unordered_map<string, string> clusters = GetClusters(*graph);
+
+ ASSERT_FALSE(clusters.empty());
+ string cluster_name = clusters.begin()->second;
+
+ // ctrl_trigger_a has inputs with mismatching deadness so it won't be
+ // clustered. ctrl_trigger_b is okay to cluster.
+ std::unordered_map<string, string> expected_clusters(
+ {{"const_a", cluster_name}, {"ctrl_trigger_b", cluster_name}});
+ EXPECT_EQ(clusters, expected_clusters);
+}
+
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/compiler/jit/xla_compilation_cache.cc b/tensorflow/compiler/jit/xla_compilation_cache.cc
index 7ed609c437..54a41a4daa 100644
--- a/tensorflow/compiler/jit/xla_compilation_cache.cc
+++ b/tensorflow/compiler/jit/xla_compilation_cache.cc
@@ -40,7 +40,23 @@ namespace tensorflow {
XlaCompilationCache::XlaCompilationCache(xla::LocalClient* client,
DeviceType device_type)
: client_(client), device_type_(std::move(device_type)) {}
-XlaCompilationCache::~XlaCompilationCache() = default;
+XlaCompilationCache::~XlaCompilationCache() {
+ // Ensure any use of our programs have completed by waiting for all stream
+ // executors to complete.
+ for (auto* executor : client_->backend().stream_executors()) {
+ bool ok = executor->SynchronizeAllActivity();
+ if (!ok) {
+ LOG(ERROR) << "Error synchronizing activity while waiting for all "
+ "programs to complete";
+ }
+ }
+ // TODO(b/110813685): Think about the program ownership model. Programs are
+ // currently owned by the compilation cache which means we must wait for
+ // program completion in the destructor. There are multiple compilation caches
+ // around, which complicates things a little. Perhaps having programs be
+ // shared_ptrs (an invasive change) would make the model easier to reason
+ // about?
+}
string XlaCompilationCache::DebugString() {
return "XLA JIT compilation cache";
diff --git a/tensorflow/compiler/jit/xla_device_context.cc b/tensorflow/compiler/jit/xla_device_context.cc
index 04778c0090..8cf198239c 100644
--- a/tensorflow/compiler/jit/xla_device_context.cc
+++ b/tensorflow/compiler/jit/xla_device_context.cc
@@ -74,43 +74,64 @@ Status XlaTransferManager::TransferLiteralToDevice(
xla::Shape xla_shape;
TF_RETURN_IF_ERROR(TensorShapeToXLAShape(host_tensor.dtype(),
host_tensor.shape(), &xla_shape));
- xla::BorrowingLiteral literal(
+ // Create a reference to hold onto host_tensor until after the literal has
+ // been transferred. Also make sure the literal exists until the function
+ // asynchronously completes, as it will be wrapped in an xla::LiteralSlice.
+ TensorReference ref(host_tensor);
+ auto literal = std::make_shared<xla::BorrowingLiteral>(
static_cast<const char*>(DMAHelper::base(&host_tensor)), xla_shape);
XlaTensor* xla_tensor = XlaTensor::FromTensor(device_tensor);
const xla::ShapedBuffer& shaped_buffer = xla_tensor->shaped_buffer();
- VLOG(1) << "Transfer to device as literal: " << literal.ToString() << " "
+ VLOG(1) << "Transfer to device as literal: " << literal->ToString() << " "
<< shaped_buffer.ToString();
- TF_RETURN_IF_ERROR(transfer_manager_->TransferLiteralToDevice(
- host_to_device_stream_, literal, shaped_buffer));
+ if (UseMultipleStreams()) {
+ // Initially wait for the compute stream so that memory allocations are
+ // synchronized.
+ host_to_device_stream_->ThenWaitFor(stream_);
+ }
+ TF_RETURN_IF_ERROR(transfer_manager_->TransferLiteralToDeviceAsync(
+ host_to_device_stream_, *literal, shaped_buffer));
if (UseMultipleStreams()) {
se::Event event(stream_->parent());
TF_RET_CHECK(event.Init()) << "Event failed to initialize!";
host_to_device_stream_->ThenRecordEvent(&event);
xla_tensor->SetDefinedOn(host_to_device_stream_, std::move(event));
}
+  // Unref the host tensor once the transfer completes; capturing the literal
+  // shared_ptr keeps the literal alive until the callback runs.
+ host_to_device_stream_->ThenDoHostCallback([ref, literal]() { ref.Unref(); });
return Status::OK();
}
-Status XlaTransferManager::TransferLiteralFromDevice(
- Tensor* host_tensor, const Tensor& device_tensor) const {
+void XlaTransferManager::TransferLiteralFromDevice(
+ Tensor* host_tensor, const Tensor& device_tensor,
+ const StatusCallback& done) const {
const xla::ShapedBuffer& shaped_buffer =
XlaTensor::FromTensor(&device_tensor)->shaped_buffer();
- TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::Literal> literal,
- transfer_manager_->TransferLiteralFromDevice(
- device_to_host_stream_, shaped_buffer));
- VLOG(1) << "Transfer from device as literal: " << literal->ToString() << " "
- << shaped_buffer.ToString();
- Tensor tensor;
- TF_RETURN_IF_ERROR(
- LiteralToHostTensor(*literal, host_tensor->dtype(), &tensor));
- // Reshape the tensor back to its declared shape.
- if (!host_tensor->CopyFrom(tensor, device_tensor.shape())) {
- return errors::Internal(
- "Tensor::CopyFrom failed when copying from XLA device to CPU");
- }
- return Status::OK();
+ TensorReference ref(device_tensor);
+ transfer_manager_->TransferLiteralFromDevice(
+ device_to_host_stream_, shaped_buffer,
+ [=, &shaped_buffer](
+ xla::StatusOr<std::unique_ptr<xla::Literal> > literal_or) {
+ ref.Unref();
+ done([&]() -> Status {
+ TF_ASSIGN_OR_RETURN(auto literal, std::move(literal_or));
+ VLOG(1) << "Transfer from device as literal: " << literal->ToString()
+ << " " << shaped_buffer.ToString();
+ Tensor tensor;
+ TF_RETURN_IF_ERROR(
+ LiteralToHostTensor(*literal, host_tensor->dtype(), &tensor));
+ // Reshape the tensor back to its declared shape.
+ Status status;
+ if (!host_tensor->CopyFrom(tensor, device_tensor.shape())) {
+ status = errors::Internal(
+ "Tensor::CopyFrom failed when copying from XLA device to CPU");
+ }
+ return status;
+ }());
+ });
}
void XlaTransferManager::CopyCPUTensorToDevice(const Tensor* cpu_tensor,
@@ -163,6 +184,12 @@ void XlaTransferManager::CopyCPUTensorToDevice(const Tensor* cpu_tensor,
return;
}
status = TransferLiteralToDevice(reshaped_cpu_tensor, device_tensor);
+ if (status.ok()) {
+ xla_tensor->set_host_tensor(*cpu_tensor);
+ host_to_device_stream_->ThenDoHostCallback(
+ [done]() { done(Status::OK()); });
+ return;
+ }
} else {
se::DeviceMemoryBase dev_dst_ptr =
XlaTensor::DeviceMemoryFromTensor(*device_tensor);
@@ -212,7 +239,8 @@ void XlaTransferManager::CopyDeviceTensorToCPU(const Tensor* device_tensor,
Status status;
if (transfer_as_literal_) {
- status = TransferLiteralFromDevice(cpu_tensor, *device_tensor);
+ TransferLiteralFromDevice(cpu_tensor, *device_tensor, done);
+ return;
} else {
device_to_host_stream_->ThenMemcpy(dst_ptr, dev_src_ptr, total_bytes);
// TODO(hpucha): Make this asynchronous.
@@ -234,15 +262,15 @@ void XlaTransferManager::CopyDeviceTensorToDevice(const Tensor& src_tensor,
<< reinterpret_cast<const void*>(src_tensor.tensor_data().data())
<< " "
<< reinterpret_cast<const void*>(dst_tensor->tensor_data().data());
- // TODO(phawkins): replace this code with an asynchronous implementation.
- auto body = [&]() {
+ // Perform memory allocation now, and enqueue the device-to-device transfer.
+ Status status = [&]() -> Status {
if (src_tensor.NumElements() == 0) {
return Status::OK();
}
// TODO(jmolloy): We co-opt the device_to_host stream for device to device
// transfers; perhaps we should have a dedicated device to device stream? or
// one per device?
- auto device_to_device_stream = device_to_host_stream_;
+ auto device_to_device_stream = stream_;
XlaTensor* xla_src = XlaTensor::FromTensor(&src_tensor);
XlaTensor* xla_dst = XlaTensor::FromTensor(dst_tensor);
CHECK(xla_src && xla_dst)
@@ -254,29 +282,40 @@ void XlaTransferManager::CopyDeviceTensorToDevice(const Tensor& src_tensor,
TF_RETURN_IF_ERROR(
xla_dst->AllocateShapedBuffer(src_tensor.dtype(), shape, client_,
stream_->parent()->device_ordinal()));
+ if (stream_ != device_to_device_stream) {
+ // Initially wait for the compute stream so that memory allocations are
+ // synchronized.
+ device_to_device_stream->ThenWaitFor(stream_);
+ }
}
if (se::Event* event =
xla_src->GetDefinitionEvent(device_to_device_stream)) {
device_to_device_stream->ThenWaitFor(event);
xla_src->SetDefinedOn(device_to_device_stream);
- TF_RETURN_IF_ERROR(device_to_device_stream->BlockHostUntilDone());
}
- TF_RETURN_IF_ERROR(
- xla_dst->shaped_buffer().buffers().ForEachMutableElementWithStatus(
- [&](const xla::ShapeIndex& index, se::DeviceMemoryBase* buffer) {
- const se::DeviceMemoryBase& from_buffer =
- xla_src->shaped_buffer().buffers().element(index);
- CHECK_EQ(buffer->size(), from_buffer.size());
- if (!stream_->parent()->SynchronousMemcpy(buffer, from_buffer,
- buffer->size())) {
- return errors::Internal("Device to device memcpy failed");
- }
- return Status::OK();
- }));
+
+ auto from_iter = xla_src->shaped_buffer().buffers().begin();
+ auto to_iter = xla_dst->shaped_buffer().buffers().begin();
+ for (auto end_iter = xla_src->shaped_buffer().buffers().end();
+ from_iter != end_iter; ++from_iter, ++to_iter) {
+ device_to_device_stream->ThenMemcpyD2D(
+ &to_iter->second, from_iter->second, to_iter->second.size());
+ }
+
+ if (UseMultipleStreams()) {
+ se::Event event(stream_->parent());
+ CHECK(event.Init());
+ device_to_device_stream->ThenRecordEvent(&event);
+ xla_dst->SetDefinedOn(device_to_device_stream, std::move(event));
+ }
return Status::OK();
- };
- done(body());
+ }();
+ if (!status.ok()) {
+ return done(status);
+ } else {
+ stream_->ThenDoHostCallback([=]() { done(Status::OK()); });
+ }
}
XlaDeviceContext::XlaDeviceContext(
diff --git a/tensorflow/compiler/jit/xla_device_context.h b/tensorflow/compiler/jit/xla_device_context.h
index c726495f96..912f8d779e 100644
--- a/tensorflow/compiler/jit/xla_device_context.h
+++ b/tensorflow/compiler/jit/xla_device_context.h
@@ -66,8 +66,9 @@ class XlaTransferManager {
private:
Status TransferLiteralToDevice(const Tensor& host_tensor,
Tensor* device_tensor) const;
- Status TransferLiteralFromDevice(Tensor* host_tensor,
- const Tensor& device_tensor) const;
+ void TransferLiteralFromDevice(Tensor* host_tensor,
+ const Tensor& device_tensor,
+ const StatusCallback& done) const;
bool UseMultipleStreams() const { return stream_ != host_to_device_stream_; }
// The main compute stream of the device, used to synchronize the transfer
diff --git a/tensorflow/compiler/jit/xla_device_ops.h b/tensorflow/compiler/jit/xla_device_ops.h
index a605335a94..6adda327f1 100644
--- a/tensorflow/compiler/jit/xla_device_ops.h
+++ b/tensorflow/compiler/jit/xla_device_ops.h
@@ -77,9 +77,7 @@ class XlaAssignVariableOp : public AsyncOpKernel {
ConstantOp); \
REGISTER_KERNEL_BUILDER( \
Name("Identity").Device(DEVICE).TypeConstraint("T", TYPES), IdentityOp); \
- REGISTER_KERNEL_BUILDER( \
- Name("IdentityN").Device(DEVICE).TypeConstraint("T", TYPES), \
- IdentityNOp); \
+ REGISTER_KERNEL_BUILDER(Name("IdentityN").Device(DEVICE), IdentityNOp); \
REGISTER_KERNEL_BUILDER(Name("Placeholder").Device(DEVICE), PlaceholderOp); \
REGISTER_KERNEL_BUILDER(Name("PlaceholderV2").Device(DEVICE), \
PlaceholderOp); \
@@ -90,6 +88,9 @@ class XlaAssignVariableOp : public AsyncOpKernel {
REGISTER_KERNEL_BUILDER( \
Name("ReadVariableOp").Device(DEVICE).HostMemory("resource"), \
ReadVariableOp); \
+ REGISTER_KERNEL_BUILDER( \
+ Name("DestroyResourceOp").Device(DEVICE).HostMemory("resource"), \
+ DestroyResourceOp); \
REGISTER_KERNEL_BUILDER(Name("Shape") \
.Device(DEVICE) \
.HostMemory("output") \
diff --git a/tensorflow/compiler/jit/xla_fusion_optimizer.cc b/tensorflow/compiler/jit/xla_fusion_optimizer.cc
index 74257b09a8..4b499b1613 100644
--- a/tensorflow/compiler/jit/xla_fusion_optimizer.cc
+++ b/tensorflow/compiler/jit/xla_fusion_optimizer.cc
@@ -20,6 +20,7 @@ limitations under the License.
#include <unordered_map>
#include <unordered_set>
+#include "tensorflow/compiler/jit/deadness_analysis.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/graphcycles/graphcycles.h"
#include "tensorflow/compiler/jit/union_find.h"
@@ -146,6 +147,9 @@ Status XlaFusionOptimizer::Optimize(grappler::Cluster* cluster,
TF_RETURN_IF_ERROR(
ImportGraphDef(options, item.graph, &graph, &shape_refiner));
+ std::unique_ptr<DeadnessAnalysis> deadness;
+ TF_RETURN_IF_ERROR(DeadnessAnalysis::Run(graph, &deadness));
+
// Collect nodes that can be fused via XLA, while ignoring those that
// explicitly ask for XLA: (*) nodes that are marked to be compiled
// explicitly. (*) nodes assigned to XLA device.
@@ -185,6 +189,14 @@ Status XlaFusionOptimizer::Optimize(grappler::Cluster* cluster,
continue;
}
+ // If inputs to `node` can have conflicting deadness (i.e. some are alive
+ // and some are dead) then don't compile it. XLA cannot represent the
+ // deadness semantics of these nodes correctly and auto-clustering these
+ // nodes can cause deadness to propagate to nodes that should be live.
+ if (node->IsMerge() || deadness->HasInputsWithMismatchingDeadness(*node)) {
+ continue;
+ }
+
compilation_candidates.insert(node);
}
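The guard added above matters mostly for graphs built with control flow. A minimal sketch, assuming the standard TF 1.x graph-mode Python API (tf.cond, tf.placeholder and tf.constant are ordinary TensorFlow calls, not part of this change): tf.cond lowers to Switch/Merge nodes, and a Merge node's inputs come from mutually exclusive branches, so their deadness can disagree and the node is now excluded from auto-clustering.

import tensorflow as tf

with tf.Graph().as_default():
  pred = tf.placeholder(tf.bool, shape=[], name="pred")
  x = tf.constant(1.0)
  # tf.cond is lowered to Switch/Merge nodes; exactly one branch is live at
  # runtime, so the Merge op sees inputs with mismatching deadness and is
  # skipped by the check above.
  y = tf.cond(pred, lambda: x + 1.0, lambda: x * 2.0)
  # Ops downstream of the Merge see a single live value again and remain
  # eligible for clustering.
  z = y * 3.0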
diff --git a/tensorflow/compiler/jit/xla_launch_util.cc b/tensorflow/compiler/jit/xla_launch_util.cc
index 616c3ed2a2..6134b8c694 100644
--- a/tensorflow/compiler/jit/xla_launch_util.cc
+++ b/tensorflow/compiler/jit/xla_launch_util.cc
@@ -64,11 +64,13 @@ xla::StatusOr<xla::OwningDeviceMemory> XlaAllocator::Allocate(
int device_ordinal, uint64 size, bool retry_on_failure) {
AllocationAttributes attrs;
attrs.no_retry_on_failure = !retry_on_failure;
- void* data =
- wrapped_->AllocateRaw(Allocator::kAllocatorAlignment, size, attrs);
- if (data == nullptr) {
- return errors::ResourceExhausted("Out of memory while trying to allocate ",
- size, " bytes.");
+ void* data = nullptr;
+ if (size != 0) {
+ data = wrapped_->AllocateRaw(Allocator::kAllocatorAlignment, size, attrs);
+ if (data == nullptr) {
+ return errors::ResourceExhausted(
+ "Out of memory while trying to allocate ", size, " bytes.");
+ }
}
return xla::OwningDeviceMemory(se::DeviceMemoryBase(data, size),
device_ordinal, this);
diff --git a/tensorflow/compiler/jit/xla_launch_util.h b/tensorflow/compiler/jit/xla_launch_util.h
index 90531174ff..1ea3fa4cf2 100644
--- a/tensorflow/compiler/jit/xla_launch_util.h
+++ b/tensorflow/compiler/jit/xla_launch_util.h
@@ -122,7 +122,11 @@ class XlaTensorBuffer : public TensorBuffer {
data_ = const_cast<void*>(ptr);
}
- ~XlaTensorBuffer() override { allocator_->DeallocateRaw(data_); }
+ ~XlaTensorBuffer() override {
+ if (data_) {
+ allocator_->DeallocateRaw(data_);
+ }
+ }
void* data() const override { return data_; }
size_t size() const override { return expected_size_; }
diff --git a/tensorflow/compiler/jit/xla_tensor.cc b/tensorflow/compiler/jit/xla_tensor.cc
index 5dff187fff..d777dfa5a3 100644
--- a/tensorflow/compiler/jit/xla_tensor.cc
+++ b/tensorflow/compiler/jit/xla_tensor.cc
@@ -92,10 +92,8 @@ se::Event* XlaTensor::GetDefinitionEvent(se::Stream* stream) {
void XlaTensor::SetDefinedOn(se::Stream* stream, se::Event event) {
mutex_lock lock(mu_);
- CHECK(!definition_event_.has_value())
- << "SetDefinedOn must only be called once!";
definition_event_ = std::move(event);
- streams_defined_on_.push_back(stream);
+ streams_defined_on_ = {stream};
}
void XlaTensor::SetDefinedOn(se::Stream* stream) {
diff --git a/tensorflow/compiler/tests/BUILD b/tensorflow/compiler/tests/BUILD
index e8e19f055e..080bed50e6 100644
--- a/tensorflow/compiler/tests/BUILD
+++ b/tensorflow/compiler/tests/BUILD
@@ -418,7 +418,7 @@ tf_xla_py_test(
tf_xla_py_test(
name = "eager_test",
- size = "small",
+ size = "large",
srcs = ["eager_test.py"],
disabled_backends = [
# TODO(b/78199195) Support XLA CPU devices in eager runtime
diff --git a/tensorflow/compiler/tests/binary_ops_test.py b/tensorflow/compiler/tests/binary_ops_test.py
index 9cb3d04546..0aafda7fb4 100644
--- a/tensorflow/compiler/tests/binary_ops_test.py
+++ b/tensorflow/compiler/tests/binary_ops_test.py
@@ -691,11 +691,13 @@ class BinaryOpsTest(xla_test.XLATestCase):
np.array([[10], [7], [2]], dtype=np.float32),
np.float32(7),
expected=np.array([[False], [False], [True]], dtype=np.bool))
- self._testBinary(
- less_op,
- np.array([[10], [7], [2], [-1]], dtype=np.int64),
- np.int64(7),
- expected=np.array([[False], [False], [True], [True]], dtype=np.bool))
+ if np.int64 in self.numeric_types:
+ self._testBinary(
+ less_op,
+ np.array([[10], [7], [2], [-1]], dtype=np.int64),
+ np.int64(7),
+ expected=np.array(
+ [[False], [False], [True], [True]], dtype=np.bool))
for less_equal_op in [math_ops.less_equal, (lambda x, y: x <= y)]:
self._testBinary(
diff --git a/tensorflow/compiler/tests/cholesky_op_test.py b/tensorflow/compiler/tests/cholesky_op_test.py
index d2867278af..ed532db0ee 100644
--- a/tensorflow/compiler/tests/cholesky_op_test.py
+++ b/tensorflow/compiler/tests/cholesky_op_test.py
@@ -18,8 +18,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import unittest
-
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
@@ -103,9 +101,8 @@ class CholeskyOpTest(xla_test.XLATestCase):
with self.assertRaises(ValueError):
linalg_ops.cholesky(tensor3)
- @unittest.skip("Test is slow")
- def testLarge(self):
- n = 200
+ def testLarge2000x2000(self):
+ n = 2000
shape = (n, n)
data = np.ones(shape).astype(np.float32) / (2.0 * n) + np.diag(
np.ones(n).astype(np.float32))
@@ -128,6 +125,5 @@ class CholeskyOpTest(xla_test.XLATestCase):
matrix = np.dot(np.dot(w, np.diag(v)), w.T).astype(dtype)
self._verifyCholesky(matrix, atol=1e-4)
-
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/compiler/tests/conv2d_test.py b/tensorflow/compiler/tests/conv2d_test.py
index 98d41ba7ed..f9db103f6d 100644
--- a/tensorflow/compiler/tests/conv2d_test.py
+++ b/tensorflow/compiler/tests/conv2d_test.py
@@ -33,12 +33,9 @@ from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
-
DATA_FORMATS = (
("_data_format_NHWC", "NHWC"),
("_data_format_NCHW", "NCHW"),
- ("_data_format_HWNC", "HWNC"),
- ("_data_format_HWCN", "HWCN"),
)
diff --git a/tensorflow/compiler/tests/eager_test.py b/tensorflow/compiler/tests/eager_test.py
index 3524666499..6ead15da13 100644
--- a/tensorflow/compiler/tests/eager_test.py
+++ b/tensorflow/compiler/tests/eager_test.py
@@ -403,7 +403,7 @@ class EagerFunctionTest(xla_test.XLATestCase):
def testSliceInDefun(self):
with self.test_scope():
- @function.defun(compiled=True)
+ @function.defun
def f(x, y):
return x[0::2, y:, ...]
@@ -418,6 +418,22 @@ class EagerFunctionTest(xla_test.XLATestCase):
self.assertAllEqual(np.ones([1, 2, 4]), z.numpy())
self.assertAllEqual((2, 3, 4), dz.shape.as_list())
+ def testNestedDefun(self):
+ self.skipTest('Nested defuns do not work on TPU at the moment')
+ with self.test_scope():
+
+ @function.defun
+ def times_two(x):
+ return 2 * x
+
+ @function.defun
+ def two_x_plus_1(x):
+ return times_two(x) + 1
+
+ x = constant_op.constant([2, 3, 4])
+ y = two_x_plus_1(x)
+ self.assertAllEqual([5, 7, 9], y.numpy())
+
class ExcessivePaddingTest(xla_test.XLATestCase):
"""Test that eager execution works with TPU flattened tensors.
@@ -470,6 +486,36 @@ class ExcessivePaddingTest(xla_test.XLATestCase):
self.assertAllEqual(100 * [[36.0]], reduced)
+def multiple_tpus():
+ devices = context.context().devices()
+ return len([d for d in devices if 'device:TPU:' in d]) > 1
+
+
+class MultiDeviceTest(xla_test.XLATestCase):
+ """Test running TPU computation on more than one core."""
+
+ def testBasic(self):
+ if not multiple_tpus():
+ self.skipTest('MultiDeviceTest requires multiple TPU devices.')
+
+ # Compute 10 on TPU core 0
+ with ops.device('device:TPU:0'):
+ two = constant_op.constant(2)
+ five = constant_op.constant(5)
+ ten = two * five
+ self.assertAllEqual(10, ten)
+
+ # Compute 6 on TPU core 1
+ with ops.device('device:TPU:1'):
+ two = constant_op.constant(2)
+ three = constant_op.constant(3)
+ six = two * three
+ self.assertAllEqual(6, six)
+
+ # Copy 10 and 6 to CPU and sum them
+ self.assertAllEqual(16, ten + six)
+
+
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(log_device_placement=True))
diff --git a/tensorflow/compiler/tests/qr_op_test.py b/tensorflow/compiler/tests/qr_op_test.py
index 93752a21db..1b969ee2b3 100644
--- a/tensorflow/compiler/tests/qr_op_test.py
+++ b/tensorflow/compiler/tests/qr_op_test.py
@@ -57,7 +57,7 @@ class QrOpTest(xla_test.XLATestCase, parameterized.TestCase):
def CheckApproximation(self, a, q, r):
# Tests that a ~= q*r.
precision = self.AdjustedNorm(a - np.matmul(q, r))
- self.assertTrue(np.all(precision < 5.0))
+ self.assertTrue(np.all(precision < 10.0))
def CheckUnitary(self, x):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
@@ -107,6 +107,9 @@ class QrOpTest(xla_test.XLATestCase, parameterized.TestCase):
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
self._test(dtype, batch_dims + (rows, cols), full_matrices)
+ def testLarge2000x2000(self):
+ self._test(np.float32, (2000, 2000), full_matrices=True)
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/compiler/tests/sort_ops_test.py b/tensorflow/compiler/tests/sort_ops_test.py
index 9e2ef964a1..7ff01be3cb 100644
--- a/tensorflow/compiler/tests/sort_ops_test.py
+++ b/tensorflow/compiler/tests/sort_ops_test.py
@@ -88,6 +88,38 @@ class XlaSortOpTest(xla_test.XLATestCase):
topk, [x.astype(dtype)],
expected=[x[indices].astype(dtype), indices])
+ def testTopK2D(self):
+ # TODO(b/26783907): The Sort HLO is not implemented on CPU or GPU.
+ if self.device in ["XLA_CPU", "XLA_GPU"]:
+ return
+
+ supported_types = set(
+ [dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
+ for dtype in supported_types.intersection(self.numeric_types):
+      # Use a small input size for bfloat16. Otherwise, we'd get duplicate
+      # values after conversion to bfloat16, and the expected index array
+      # would no longer be unique.
+ if dtype == dtypes.bfloat16.as_numpy_dtype:
+ array_size = 10
+ k_options = [0, 1, 2, 10]
+ else:
+ array_size = 200 * 1000
+ k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
+ batch = 16
+ for x in [np.arange(batch * array_size)]:
+ np.random.shuffle(x)
+ x = np.reshape(x, [batch, array_size])
+ for k in k_options:
+ indices = x.argsort(axis=1)[::, -1:-k - 1:-1]
+ expected = np.sort(x, axis=1)[::, -1:-k - 1:-1]
+
+ def topk(v, k=k):
+ return nn_ops.top_k(v, k=k, sorted=True)
+
+ self._assertOpOutputMatchesExpected(
+ topk, [x.astype(dtype)],
+ expected=[expected.astype(dtype), indices])
+
def testTopKZeros(self):
"""Tests that positive and negative zeros sort correctly."""
# TODO(b/26783907): The Sort HLO is not implemented on CPU or GPU.
diff --git a/tensorflow/compiler/tests/unary_ops_test.py b/tensorflow/compiler/tests/unary_ops_test.py
index 6a7011aea6..5f25ff9002 100644
--- a/tensorflow/compiler/tests/unary_ops_test.py
+++ b/tensorflow/compiler/tests/unary_ops_test.py
@@ -382,6 +382,62 @@ class UnaryOpsTest(xla_test.XLATestCase):
expected=np.array(
[[True, False, True], [False, True, True]], dtype=np.bool))
+ self._assertOpOutputMatchesExpected(
+ math_ops.lgamma,
+ np.array(
+ [[1, 2, 3], [4, 5, 6], [1 / 2, 3 / 2, 5 / 2],
+ [-3 / 2, -7 / 2, -11 / 2]],
+ dtype=dtype),
+ expected=np.array(
+ [
+ [0, 0, np.log(2.0)],
+ [np.log(6.0), np.log(24.0),
+ np.log(120)],
+ [
+ np.log(np.pi) / 2,
+ np.log(np.pi) / 2 - np.log(2),
+ np.log(np.pi) / 2 - np.log(4) + np.log(3)
+ ],
+ [
+ np.log(np.pi) / 2 - np.log(3) + np.log(4),
+ np.log(np.pi) / 2 - np.log(105) + np.log(16),
+ np.log(np.pi) / 2 - np.log(10395) + np.log(64),
+ ],
+ ],
+ dtype=dtype))
+
+ self._assertOpOutputMatchesExpected(
+ math_ops.digamma,
+ np.array(
+ [[1.0, 0.5, 1 / 3.0], [0.25, 1 / 6.0, 0.125], [2.0, 3.0, 4.0],
+ [6.0, 8.0, 9.0]],
+ dtype=dtype),
+ expected=np.array(
+ [
+ [
+ -np.euler_gamma, -2 * np.log(2) - np.euler_gamma,
+ -np.pi / 2 / np.sqrt(3) - 3 * np.log(3) / 2 -
+ np.euler_gamma
+ ],
+ [
+ -np.pi / 2 - 3 * np.log(2) - np.euler_gamma,
+ -np.pi * np.sqrt(3) / 2 - 2 * np.log(2) -
+ 3 * np.log(3) / 2 - np.euler_gamma,
+ -np.pi / 2 - 4 * np.log(2) -
+ (np.pi + np.log(2 + np.sqrt(2)) - np.log(2 - np.sqrt(2)))
+ / np.sqrt(2) - np.euler_gamma
+ ],
+ [
+ 1 - np.euler_gamma, 1.5 - np.euler_gamma,
+ 11 / 6.0 - np.euler_gamma
+ ],
+ [
+ 137 / 60.0 - np.euler_gamma, 363 / 140.0 - np.euler_gamma,
+ 761 / 280.0 - np.euler_gamma
+ ],
+ ],
+ dtype=dtype))
+
def quantize_and_dequantize_v2(x):
return array_ops.quantize_and_dequantize_v2(
x, -127, 127, signed_input=True, num_bits=8)
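The lgamma/digamma expectations added above follow from standard identities rather than from the kernels themselves. A small self-check, assuming only lgamma(n) = log((n-1)!) and digamma(n) = H_{n-1} - euler_gamma for positive integers n (math.lgamma is used purely as an independent reference; it is not part of this change):

import math

# Spot-check a few of the integer-argument lgamma expectations used above.
for n, expected in [(4, math.log(6.0)), (5, math.log(24.0)), (6, math.log(120.0))]:
  assert abs(math.lgamma(n) - expected) < 1e-12

def harmonic(k):
  return sum(1.0 / i for i in range(1, k + 1))

# The test stores digamma(n) as coeff - euler_gamma; coeff should equal H_{n-1}.
for n, coeff in [(2, 1.0), (3, 1.5), (4, 11 / 6.0), (6, 137 / 60.0), (8, 363 / 140.0)]:
  assert abs(harmonic(n - 1) - coeff) < 1e-12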
diff --git a/tensorflow/compiler/tf2xla/graph_compiler.cc b/tensorflow/compiler/tf2xla/graph_compiler.cc
index 4900af6df1..e1cea03865 100644
--- a/tensorflow/compiler/tf2xla/graph_compiler.cc
+++ b/tensorflow/compiler/tf2xla/graph_compiler.cc
@@ -161,9 +161,8 @@ Status GraphCompiler::Compile() {
outputs.resize(n->num_outputs());
for (int o = 0; o < n->num_outputs(); ++o) {
outputs[o] = op_context.release_output(o);
- if (*op_context.is_output_dead() || outputs[o].tensor == nullptr) {
+ if (outputs[o].tensor == nullptr) {
return errors::Internal("Missing xla_context ", o, "-th output from ",
- (*op_context.is_output_dead() ? "(dead)" : ""),
SummarizeNode(*n));
}
}
diff --git a/tensorflow/compiler/tf2xla/kernels/BUILD b/tensorflow/compiler/tf2xla/kernels/BUILD
index 5a335aa43c..d88a34dfd9 100644
--- a/tensorflow/compiler/tf2xla/kernels/BUILD
+++ b/tensorflow/compiler/tf2xla/kernels/BUILD
@@ -127,6 +127,7 @@ tf_kernel_library(
"//tensorflow/compiler/xla/client/lib:constants",
"//tensorflow/compiler/xla/client/lib:math",
"//tensorflow/compiler/xla/client/lib:numeric",
+ "//tensorflow/compiler/xla/client/lib:prng",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/core:framework",
"//tensorflow/core:image_ops_op_lib",
diff --git a/tensorflow/compiler/tf2xla/kernels/diag_op.cc b/tensorflow/compiler/tf2xla/kernels/diag_op.cc
index 6dec414c53..22cda27567 100644
--- a/tensorflow/compiler/tf2xla/kernels/diag_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/diag_op.cc
@@ -123,8 +123,6 @@ class DiagPartOp : public XlaOpKernel {
explicit DiagPartOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
- xla::XlaBuilder* builder = ctx->builder();
-
const TensorShape input_shape = ctx->InputShape(0);
auto dims = input_shape.dim_sizes();
@@ -150,37 +148,13 @@ class DiagPartOp : public XlaOpKernel {
new_dims.push_back(dims[i]);
}
- xla::XlaOp diag = ctx->Input(0);
-
- // TODO(b/30878775): use Slice with strides when supported, in place of
- // the Pad -> Reshape -> Slice.
-
- // Picture:
- // [[1, 0, 0, 0] pad and reshape to [[1, 0, 0, 0, 0],
- // [0, 2, 0, 0] =================> [2, 0, 0, 0, 0],
- // [0, 0, 3, 0] [3, 0, 0, 0, 0],
- // [0, 0, 0, 4]] [4, 0, 0, 0, 0]]
- // and then slice out the first column.
-
- // Flattens the input to 1D.
- int64 size = input_shape.num_elements();
- diag = xla::Reshape(diag, {size});
-
- // Adds padding after the last element of 'new_size'.
- xla::PaddingConfig config;
- auto* dim = config.add_dimensions();
- dim->set_edge_padding_high(new_size);
- auto zero = XlaHelpers::Zero(builder, input_type(0));
- diag = xla::Pad(diag, zero, config);
-
- // Reshapes so the diagonal is now in the first column.
- diag = xla::Reshape(diag, {new_size, new_size + 1});
+ xla::XlaOp input = ctx->Input(0);
- // Slices out the first column and reshapes to the final shape.
- diag = xla::Slice(diag, {0, 0}, {new_size, 1}, {1, 1});
- diag = xla::Reshape(diag, new_dims);
+ xla::XlaOp output = xla::Reshape(
+ xla::GetMatrixDiagonal(xla::Reshape(input, {new_size, new_size})),
+ new_dims);
- ctx->SetOutput(0, diag);
+ ctx->SetOutput(0, output);
}
};
@@ -220,8 +194,6 @@ class MatrixDiagPartOp : public XlaOpKernel {
explicit MatrixDiagPartOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
- xla::XlaBuilder* builder = ctx->builder();
-
const TensorShape input_shape = ctx->InputShape(0);
auto dims = input_shape.dim_sizes();
@@ -229,71 +201,8 @@ class MatrixDiagPartOp : public XlaOpKernel {
errors::InvalidArgument("Expected 2 <= dims, got shape ",
input_shape.DebugString()));
- xla::XlaOp diag = ctx->Input(0);
-
- int last_dim = dims.size() - 1;
- int64 last_dim_size = dims[last_dim];
-
- // The smaller of the last two dimension sizes.
- int64 smaller_dim_size = std::min(dims[last_dim - 1], dims[last_dim]);
-
- // TODO(b/30878775): use Slice with strides when supported, in place of
- // the Pad -> Reshape -> Slice.
-
- // Picture: for each 2D matrix in the tensor's last two dimensions:
- // [[1, 0, 0, 0] pad and reshape to [[1, 0, 0, 0, 0],
- // [0, 2, 0, 0] =================> [2, 0, 0, 0, 0],
- // [0, 0, 3, 0]] [3, 0, 0, 0, 0],
- // and then slice out the first column.
- //
- // Another example, with tall and narrow input.
- // [[1, 0] pad and reshape to [[1, 0, 0],
- // [0, 2] =================> [2, 0, 0]]
- // [0, 0]
- // [0, 0]]
-
- // Collapses the last two dimensions.
- std::vector<int64> flattened_dims(dims.begin(), dims.end() - 1);
- flattened_dims.back() *= dims.back();
- diag = xla::Reshape(diag, flattened_dims);
-
- // Slices or pads the last dimension to 'target_size'.
- int64 actual_size = flattened_dims.back();
- int64 target_size = smaller_dim_size * (last_dim_size + 1);
- if (actual_size < target_size) {
- xla::PaddingConfig config =
- xla::MakeNoPaddingConfig(flattened_dims.size());
- auto* dim = config.mutable_dimensions(flattened_dims.size() - 1);
- dim->set_edge_padding_high(target_size - actual_size);
- auto zero = XlaHelpers::Zero(builder, input_type(0));
- diag = xla::Pad(diag, zero, config);
- } else if (actual_size > target_size) {
- std::vector<int64> start(flattened_dims.size(), 0);
- std::vector<int64> limits(flattened_dims.begin(), flattened_dims.end());
- std::vector<int64> strides(flattened_dims.size(), 1);
- limits[flattened_dims.size() - 1] = target_size;
- diag = xla::Slice(diag, start, limits, strides);
- }
-
- // Reshape so the target values are in the first position of the last
- // dimension.
- std::vector<int64> unflattened_dims(dims.begin(), dims.end());
- dims[last_dim - 1] = smaller_dim_size;
- dims[last_dim] = last_dim_size + 1;
- diag = xla::Reshape(diag, dims);
-
- // Slices out the first column and reshapes to the final shape.
- std::vector<int64> start(dims.size(), 0);
- std::vector<int64> limits(dims.begin(), dims.end());
- std::vector<int64> strides(dims.size(), 1);
- limits[last_dim] = 1;
- diag = xla::Slice(diag, start, limits, strides);
-
- // Collapses away the last dimension.
- dims.pop_back();
- diag = xla::Reshape(diag, dims);
-
- ctx->SetOutput(0, diag);
+ xla::XlaOp input = ctx->Input(0);
+ ctx->SetOutput(0, xla::GetMatrixDiagonal(input));
}
};
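For intuition, the simplified DiagPartOp above is equivalent to collapsing the input to a square matrix and reading off its diagonal. A small numpy sketch of that equivalence (the shapes and numpy calls here are illustrative only, not the kernel code):

import numpy as np

# DiagPart on a tensor of shape dims + dims: flatten to (new_size, new_size),
# take the matrix diagonal, and reshape back to dims. This mirrors what the
# kernel now does with xla::GetMatrixDiagonal instead of Pad/Reshape/Slice.
x = np.arange(2 * 3 * 2 * 3, dtype=np.float32).reshape(2, 3, 2, 3)
new_dims = [2, 3]
new_size = int(np.prod(new_dims))
diag = np.diagonal(x.reshape(new_size, new_size)).reshape(new_dims)
# DiagPart semantics: diag[i, j] == x[i, j, i, j].
assert all(diag[i, j] == x[i, j, i, j] for i in range(2) for j in range(3))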
diff --git a/tensorflow/compiler/tf2xla/kernels/if_op.cc b/tensorflow/compiler/tf2xla/kernels/if_op.cc
index f5fcf3cacd..e2160feba0 100644
--- a/tensorflow/compiler/tf2xla/kernels/if_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/if_op.cc
@@ -246,6 +246,7 @@ void XlaIfOp::Compile(XlaOpKernelContext* ctx) {
VLOG(1) << "Done building If";
}
+REGISTER_XLA_OP(Name("If").AllowResourceTypes(), XlaIfOp);
REGISTER_XLA_OP(Name("XlaIf").AllowResourceTypes(), XlaIfOp);
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/kernels/matmul_op.cc b/tensorflow/compiler/tf2xla/kernels/matmul_op.cc
index 844080b8cf..aa45b02551 100644
--- a/tensorflow/compiler/tf2xla/kernels/matmul_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/matmul_op.cc
@@ -54,10 +54,14 @@ class MatMulOp : public XlaOpKernel {
const TensorShape b_shape = ctx->InputShape(1);
// Check that the dimensions of the two matrices are valid.
- OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a_shape),
- errors::InvalidArgument("In[0] is not a matrix"));
- OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(b_shape),
- errors::InvalidArgument("In[1] is not a matrix"));
+ OP_REQUIRES(
+ ctx, TensorShapeUtils::IsMatrix(a_shape),
+ errors::InvalidArgument("In[0] is not a matrix. Instead it has shape ",
+ a_shape.DebugString()));
+ OP_REQUIRES(
+ ctx, TensorShapeUtils::IsMatrix(b_shape),
+ errors::InvalidArgument("In[1] is not a matrix. Instead it has shape ",
+ b_shape.DebugString()));
int first_index = transpose_a_ ? 0 : 1;
int second_index = transpose_b_ ? 1 : 0;
diff --git a/tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc b/tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc
index a6f5769e7b..cc4b13d3b9 100644
--- a/tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/stateless_random_ops.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/client/lib/constants.h"
#include "tensorflow/compiler/xla/client/lib/math.h"
#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/lib/prng.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
@@ -33,134 +34,6 @@ limitations under the License.
namespace tensorflow {
namespace {
-// Rotates a 32-bit integer 'v' left by 'distance' bits.
-xla::XlaOp RotateLeftS32(xla::XlaBuilder* builder, const xla::XlaOp& v,
- int distance) {
- return xla::Or(
- xla::ShiftLeft(v, xla::ConstantR0<int>(builder, distance)),
- xla::ShiftRightLogical(v, xla::ConstantR0<int>(builder, 32 - distance)));
-}
-
-using ThreeFry2x32State = std::array<xla::XlaOp, 2>;
-
-// Implements the ThreeFry counter-based PRNG algorithm.
-// Salmon et al. SC 2011. Parallel random numbers: as easy as 1, 2, 3.
-// http://www.thesalmons.org/john/random123/papers/random123sc11.pdf
-ThreeFry2x32State ThreeFry2x32(xla::XlaBuilder* builder,
- ThreeFry2x32State input, ThreeFry2x32State key) {
- // Rotation distances specified by the Threefry2x32 algorithm.
- constexpr std::array<int, 8> rotations = {13, 15, 26, 6, 17, 29, 16, 24};
- ThreeFry2x32State x;
-
- std::array<xla::XlaOp, 3> ks;
- // 0x1BD11BDA is a parity constant specified by the ThreeFry2x32 algorithm.
- ks[2] = xla::ConstantR0<int32>(builder, 0x1BD11BDA);
- for (int i = 0; i < 2; ++i) {
- ks[i] = key[i];
- x[i] = input[i];
- ks[2] = xla::Xor(ks[2], key[i]);
- }
-
- x[0] = xla::Add(x[0], ks[0]);
- x[1] = xla::Add(x[1], ks[1]);
-
- // Performs a single round of the Threefry2x32 algorithm, with a rotation
- // amount 'rotation'.
- auto round = [builder](ThreeFry2x32State v, int rotation) {
- v[0] = xla::Add(v[0], v[1]);
- v[1] = RotateLeftS32(builder, v[1], rotation);
- v[1] = xla::Xor(v[0], v[1]);
- return v;
- };
-
- // There are no known statistical flaws with 13 rounds of Threefry2x32.
- // We are conservative and use 20 rounds.
- x = round(x, rotations[0]);
- x = round(x, rotations[1]);
- x = round(x, rotations[2]);
- x = round(x, rotations[3]);
- x[0] = xla::Add(x[0], ks[1]);
- x[1] = xla::Add(xla::Add(x[1], ks[2]), xla::ConstantR0<int32>(builder, 1));
-
- x = round(x, rotations[4]);
- x = round(x, rotations[5]);
- x = round(x, rotations[6]);
- x = round(x, rotations[7]);
- x[0] = xla::Add(x[0], ks[2]);
- x[1] = xla::Add(xla::Add(x[1], ks[0]), xla::ConstantR0<int32>(builder, 2));
-
- x = round(x, rotations[0]);
- x = round(x, rotations[1]);
- x = round(x, rotations[2]);
- x = round(x, rotations[3]);
- x[0] = xla::Add(x[0], ks[0]);
- x[1] = xla::Add(xla::Add(x[1], ks[1]), xla::ConstantR0<int32>(builder, 3));
-
- x = round(x, rotations[4]);
- x = round(x, rotations[5]);
- x = round(x, rotations[6]);
- x = round(x, rotations[7]);
- x[0] = xla::Add(x[0], ks[1]);
- x[1] = xla::Add(xla::Add(x[1], ks[2]), xla::ConstantR0<int32>(builder, 4));
-
- x = round(x, rotations[0]);
- x = round(x, rotations[1]);
- x = round(x, rotations[2]);
- x = round(x, rotations[3]);
- x[0] = xla::Add(x[0], ks[2]);
- x[1] = xla::Add(xla::Add(x[1], ks[0]), xla::ConstantR0<int32>(builder, 5));
-
- return x;
-}
-
-// Returns a tensor of 'shape' random values uniformly distributed in the range
-// [minval, maxval)
-xla::XlaOp RandomUniform(xla::XlaBuilder* builder, const xla::XlaOp& seed,
- const TensorShape& shape, double minval,
- double maxval) {
- // Split the seed into two 32-bit scalars to form a key.
- auto seed0 = xla::Reshape(xla::Slice(seed, {0}, {1}, {1}), {});
- auto seed1 = xla::Reshape(xla::Slice(seed, {1}, {2}, {1}), {});
- ThreeFry2x32State key = {seed0, seed1};
- const int64 size = shape.num_elements();
-
- const int64 half_size = MathUtil::CeilOfRatio<int64>(size, 2);
- const bool size_is_odd = (half_size * 2 != size);
-
- // Fill the generator inputs with unique counter values.
- ThreeFry2x32State inputs;
- inputs[0] = xla::Iota(builder, xla::S32, half_size);
- inputs[1] = xla::Add(inputs[0], xla::ConstantR0<int32>(builder, half_size));
- ThreeFry2x32State outputs = ThreeFry2x32(builder, inputs, key);
-
- if (size_is_odd) {
- outputs[1] = xla::Slice(outputs[1], {0}, {half_size - 1}, {1});
- }
-
- auto bits =
- xla::Reshape(xla::ConcatInDim(builder, outputs, 0), shape.dim_sizes());
-
- // Form 22 random mantissa bits, with a leading 1 bit. The leading 1 bit
- // forces the random bits into the mantissa.
- constexpr int kFloatBits = 32;
- constexpr int kMantissaBits = 23;
- bits = xla::Or(
- xla::ShiftRightLogical(
- bits, xla::ConstantR0<int32>(builder, kFloatBits - kMantissaBits)),
- xla::ConstantR0<int32>(builder, bit_cast<int32>(1.0f)));
- auto floats = xla::BitcastConvertType(bits, xla::F32);
-
- // We have a floating point number in the range [1.0, 2.0).
- // Subtract 1.0f to shift to the range [0.0, 1.0)
- floats = xla::Sub(floats, xla::ConstantR0<float>(builder, 1.0f));
- // Multiply and add to shift to the range [minval, maxval).
- floats = xla::Mul(floats, xla::ConstantR0<float>(builder, maxval - minval));
- floats = xla::Add(floats, xla::ConstantR0<float>(builder, minval));
- return floats;
-}
-
-} // namespace
-
class StatelessRandomUniformOp : public XlaOpKernel {
public:
explicit StatelessRandomUniformOp(OpKernelConstruction* ctx)
@@ -177,7 +50,17 @@ class StatelessRandomUniformOp : public XlaOpKernel {
errors::InvalidArgument("seed must have shape [2], not ",
seed_shape.DebugString()));
xla::XlaOp seed = ctx->Input(1);
- ctx->SetOutput(0, RandomUniform(builder, seed, shape, 0.0, 1.0));
+
+ xla::Shape xla_shape;
+ OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(DT_FLOAT, shape, &xla_shape));
+
+ auto seed0 = xla::Reshape(xla::Slice(seed, {0}, {1}, {1}), {});
+ auto seed1 = xla::Reshape(xla::Slice(seed, {1}, {2}, {1}), {});
+
+ auto uniform = xla::StatelessRngUniform(
+ {seed0, seed1}, xla_shape, xla::ConstantR0<float>(builder, 0.0),
+ xla::ConstantR0<float>(builder, 1.0));
+ ctx->SetOutput(0, uniform);
}
private:
@@ -206,8 +89,16 @@ class StatelessRandomNormalOp : public XlaOpKernel {
seed_shape.DebugString()));
xla::XlaOp seed = ctx->Input(1);
xla::XlaBuilder* builder = ctx->builder();
- auto uniform =
- RandomUniform(builder, seed, shape, std::nextafter(-1.0f, 0.0f), 1.0);
+ xla::Shape xla_shape;
+ OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(DT_FLOAT, shape, &xla_shape));
+
+ auto seed0 = xla::Reshape(xla::Slice(seed, {0}, {1}, {1}), {});
+ auto seed1 = xla::Reshape(xla::Slice(seed, {1}, {2}, {1}), {});
+
+ auto uniform = xla::StatelessRngUniform(
+ {seed0, seed1}, xla_shape,
+ xla::ConstantR0<float>(builder, std::nextafter(-1.0f, 0.0f)),
+ xla::ConstantR0<float>(builder, 1.0));
// Convert uniform distribution to normal distribution by computing
// sqrt(2) * erfinv(x)
auto normal =
@@ -240,10 +131,18 @@ class StatelessTruncatedNormalOp : public XlaOpKernel {
errors::InvalidArgument("seed must have shape [2], not ",
seed_shape.DebugString()));
xla::XlaOp seed = ctx->Input(1);
- xla::XlaBuilder* b = ctx->builder();
+ xla::XlaBuilder* builder = ctx->builder();
+
+ auto seed0 = xla::Reshape(xla::Slice(seed, {0}, {1}, {1}), {});
+ auto seed1 = xla::Reshape(xla::Slice(seed, {1}, {2}, {1}), {});
+
+ xla::Shape xla_shape;
+ OP_REQUIRES_OK(ctx, TensorShapeToXLAShape(DT_FLOAT, shape, &xla_shape));
+ auto uniform = xla::StatelessRngUniform(
+ {seed0, seed1}, xla_shape,
+ xla::ConstantR0<float>(builder, std::numeric_limits<float>::min()),
+ xla::ConstantR0<float>(builder, 1.0));
- auto uniform =
- RandomUniform(b, seed, shape, std::numeric_limits<float>::min(), 1.0);
ctx->SetOutput(0, TruncatedNormal(uniform));
}
@@ -257,4 +156,5 @@ REGISTER_XLA_OP(Name("StatelessTruncatedNormal")
.TypeConstraint("Tseed", DT_INT32),
StatelessTruncatedNormalOp);
+} // namespace
} // namespace tensorflow
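The removed RandomUniform helper documented the bit trick for turning 32 random bits into a float in [0, 1). A numpy rendition of that trick, assuming the shared xla::StatelessRngUniform keeps the same construction (the numpy code below is a stand-in for illustration, not the library implementation):

import numpy as np

# Keep the top 23 random bits as the mantissa under the exponent of 1.0f,
# giving a value in [1.0, 2.0), then subtract 1.0 to land in [0.0, 1.0).
bits = np.array([0x00000000, 0x12345678, 0xffffffff], dtype=np.uint32)
one_bits = np.array(1.0, dtype=np.float32).view(np.uint32)   # 0x3f800000
mantissa = (bits >> np.uint32(32 - 23)) | one_bits
floats = mantissa.view(np.float32) - np.float32(1.0)
assert np.all((floats >= 0.0) & (floats < 1.0))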
diff --git a/tensorflow/compiler/tf2xla/kernels/topk_op.cc b/tensorflow/compiler/tf2xla/kernels/topk_op.cc
index 1ddcb08c8e..82d4a69777 100644
--- a/tensorflow/compiler/tf2xla/kernels/topk_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/topk_op.cc
@@ -41,33 +41,35 @@ class TopKOp : public XlaOpKernel {
OP_REQUIRES(context, input_shape.dims() >= 1,
errors::InvalidArgument("input must be >= 1-D, got shape ",
input_shape.DebugString()));
+ int last_dim = input_shape.dims() - 1;
+ int last_dim_size = input_shape.dim_size(last_dim);
OP_REQUIRES(
- context, input_shape.dim_size(input_shape.dims() - 1) >= k,
+ context, last_dim_size >= k,
errors::InvalidArgument("input must have at least k columns. Had ",
- input_shape.dim_size(input_shape.dims() - 1),
- ", needed ", k));
-
- OP_REQUIRES(
- context, input_shape.dims() == 1,
- errors::Unimplemented("TopK is implemented for 1-D inputs, got shape ",
- input_shape.DebugString()));
+ last_dim_size, ", needed ", k));
xla::XlaBuilder* const b = context->builder();
- if (input_shape.dim_size(0) < k) {
- k = input_shape.dim_size(0);
+ if (last_dim_size < k) {
+ k = last_dim_size;
}
const xla::XlaOp input = context->Input(0);
- xla::XlaOp iota_s32 = xla::Iota(b, xla::S32, input_shape.dim_size(0));
- xla::XlaOp sort_result = xla::Sort(xla::Neg(input), iota_s32);
+
+ xla::XlaOp iota_s32 = xla::Iota(b, xla::S32, last_dim_size);
+ auto input_dims = input_shape.dim_sizes();
+ std::vector<int64> broadcast_dims(input_dims.begin(), input_dims.end() - 1);
+ xla::XlaOp broadcast_s32 = xla::Broadcast(iota_s32, broadcast_dims);
+ xla::XlaOp sort_result = xla::Sort(xla::Neg(input), broadcast_s32);
+
+ std::vector<int64> start_indices(input_shape.dims(), 0);
+ std::vector<int64> limit_indices(input_dims.begin(), input_dims.end());
+ limit_indices[last_dim] = k;
+ std::vector<int64> strides(input_shape.dims(), 1);
+
xla::XlaOp values =
- xla::Neg(xla::Slice(xla::GetTupleElement(sort_result, 0),
- /*start_indices=*/{0},
- /*limit_indices=*/{k},
- /*strides=*/{1}));
+ xla::Neg(xla::Slice(xla::GetTupleElement(sort_result, 0), start_indices,
+ limit_indices, strides));
xla::XlaOp indices = xla::Slice(xla::GetTupleElement(sort_result, 1),
- /*start_indices=*/{0},
- /*limit_indices=*/{k},
- /*strides=*/{1});
+ start_indices, limit_indices, strides);
context->SetOutput(0, values);
context->SetOutput(1, indices);
}
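In numpy terms, the generalized kernel computes top-k along the last axis by sorting the negated values together with an index tensor and slicing the first k entries. A small sketch of that equivalence (illustrative numpy only):

import numpy as np

# Descending top-k along the last axis, phrased the way the kernel does it:
# ascending-sort the negated values (carrying indices through the same order)
# and keep the first k columns.
x = np.array([[3., 1., 4., 1.], [5., 9., 2., 6.]])
k = 2
order = np.argsort(-x, axis=-1, kind="stable")
values = -np.sort(-x, axis=-1)[..., :k]
indices = order[..., :k]
print(values)   # [[4. 3.], [9. 6.]]
print(indices)  # [[2 0], [1 3]]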
diff --git a/tensorflow/compiler/tf2xla/kernels/unary_ops.cc b/tensorflow/compiler/tf2xla/kernels/unary_ops.cc
index 116a020437..e6ec794cfd 100644
--- a/tensorflow/compiler/tf2xla/kernels/unary_ops.cc
+++ b/tensorflow/compiler/tf2xla/kernels/unary_ops.cc
@@ -51,43 +51,18 @@ XLAJIT_MAKE_UNARY(Conj, xla::Conj(x));
// Return x if x>0, otherwise -x.
XLAJIT_MAKE_UNARY(Abs, xla::Abs(x));
-
-// acos(x) = 2 * atan(sqrt(1 - x^2) / (1 + x))
-XLAJIT_MAKE_UNARY(Acos,
- xla::ScalarLike(x, 2.0) *
- xla::Atan2(xla::Sqrt(xla::ScalarLike(x, 1.0) - x * x),
- xla::ScalarLike(x, 1.0) + x));
-
-// acosh(x) = log(x + sqrt(x^2 - 1))
-// = log(x + sqrt((x+1)*(x-1)))
-XLAJIT_MAKE_UNARY(Acosh,
- xla::Log(x + xla::Sqrt((x + xla::ScalarLike(x, 1.0)) *
- (x - xla::ScalarLike(x, 1.0)))));
-
-// asin(x) = 2 * atan(x / (1 + sqrt(1 - x^2)))
-XLAJIT_MAKE_UNARY(
- Asin, xla::ScalarLike(x, 2.0) *
- xla::Atan2(x, xla::ScalarLike(x, 1.0) +
- xla::Sqrt(xla::ScalarLike(x, 1.0) - x * x)));
-
-// asinh(x) = log(x + sqrt(x^2 + 1))
-XLAJIT_MAKE_UNARY(Asinh,
- xla::Log(x + xla::Sqrt(x * x + xla::ScalarLike(x, 1.0))));
-
-XLAJIT_MAKE_UNARY(Atan, xla::Atan2(x, xla::ScalarLike(x, 1.0)));
-
-// atanh(x) = 0.5 * log((1 + x) / (1 - x))
-XLAJIT_MAKE_UNARY(Atanh, xla::Log((xla::ScalarLike(x, 1.0) + x) /
- (xla::ScalarLike(x, 1.0) - x)) *
- xla::ScalarLike(x, 0.5));
+XLAJIT_MAKE_UNARY(Acos, xla::Acos(x));
+XLAJIT_MAKE_UNARY(Acosh, xla::Acosh(x));
+XLAJIT_MAKE_UNARY(Asin, xla::Asin(x));
+XLAJIT_MAKE_UNARY(Asinh, xla::Asinh(x));
+XLAJIT_MAKE_UNARY(Atan, xla::Atan(x));
+XLAJIT_MAKE_UNARY(Atanh, xla::Atanh(x));
XLAJIT_MAKE_UNARY(Ceil, xla::Ceil(x));
XLAJIT_MAKE_UNARY(Cos, xla::Cos(x));
-XLAJIT_MAKE_UNARY(Cosh, (xla::Exp(x) + xla::Exp(-x)) * xla::ScalarLike(x, 0.5));
+XLAJIT_MAKE_UNARY(Cosh, xla::Cosh(x));
XLAJIT_MAKE_UNARY(Sin, xla::Sin(x));
XLAJIT_MAKE_UNARY(Exp, xla::Exp(x));
-
XLAJIT_MAKE_UNARY(Expm1, xla::Expm1(x));
-
XLAJIT_MAKE_UNARY(Floor, xla::Floor(x));
XLAJIT_MAKE_UNARY(IsFinite, xla::IsFinite(x));
XLAJIT_MAKE_UNARY(
@@ -99,7 +74,6 @@ XLAJIT_MAKE_UNARY(IsNan, xla::Ne(x, x));
XLAJIT_MAKE_UNARY(Inv, xla::ScalarLike(x, 1.0) / x);
XLAJIT_MAKE_UNARY(Reciprocal, xla::ScalarLike(x, 1.0) / x);
XLAJIT_MAKE_UNARY(Log, xla::Log(x));
-
XLAJIT_MAKE_UNARY(Log1p, xla::Log1p(x));
XLAJIT_MAKE_UNARY(Invert, xla::Not(x));
@@ -136,7 +110,7 @@ XLAJIT_MAKE_UNARY(Sigmoid, Sigmoid(x));
// Returns 0 if x is 0, -1 if x < 0 and 1 if x > 0.
XLAJIT_MAKE_UNARY(Sign, xla::Sign(x));
-XLAJIT_MAKE_UNARY(Sinh, (xla::Exp(x) - xla::Exp(-x)) * xla::ScalarLike(x, 0.5));
+XLAJIT_MAKE_UNARY(Sinh, xla::Sinh(x));
// softplus(x) = log(1 + exp(x))
//
@@ -153,7 +127,7 @@ XLAJIT_MAKE_UNARY(Softplus, xla::Max(x, xla::ScalarLike(x, 0.0)) +
XLAJIT_MAKE_UNARY(Softsign, x / (xla::Abs(x) + xla::ScalarLike(x, 1.0)));
XLAJIT_MAKE_UNARY(Sqrt, xla::Sqrt(x));
XLAJIT_MAKE_UNARY(Square, x* x);
-XLAJIT_MAKE_UNARY(Tan, xla::Sin(x) / xla::Cos(x));
+XLAJIT_MAKE_UNARY(Tan, xla::Tan(x));
XLAJIT_MAKE_UNARY(Tanh, xla::Tanh(x));
XLAJIT_MAKE_UNARY(Real, xla::Real(x));
@@ -189,5 +163,51 @@ class ErfcOp : public XlaOpKernel {
};
REGISTER_XLA_OP(Name("Erfc"), ErfcOp);
+class LgammaOp : public XlaOpKernel {
+ public:
+ explicit LgammaOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
+ // Calculate lgamma using the Lanczos approximation
+ // (https://en.wikipedia.org/wiki/Lanczos_approximation).
+ void Compile(XlaOpKernelContext* ctx) override {
+ xla::XlaOp input = ctx->Input(0);
+ xla::PrimitiveType input_type = ctx->input_xla_type(0);
+
+ if (input_type == xla::F16 || input_type == xla::BF16) {
+      // The approximation works better with at least 32 bits of accuracy.
+ xla::XlaOp input_f32 = xla::ConvertElementType(input, xla::F32);
+ xla::XlaOp result_f32 = xla::Lgamma(input_f32);
+ xla::XlaOp result_x16 = xla::ConvertElementType(result_f32, input_type);
+ ctx->SetOutput(0, result_x16);
+ } else {
+ xla::XlaOp result = xla::Lgamma(input);
+ ctx->SetOutput(0, result);
+ }
+ }
+};
+REGISTER_XLA_OP(Name("Lgamma"), LgammaOp);
+
+class DigammaOp : public XlaOpKernel {
+ public:
+ explicit DigammaOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
+  // Calculate digamma, the derivative of lgamma, using the Lanczos
+  // approximation (https://en.wikipedia.org/wiki/Lanczos_approximation).
+ void Compile(XlaOpKernelContext* ctx) override {
+ xla::XlaOp input = ctx->Input(0);
+ xla::PrimitiveType input_type = ctx->input_xla_type(0);
+
+ if (input_type == xla::F16 || input_type == xla::BF16) {
+      // The approximation works better with at least 32 bits of accuracy.
+ xla::XlaOp input_f32 = xla::ConvertElementType(input, xla::F32);
+ xla::XlaOp result_f32 = xla::Digamma(input_f32);
+ xla::XlaOp result_x16 = xla::ConvertElementType(result_f32, input_type);
+ ctx->SetOutput(0, result_x16);
+ } else {
+ xla::XlaOp result = xla::Digamma(input);
+ ctx->SetOutput(0, result);
+ }
+ }
+};
+REGISTER_XLA_OP(Name("Digamma"), DigammaOp);
+
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/kernels/while_op.cc b/tensorflow/compiler/tf2xla/kernels/while_op.cc
index 9413a30a6c..009fdd81b2 100644
--- a/tensorflow/compiler/tf2xla/kernels/while_op.cc
+++ b/tensorflow/compiler/tf2xla/kernels/while_op.cc
@@ -299,6 +299,7 @@ void XlaWhileOp::Compile(XlaOpKernelContext* ctx) {
VLOG(1) << "Done building while loop";
}
+REGISTER_XLA_OP(Name("While").AllowResourceTypes(), XlaWhileOp);
REGISTER_XLA_OP(Name("XlaWhile").AllowResourceTypes(), XlaWhileOp);
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/lib/BUILD b/tensorflow/compiler/tf2xla/lib/BUILD
index becc8b84fe..30039e256a 100644
--- a/tensorflow/compiler/tf2xla/lib/BUILD
+++ b/tensorflow/compiler/tf2xla/lib/BUILD
@@ -120,6 +120,7 @@ cc_library(
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla/client/lib:constants",
+ "//tensorflow/compiler/xla/client/lib:numeric",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
"//tensorflow/compiler/xla/client/xla_client:xla_computation",
"//tensorflow/core:lib",
diff --git a/tensorflow/compiler/tf2xla/lib/scatter.cc b/tensorflow/compiler/tf2xla/lib/scatter.cc
index 6a5be1c2be..739032fef7 100644
--- a/tensorflow/compiler/tf2xla/lib/scatter.cc
+++ b/tensorflow/compiler/tf2xla/lib/scatter.cc
@@ -132,7 +132,7 @@ xla::StatusOr<xla::XlaOp> XlaScatter(
// Discard updates with negative indices, since some users expect this.
auto index_in_range = xla::ReduceAll(
xla::Le(zero_index, index), xla::ConstantR0<bool>(body_builder, true),
- xla::CreateScalarAndComputation(body_builder));
+ xla::CreateScalarAndComputation(xla::PRED, body_builder));
// Make the index in bounds to prevent implementation defined behavior.
index = xla::Max(index, zero_index);
diff --git a/tensorflow/compiler/tf2xla/lib/triangular_solve.cc b/tensorflow/compiler/tf2xla/lib/triangular_solve.cc
index ce0f28db8f..75c0ad7f7e 100644
--- a/tensorflow/compiler/tf2xla/lib/triangular_solve.cc
+++ b/tensorflow/compiler/tf2xla/lib/triangular_solve.cc
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/tf2xla/lib/batch_dot.h"
#include "tensorflow/compiler/tf2xla/lib/util.h"
#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -28,9 +29,307 @@ limitations under the License.
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
+#include "tensorflow/core/lib/math/math_util.h"
namespace tensorflow {
+// Get the diagonal blocks of the coefficient matrix
+xla::XlaOp DiagonalBlocks(xla::XlaOp a, int64 block_size) {
+ xla::XlaBuilder* builder = a.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(a));
+ int ndims = xla::ShapeUtil::Rank(shape);
+ int64 n = xla::ShapeUtil::GetDimension(shape, -1);
+ int64 num_blocks = n / block_size;
+
+ xla::XlaOp diag_blocks;
+
+    // If the coefficient matrix dimension equals the block size, we just add
+    // a singleton dimension, i.e. [..., n, n] -> [..., 1, n, n]
+ if (n == block_size) {
+ std::vector<int64> permutation(ndims);
+ std::iota(permutation.begin(), permutation.end(), 1);
+ permutation.insert(permutation.end() - 2, 0);
+ return Transpose(Broadcast(a, /*broadcast_sizes=*/{1}), permutation);
+ }
+
+ // We can grab entire blocks using gather
+ if (n > block_size) {
+ // Construct the starting indices of the diagonal blocks
+ auto gather_indices =
+ Transpose(Broadcast(Mul(Iota(builder, xla::S32, num_blocks),
+ xla::ConstantR0<int32>(builder, block_size)),
+ /*broadcast_sizes=*/{2}),
+ /*permutation=*/{1, 0});
+
+ // Gather the diagonal blocks
+ xla::GatherDimensionNumbers dim_numbers;
+ dim_numbers.add_output_window_dims(ndims - 1);
+ dim_numbers.add_output_window_dims(ndims);
+ dim_numbers.add_gather_dims_to_operand_dims(ndims - 2);
+ dim_numbers.add_gather_dims_to_operand_dims(ndims - 1);
+ dim_numbers.set_index_vector_dim(1);
+ diag_blocks = Gather(a, gather_indices, dim_numbers,
+ /*window_bounds=*/{block_size, block_size});
+ }
+
+ // The last block might be smaller than the block size,
+ // so we will need to pad it
+ if (n % block_size != 0) {
+ // Pad with zeros
+ auto last_blocks =
+ SliceInMinorDims(a, {n - n % block_size, n - n % block_size}, {n, n});
+ xla::PaddingConfig config = xla::MakeNoPaddingConfig(ndims);
+ int64 padding = block_size - n % block_size;
+ config.mutable_dimensions(ndims - 1)->set_edge_padding_high(padding);
+ config.mutable_dimensions(ndims - 2)->set_edge_padding_high(padding);
+ last_blocks =
+ Pad(last_blocks, Zero(builder, shape.element_type()), config);
+
+ // Add a singleton dimension
+ // i.e. [..., block_size, block_size] -> [..., 1, block_size, block_size]
+ TF_ASSIGN_OR_RETURN(xla::Shape blocks_shape,
+ builder->GetShape(last_blocks));
+ auto shape_dims = xla::AsInt64Slice(blocks_shape.dimensions());
+ auto last_blocks_dims = std::vector<int64>(ndims);
+ std::copy(shape_dims.begin(), shape_dims.end(), last_blocks_dims.begin());
+ last_blocks_dims.insert(last_blocks_dims.end() - 2, 1);
+ last_blocks = Reshape(last_blocks, last_blocks_dims);
+
+ // Concatenate with the other blocks if necessary
+ if (n > block_size) {
+ diag_blocks =
+ xla::ConcatInDim(builder, {diag_blocks, last_blocks}, ndims - 2);
+ } else {
+ diag_blocks = last_blocks;
+ }
+ }
+
+ return diag_blocks;
+ });
+}
+
+xla::XlaOp InvertDiagonalBlocks(xla::XlaOp diag_blocks, bool lower,
+ bool transpose_a, bool conjugate_a) {
+ xla::XlaBuilder* builder = diag_blocks.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+    // The input is a batch of square triangular matrices. Its shape is
+    // (..., size, size). We reshape it to (num_blocks, size, size).
+ TF_ASSIGN_OR_RETURN(xla::Shape shape, builder->GetShape(diag_blocks));
+ int64 block_size = xla::ShapeUtil::GetDimension(shape, -1);
+ int64 num_blocks = xla::ShapeUtil::ElementsIn(shape) /
+ tensorflow::MathUtil::IPow(block_size, 2);
+ diag_blocks = Reshape(diag_blocks, {num_blocks, block_size, block_size});
+
+ // The input must be triangular because we rely on that when doing
+ // multiplications later on
+ diag_blocks = Triangle(diag_blocks, /*lower=*/lower);
+
+    // Rescale the blocks to be unit triangular, but avoid dividing by zero
+    // (which can happen if the last block was padded); otherwise NaNs would
+    // be introduced and would propagate.
+ auto diags = GetMatrixDiagonal(diag_blocks);
+ TF_ASSIGN_OR_RETURN(xla::Shape diags_shape, builder->GetShape(diags));
+ auto one = ScalarLike(diags, 1);
+ auto ones = Broadcast(one, xla::AsInt64Slice(diags_shape.dimensions()));
+ diags = Select(Eq(diags, Zero(builder, shape.element_type())), ones, diags);
+ auto scaled_diag_blocks = Div(diag_blocks, diags, {0, 2});
+
+    // We can now use the fact that for a block lower triangular matrix
+    // [[L11, 0], [L21, L22]], given the inverses L11' and L22', the lower-left
+    // block of the inverse is -L22' * L21 * L11'. In our case, L21 is a vector
+    // and our blocks have been rescaled to be unit triangular, so
+    // L22 = L22' = 1.
+
+ // Initialize the output matrix with -1s on the diagonal. We use -1 instead
+ // of 1 because we cannot do matrix-vector multiplies with variable shapes
+ // inside of a loop, or do irregularly shaped in-place updates. Hence,
+    // L21 <- -L22' * L21 * L11' cannot be done naively. Instead, we update the
+ // entire row i.e. we calculate
+ // [L21 L22 0] <- -[L21 L22 0] @ diag_blocks([L11', -I, -I])
+ // which means [L21 L22 0] <- [-L21 * L11', L22, 0].
+ auto identity =
+ IdentityMatrix(builder, shape.element_type(), block_size, block_size);
+ auto neg_identity = -identity;
+
+ // The first or last diagonal element should be set to 1 instead of -1
+ // though, since we never update it
+ auto pos_one = Reshape(One(builder, shape.element_type()), {1, 1});
+ auto start_index = (lower) ? 0 : block_size - 1;
+ auto output_block = DynamicUpdateSlice(
+ neg_identity, pos_one,
+ /*start_indices=*/xla::ConstantR1<int>(builder, 2, start_index));
+
+ // Broadcast diag([1, -1, -1, ...]) to every block
+ xla::XlaOp output = Broadcast(output_block,
+ /*broadcast_sizes=*/{num_blocks});
+
+ // Now we construct a loop that performs matrix-vector multiplications
+ // inverting the blocks one row at a time
+ std::vector<xla::Shape> tuple_shapes = {
+ // The loop iteration counter is a scalar, incremented each iteration.
+ xla::ShapeUtil::MakeShape(xla::S32, {}),
+ // The output has the shape of A, with one row updated each iteration.
+ xla::ShapeUtil::MakeShape(shape.element_type(),
+ {num_blocks, block_size, block_size}),
+ // The input is a loop invariant.
+ xla::ShapeUtil::MakeShape(shape.element_type(),
+ {num_blocks, block_size, block_size})};
+ xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(tuple_shapes);
+
+ auto init_i = One(builder, xla::S32);
+ auto init = xla::Tuple(builder, {init_i, output, scaled_diag_blocks});
+
+ // Construct the loop condition function.
+ std::unique_ptr<xla::XlaBuilder> condb =
+ builder->CreateSubBuilder("InvertDiagCond");
+ {
+ auto i = GetTupleElement(
+ Parameter(condb.get(), 0, tuple_shape, "InvertDiagCondTuple"), 0);
+ Lt(i, xla::ConstantR0<int32>(condb.get(), block_size));
+ }
+ TF_ASSIGN_OR_RETURN(auto cond, condb->Build());
+
+ // Construct the loop body function.
+ std::unique_ptr<xla::XlaBuilder> bodyb =
+ builder->CreateSubBuilder("InvertDiagBody");
+ {
+ auto input_tuple =
+ Parameter(bodyb.get(), 0, tuple_shape, "InvertDiagBodyTuple");
+
+ auto i = GetTupleElement(input_tuple, 0);
+ auto body_out = GetTupleElement(input_tuple, 1);
+ auto body_input = GetTupleElement(input_tuple, 2);
+
+ auto zero = xla::ConstantR1<int32>(bodyb.get(), 1, 0);
+ auto j = (lower) ? i : ScalarLike(i, block_size - 1) - i;
+ auto start_indices =
+ xla::ConcatInDim(bodyb.get(), {zero, Reshape(j, {1}), zero}, 0);
+ auto input_row =
+ DynamicSlice(body_input, start_indices,
+ /*slice_sizes=*/{num_blocks, 1, block_size});
+
+ // We want -L21 L11^{-1}
+ xla::DotDimensionNumbers dnums;
+ dnums.add_lhs_batch_dimensions(0);
+ dnums.add_rhs_batch_dimensions(0);
+ dnums.add_lhs_contracting_dimensions(2);
+ dnums.add_rhs_contracting_dimensions(1);
+ auto update = -DotGeneral(input_row, body_out, dnums);
+
+ body_out = DynamicUpdateSlice(body_out, update, start_indices);
+
+ auto next_i = i + ScalarLike(i, 1);
+ xla::Tuple(bodyb.get(), {next_i, body_out, body_input});
+ }
+ TF_ASSIGN_OR_RETURN(auto body, bodyb->Build());
+
+ // Construct the While loop and return the result,
+ // return while_loop(cond_fun, body_fun, init)[1]
+ auto invert_while = While(cond, body, init);
+ auto inv_diag_blocks = GetTupleElement(invert_while, 1);
+
+ // Undo the scaling
+ inv_diag_blocks = Div(inv_diag_blocks, diags,
+ /*broadcast_dimensions=*/{0, 1});
+
+ // Reshape back to original batch major dimensions
+ return Reshape(inv_diag_blocks, xla::AsInt64Slice(shape.dimensions()));
+ });
+}
+
+xla::XlaOp SolveWithInvertedDiagonalBlocks(xla::XlaOp a, xla::XlaOp b,
+ xla::XlaOp inv_diag_blocks,
+ bool left_side, bool lower,
+ bool transpose_a, bool conjugate_a) {
+ xla::XlaBuilder* builder = a.builder();
+ return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
+ TF_ASSIGN_OR_RETURN(xla::Shape blocks_shape,
+ builder->GetShape(inv_diag_blocks));
+ TF_ASSIGN_OR_RETURN(xla::Shape b_shape, builder->GetShape(b));
+ int64 block_size = xla::ShapeUtil::GetDimension(blocks_shape, -1);
+
+ TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
+ int64 ndims = xla::ShapeUtil::Rank(a_shape);
+ int64 n = xla::ShapeUtil::GetDimension(a_shape, -1);
+ int64 num_blocks = n / block_size + (n % block_size != 0);
+ int64 m_dim = (left_side) ? -1 : -2;
+ int64 m = xla::ShapeUtil::GetDimension(b_shape, m_dim);
+
+ // Initialize the solution
+ auto x = ZerosLike(b);
+
+ // This loop is unrolled for performance reasons, but it could be expressed
+ // rolled as well since the matrices are of the same size each iteration
+ for (int i = 0; i < num_blocks; i++) {
+      // High-level intuition: We have B[i] = L[i] @ X. Since L is lower
+      // triangular, this means B[i] = L[i, :i + 1] @ X[:i + 1]. We can split
+      // this into two parts, B[i] = L[i, :i] @ X[:i] + L[i, i] @ X[i], and
+      // solve for X[i] as X[i] = inv(L[i, i]) @ (B[i] - L[i, :i] @ X[:i]).
+
+ // Decide whether we go from first block to last or vice versa
+ auto j = (left_side ^ lower ^ transpose_a) ? num_blocks - 1 - i : i;
+
+ // Get the size of the inverse blocks (the last one might be smaller)
+ int64 block = (n % block_size != 0 && j + 1 == num_blocks)
+ ? n % block_size
+ : block_size;
+ auto inv_block =
+ MaybeConjugate(Collapse(SliceInMinorDims(inv_diag_blocks, {j, 0, 0},
+ {j + 1, block, block}),
+ /*dimensions=*/{ndims - 2, ndims - 1}),
+ conjugate_a);
+
+ // Get the corresponding row of B
+ int64 k = std::min((j + 1) * block_size, n);
+ std::vector<int64> start = {j * block_size, 0};
+ std::vector<int64> end = {k, m};
+ if (!left_side) {
+ std::swap(start[0], start[1]);
+ std::swap(end[0], end[1]);
+ }
+ auto b_row = SliceInMinorDims(b, start, end);
+
+ xla::XlaOp remainder;
+ if (i == 0) {
+ remainder = b_row;
+ } else {
+ // This matrix multiply involves a lot of multiplying with zero (namely,
+ // X[i * block_size:] = 0), but this is faster than slicing...
+ end = {k, n};
+ if (!left_side) {
+ std::swap(end[0], end[1]);
+ }
+ if (transpose_a) {
+ std::swap(start[0], start[1]);
+ std::swap(end[0], end[1]);
+ }
+ auto a_row =
+ MaybeConjugate(SliceInMinorDims(a, start, end), conjugate_a);
+ if (left_side) {
+ remainder = b_row - BatchDot(a_row, x, transpose_a, false);
+ } else {
+ remainder = b_row - BatchDot(x, a_row, false, transpose_a);
+ }
+ }
+
+ xla::XlaOp x_update;
+ auto zero = Zero(builder, xla::S32);
+ auto start_index =
+ xla::ConstantR0WithType(builder, xla::S32, j * block_size);
+ std::vector<xla::XlaOp> update_starts = {start_index, zero};
+ if (left_side) {
+ x_update = BatchDot(inv_block, remainder, transpose_a, false);
+ } else {
+ x_update = BatchDot(remainder, inv_block, false, transpose_a);
+ std::swap(update_starts[0], update_starts[1]);
+ }
+ x = DynamicUpdateSliceInMinorDims(x, x_update, /*starts=*/update_starts);
+ }
+
+ return x;
+ });
+}
+
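The three helpers above implement a blocked substitution: gather the diagonal blocks, invert them once, then sweep over block rows. A minimal numpy sketch of that idea for the simplest configuration (left_side=True, lower=True, no transpose or conjugate); the function name below is hypothetical, and the XLA version additionally batches, pads the last block, and handles the transposed and right-side variants:

import numpy as np

def blocked_lower_triangular_solve(a, b, block_size):
  """Solve a @ x = b for lower triangular a by block forward substitution."""
  n = a.shape[-1]
  x = np.zeros_like(b)
  for start in range(0, n, block_size):
    end = min(start + block_size, n)
    # B[i] - A[i, :i] @ X[:i]; there is nothing to subtract on the first row.
    remainder = b[start:end] - a[start:end, :start] @ x[:start]
    # X[i] = inv(A[i, i]) @ remainder. The XLA code precomputes all of these
    # block inverses up front with InvertDiagonalBlocks.
    x[start:end] = np.linalg.inv(a[start:end, start:end]) @ remainder
  return x

rng = np.random.RandomState(0)
a = np.tril(rng.rand(7, 7)) + 7.0 * np.eye(7)   # well-conditioned lower triangular
b = rng.rand(7, 3)
np.testing.assert_allclose(blocked_lower_triangular_solve(a, b, block_size=3),
                           np.linalg.solve(a, b), rtol=1e-6)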
xla::XlaOp TriangularSolve(xla::XlaOp a, xla::XlaOp b, bool left_side,
bool lower, bool transpose_a, bool conjugate_a,
int64 block_size) {
@@ -44,7 +343,7 @@ xla::XlaOp TriangularSolve(xla::XlaOp a, xla::XlaOp b, bool left_side,
xla::ShapeUtil::HumanString(a_shape), " vs. ",
xla::ShapeUtil::HumanString(b_shape));
}
- const int ndims = xla::ShapeUtil::Rank(a_shape);
+ const int64 ndims = xla::ShapeUtil::Rank(a_shape);
if (ndims < 2) {
return errors::InvalidArgument(
"Arguments to TriangularSolve must have rank >= 2: ", ndims);
@@ -84,510 +383,18 @@ xla::XlaOp TriangularSolve(xla::XlaOp a, xla::XlaOp b, bool left_side,
block_size);
}
- std::map<int, xla::XlaComputation> base_computations;
- auto get_base_triangular_solve =
- [&](int k) -> xla::StatusOr<xla::XlaComputation*> {
- xla::XlaComputation& computation = base_computations[k];
- if (computation.IsNull()) {
- std::unique_ptr<xla::XlaBuilder> sub = builder->CreateSubBuilder(
- tensorflow::strings::StrCat("trsm_base_", k));
-
- auto a_param = xla::Parameter(
- sub.get(), 0,
- xla::ShapeUtil::MakeShape(b_shape.element_type(),
- ConcatVectors(batch_dimensions, {k, k})),
- "a");
-
- std::array<int64, 2> b_lastd;
- if (left_side) {
- b_lastd = {k, n};
- } else {
- b_lastd = {m, k};
- }
- auto b_param = xla::Parameter(
- sub.get(), 1,
- xla::ShapeUtil::MakeShape(b_shape.element_type(),
- ConcatVectors(batch_dimensions, b_lastd)),
- "b");
-
- // We use a left-looking or right-looking subroutine on the block
- // diagonal in the lower=true cases, while falling back to a recursive
- // call in others. The left-looking and right-looking subroutines are
- // written with a While loop and so yields much faster compile times.
- // Moreover, they can give higher performance on smaller (sub)problems.
- if (left_side && lower) {
- TriangularSolveLeftLooking(a_param, b_param, transpose_a,
- conjugate_a);
- } else if (!left_side && lower) {
- TriangularSolveRightLooking(a_param, b_param, transpose_a,
- conjugate_a);
- } else {
- TriangularSolve(a_param, b_param, left_side, lower, transpose_a,
- conjugate_a,
- /*block_size=*/1);
- }
-
- TF_ASSIGN_OR_RETURN(computation, sub->Build());
- }
- return &computation;
- };
-
- xla::XlaOp output = xla::ZerosLike(b);
-
- // Right-looking blocked triangular solve.
- // For an explanation of the algorithm, see the TRSM discussion in:
- // Goto, Kazushige, and Robert Van De Geijn. "High-performance
- // implementation of the level-3 BLAS." ACM Transactions on Mathematical
- // Software (TOMS) 35.1 (2008): 4.
-
- // In the code comments below, T = lambda x: np.swapaxes(x, -1, -2) if
- // conjugate_a is False, or T = lambda x: np.conj(np.swapaxes(x, -1, -2)) if
- // conjugate_a is True.
-
- if (!left_side && lower == transpose_a) {
- // for i in range(0, a.shape[-1], block_size):
- for (int64 i = 0; i < n; i += block_size) {
- int64 k = std::min(block_size, n - i);
-
- // output[..., :, i:i+k] = triangular_solve(
- // a[..., i:i+k, i:i+k], b[..., :, i:i+k], ..., block_size=1)
- auto a_slice = SliceInMinorDims(a, {i, i}, {i + k, i + k});
- auto b_slice = SliceInMinorDims(b, {0, i}, {m, i + k});
- xla::XlaOp update;
- if (k > 1) {
- TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
- get_base_triangular_solve(k));
- update = xla::Call(builder, *solve, {a_slice, b_slice});
- } else {
- auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
- update = b_slice / a_slice_conj;
- }
- output = UpdateSliceInMinorDims(output, update, {0, i});
-
- // if i + k < a.shape[-1]:
- // a_slice_2 = a[..., i+k:, i:i+k] if lower else a[..., i:i+k, i+k:]
- // a_slice_2 = T(a_slice_2) if transpose_a else a_slice_2
- // b[..., :, i+k:] -= np.matmul(output[..., :, i:i+k], a_slice_2)
- if (i + k < n) {
- xla::XlaOp a_slice_2;
- if (lower) {
- a_slice_2 = SliceInMinorDims(a, {i + k, i}, {n, i + k});
- } else {
- a_slice_2 = SliceInMinorDims(a, {i, i + k}, {i + k, n});
- }
-
- auto b_update = BatchDot(update, a_slice_2,
- /*transpose_x=*/false,
- /*transpose_y=*/transpose_a,
- /*conjugate_x=*/false,
- /*conjugate_y=*/conjugate_a);
- auto b_slice_2 = SliceInMinorDims(b, {0, i + k}, {m, n});
- b = UpdateSliceInMinorDims(b, b_slice_2 - b_update, {0, i + k});
- }
- }
-
- } else if (left_side && lower != transpose_a) {
- // for i in range(0, a.shape[-1], block_size):
- for (int64 i = 0; i < m; i += block_size) {
- int64 k = std::min(block_size, m - i);
-
- // output[..., i:i+k, :] = triangular_solve(
- // a[..., i:i+k, i:i+k], b[..., i:i+k, :], ..., block_size=1)
- auto a_slice = SliceInMinorDims(a, {i, i}, {i + k, i + k});
- auto b_slice = SliceInMinorDims(b, {i, 0}, {i + k, n});
- xla::XlaOp update;
- if (k > 1) {
- TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
- get_base_triangular_solve(k));
- update = xla::Call(builder, *solve, {a_slice, b_slice});
- } else {
- auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
- update = b_slice / a_slice_conj;
- }
- output = UpdateSliceInMinorDims(output, update, {i, 0});
-
- // if i + k < a.shape[-1]:
- // a_slice_2 = a[..., i+k:, i:i+k] if lower else a[..., i:i+k, i+k:]
- // a_slice_2 = T(a_slice_2) if transpose_a else a_slice_2
- // b[..., i+k:, :] -= np.matmul(a_slice_2, output[..., i:i+k, :])
- if (i + k < m) {
- xla::XlaOp a_slice_2;
- if (lower) {
- a_slice_2 = SliceInMinorDims(a, {i + k, i}, {m, i + k});
- } else {
- a_slice_2 = SliceInMinorDims(a, {i, i + k}, {i + k, m});
- }
-
- auto b_update = BatchDot(a_slice_2, update,
- /*transpose_x=*/transpose_a,
- /*transpose_y=*/false,
- /*conjugate_x=*/conjugate_a,
- /*conjugate_y=*/false);
- auto b_slice_2 = SliceInMinorDims(b, {i + k, 0}, {m, n});
- b = UpdateSliceInMinorDims(b, b_slice_2 - b_update, {i + k, 0});
- }
- }
- } else if (!left_side && lower != transpose_a) {
- // for i in reversed(range(0, a.shape[-1], block_size)):
- const int64 last_blk_ix =
- xla::RoundUpToNearest(n, block_size) - block_size;
- for (int64 i = last_blk_ix; i >= 0; i -= block_size) {
- int64 k = std::min(block_size, n - i);
-
- // output[..., :, i:i+k] triangular_solve(
- // a[..., i:i+k, i:i+k], b[..., :, i:i+k], ..., block_size=1)
- auto a_slice = SliceInMinorDims(a, {i, i}, {i + k, i + k});
- auto b_slice = SliceInMinorDims(b, {0, i}, {m, i + k});
- xla::XlaOp update;
- if (k > 1) {
- TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
- get_base_triangular_solve(k));
- update = xla::Call(builder, *solve, {a_slice, b_slice});
- } else {
- auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
- update = b_slice / a_slice_conj;
- }
- output = UpdateSliceInMinorDims(output, update, {0, i});
-
- // if i - k >= 0:
- // a_slice_2 = a[..., i:i+k, :i] if lower else a[..., :i, i:i+k]
- // a_slice_2 = T(a_slice_2) if transpose_a else a_slice_2
- // b[..., :, :i] -= np.matmul(out[..., :, i:i+k], a_slice_2)
- if (i - k >= 0) {
- xla::XlaOp a_slice_2;
- if (lower) {
- a_slice_2 = SliceInMinorDims(a, {i, 0}, {i + k, i});
- } else {
- a_slice_2 = SliceInMinorDims(a, {0, i}, {i, i + k});
- }
-
- auto b_update = BatchDot(update, a_slice_2,
- /*transpose_x=*/false,
- /*transpose_y=*/transpose_a,
- /*conjugate_x=*/false,
- /*conjugate_y=*/conjugate_a);
- auto b_slice_2 = SliceInMinorDims(b, {0, 0}, {m, i});
- b = UpdateSliceInMinorDims(b, b_slice_2 - b_update, {0, 0});
- }
- }
- } else { // left_side && lower == transpose_a
- // for i in reversed(range(0, a.shape[-1], block_size)):
- const int64 last_blk_ix =
- xla::RoundUpToNearest(m, block_size) - block_size;
- for (int64 i = last_blk_ix; i >= 0; i -= block_size) {
- int64 k = std::min(block_size, m - i);
-
- // output[..., i:i+k, :] triangular_solve(
- // a[..., i:i+k, i:i+k], b[..., i:i+k, :], ..., block_size=1)
- auto a_slice = SliceInMinorDims(a, {i, i}, {i + k, i + k});
- auto b_slice = SliceInMinorDims(b, {i, 0}, {i + k, n});
- xla::XlaOp update;
- if (k > 1) {
- TF_ASSIGN_OR_RETURN(xla::XlaComputation * solve,
- get_base_triangular_solve(k));
- update = xla::Call(builder, *solve, {a_slice, b_slice});
- } else {
- auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
- update = b_slice / a_slice_conj;
- }
- output = UpdateSliceInMinorDims(output, update, {i, 0});
-
- // if i - k >= 0:
- // a_slice_2 = a[..., i:i+k, :i] if lower else a[..., :i, i:i+k]
- // a_slice_2 = T(a_slice_2) if transpose_a else a_slice_2
- // b[..., :i, :] -= np.matmul(a_slice_2, out[..., i:i+k, :])
- if (i - k >= 0) {
- xla::XlaOp a_slice_2;
- if (lower) {
- a_slice_2 = SliceInMinorDims(a, {i, 0}, {i + k, i});
- } else {
- a_slice_2 = SliceInMinorDims(a, {0, i}, {i, i + k});
- }
-
- auto b_update = BatchDot(a_slice_2, update,
- /*transpose_x=*/transpose_a,
- /*transpose_y=*/false,
- /*conjugate_x=*/conjugate_a,
- /*conjugate_y=*/false);
- auto b_slice_2 = SliceInMinorDims(b, {0, 0}, {i, n});
- b = UpdateSliceInMinorDims(b, b_slice_2 - b_update, {0, 0});
- }
- }
- }
-
- return output;
- });
-}
-
-xla::XlaOp TriangularSolveLeftLooking(xla::XlaOp a, xla::XlaOp b,
- bool transpose_a, bool conjugate_a) {
- xla::XlaBuilder* builder = a.builder();
- return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
- TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
- TF_ASSIGN_OR_RETURN(xla::Shape b_shape, builder->GetShape(b));
- const int64 m = xla::ShapeUtil::GetDimension(b_shape, -2);
- const int64 n = xla::ShapeUtil::GetDimension(b_shape, -1);
- const int64 ndims = xla::ShapeUtil::Rank(a_shape);
-
- std::vector<int64> batch_dimensions;
- for (int i = 0; i < ndims - 2; ++i) {
- int64 a_size = a_shape.dimensions(i);
- batch_dimensions.push_back(a_size);
- }
-
- // The main computation is performed in a While loop.
-
- // Allocate the output and set its first or last row,
- // output = np.zeros_like(b)
- // if transpose_a:
- // output[..., m-1:, :] = b[..., m-1:, :] / a[..., m-1:, m-1:]
- // else:
- // output[..., :1, :] = b[..., :1, :] / a[..., :1, :1]
- xla::XlaOp output = xla::ZerosLike(b);
- {
- auto i = transpose_a ? m - 1 : 0;
- auto a_slice = SliceInMinorDims(a, {i, i}, {i + 1, i + 1});
- auto b_slice = SliceInMinorDims(b, {i, 0}, {i + 1, n});
- auto a_slice_conj = MaybeConjugate(a_slice, conjugate_a);
- auto update = b_slice / a_slice_conj;
- output = UpdateSliceInMinorDims(output, update, {i, 0});
- }
-
- // Construct the initial loop carry tuple,
- // if transpose_a:
- // init = (m-2, output, a, b)
- // else:
- // init = (1, output, a, b)
- std::vector<xla::Shape> tuple_shapes = {
- // The loop iteration counter is a scalar, incremented each iteration.
- xla::ShapeUtil::MakeShape(xla::S32, {}),
- // The output has the shape of b, with one row updated each iteration.
- b_shape,
- // The coefficient matrix a is a loop invariant.
- a_shape,
- // The right-hand-side matrix b is a loop invariant.
- b_shape};
- xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(tuple_shapes);
- auto init_i = xla::ConstantR0<int32>(builder, transpose_a ? m - 2 : 1);
- auto init = xla::Tuple(builder, {init_i, output, a, b});
-
- // Construct the loop condition function,
- // def cond_fun(loop_carry):
- // i, output, a, b = loop_carry
- // return i >= 0 if transpose_a else i < m
- std::unique_ptr<xla::XlaBuilder> condb =
- builder->CreateSubBuilder("TriangularSolveLeftLookingWhileCond");
- {
- auto i = xla::GetTupleElement(
- xla::Parameter(condb.get(), 0, tuple_shape,
- "TriangularSolveLeftLookingWhileTuple"),
- 0);
- if (transpose_a) {
- xla::Ge(i, xla::ConstantR0<int32>(condb.get(), 0));
- } else {
- xla::Lt(i, xla::ConstantR0<int32>(condb.get(), m));
- }
- }
- TF_ASSIGN_OR_RETURN(auto cond, condb->Build());
-
- // Construct the loop body function,
- // def body_fun(loop_carry):
- // i, output, a, b = loop_carry
- // if transpose_a:
- // a_row = np.swapaxes(a[..., i+1:, i:i+1], -1 -2)
- // else:
- // a_row = a[..., i:i+1, :i]
- // result_row = b[..., i:i+1, :] - np.matmul(a_row, output[..., :, :])
- // output[..., i:i+1, :] = result_row / a[..., i:i+1, i:i+1]
- // if transpose_a:
- // return (i - 1, output, a, b)
- // else:
- // return (i + 1, output, a, b)
- // We have to do some extra FLOPs propagating zeros in the matrix multiply
- // because we can't have the size of its arguments depend on the loop
- // counter.
- std::unique_ptr<xla::XlaBuilder> bodyb =
- builder->CreateSubBuilder("TriangularSolveLeftLookingWhileBody");
- {
- auto input_tuple = xla::Parameter(bodyb.get(), 0, tuple_shape,
- "TriangularSolveLeftLookingWhileTuple");
-
- // i, output, a, b = loop_carry
- auto i = xla::GetTupleElement(input_tuple, 0);
- auto body_out = xla::GetTupleElement(input_tuple, 1);
- auto body_a = xla::GetTupleElement(input_tuple, 2);
- auto body_b = xla::GetTupleElement(input_tuple, 3);
- auto zero = xla::ConstantR0<int32>(bodyb.get(), 0);
-
- // We'd like to implement this:
- // if transpose_a:
- // a_row = T(a[..., i+1:, i:i+1])
- // result_row = (b[..., i:i+1, :]
- // - np.matmul(a_row, body_out[..., i+1:, :]))
- // else:
- // result_row = (b[..., i:i+1, :]
- // - np.matmul(a[..., i:i+1, :i], body_out[..., :i, :]))
- // But since we can't have intermediate array sizes depend on the loop
- // counter, we instead exploit the fact that we initialized the output to
- // all zeros and use that as zero-padding (doing unnecessary FLOPs).
- xla::XlaOp a_row;
- if (transpose_a) {
- a_row = DynamicSliceInMinorDims(body_a, {zero, i}, {m, 1});
- } else {
- a_row = DynamicSliceInMinorDims(body_a, {i, zero}, {1, m});
- }
- auto b_update = BatchDot(a_row, body_out,
- /*transpose_x=*/transpose_a,
- /*transpose_y=*/false,
- /*conjugate_x=*/conjugate_a,
- /*conjugate_y=*/false);
- auto result_row_slice =
- DynamicSliceInMinorDims(body_b, {i, zero}, {1, n});
- auto result_row = result_row_slice - b_update;
-
- // body_out[..., i:i+1, :] = result_row / a[..., i:i+1, i:i+1]
- auto a_elt = DynamicSliceInMinorDims(body_a, {i, i}, {1, 1});
- auto a_elt_conj = MaybeConjugate(a_elt, conjugate_a);
- auto div_result = xla::Div(result_row, a_elt_conj);
- body_out = DynamicUpdateSliceInMinorDims(body_out, div_result, {i, zero});
-
- // if transpose_a:
- // return (i - 1, body_out, a, b)
- // else:
- // return (i + 1, body_out, a, b)
- auto next_i = xla::Add(
- i, xla::ConstantR0<int32>(bodyb.get(), transpose_a ? -1 : 1));
- xla::Tuple(bodyb.get(), {next_i, body_out, body_a, body_b});
- }
- TF_ASSIGN_OR_RETURN(auto body, bodyb->Build());
-
- // Construct the While loop and return the result,
- // return while_loop(cond_fun, body_fun, init)[1]
- auto triangular_solve_left_looking_while = xla::While(cond, body, init);
- return xla::GetTupleElement(triangular_solve_left_looking_while, 1);
- });
-}
+ // We find the diagonal blocks of the coefficient matrix
+ auto diag_blocks = DiagonalBlocks(a, block_size);
-xla::XlaOp TriangularSolveRightLooking(xla::XlaOp a, xla::XlaOp b,
- bool transpose_a, bool conjugate_a) {
- xla::XlaBuilder* builder = a.builder();
- return builder->ReportErrorOrReturn([&]() -> xla::StatusOr<xla::XlaOp> {
- TF_ASSIGN_OR_RETURN(xla::Shape a_shape, builder->GetShape(a));
- TF_ASSIGN_OR_RETURN(xla::Shape b_shape, builder->GetShape(b));
- const int64 m = xla::ShapeUtil::GetDimension(b_shape, -2);
- const int64 n = xla::ShapeUtil::GetDimension(b_shape, -1);
- const int64 ndims = xla::ShapeUtil::Rank(a_shape);
-
- std::vector<int64> batch_dimensions;
- for (int i = 0; i < ndims - 2; ++i) {
- int64 a_size = a_shape.dimensions(i);
- batch_dimensions.push_back(a_size);
- }
+ // We invert these blocks in parallel using batched matrix-vector products
+ auto inv_diag_blocks =
+ InvertDiagonalBlocks(diag_blocks, lower, transpose_a, conjugate_a);
- // The main computation is performed in a While loop.
- xla::XlaOp output = xla::ZerosLike(b);
+ // We now find the solution using GEMMs
+ auto x = SolveWithInvertedDiagonalBlocks(a, b, inv_diag_blocks, left_side,
+ lower, transpose_a, conjugate_a);
- // Construct the initial loop carry tuple,
- // if transpose_a:
- // init = (0, output, a, b)
- // else:
- // init = (n-1, output, a, b)
- std::vector<xla::Shape> tuple_shapes = {
- // The loop iteration counter is a scalar, incremented each iteration.
- xla::ShapeUtil::MakeShape(xla::S32, {}),
- // The output has the shape of b, with one row updated each iteration.
- b_shape,
- // The coefficient matrix a is a loop invariant.
- a_shape,
- // The right-hand-side matrix b is a loop invariant.
- b_shape};
- xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(tuple_shapes);
- auto init_i = xla::ConstantR0<int32>(builder, transpose_a ? 0 : n - 1);
- auto init = xla::Tuple(builder, {init_i, output, a, b});
-
- // Construct the loop condition function,
- // def cond_fun(loop_carry):
- // i, output, a, b = loop_carry
- // return i < n if transpose_a else i >= 0
- std::unique_ptr<xla::XlaBuilder> condb =
- builder->CreateSubBuilder("TriangularSolveRightLookingWhileCond");
- {
- auto i = xla::GetTupleElement(
- xla::Parameter(condb.get(), 0, tuple_shape,
- "TriangularSolveRightLookingWhileTuple"),
- 0);
- if (transpose_a) {
- xla::Lt(i, xla::ConstantR0<int32>(condb.get(), n));
- } else {
- xla::Ge(i, xla::ConstantR0<int32>(condb.get(), 0));
- }
- }
- TF_ASSIGN_OR_RETURN(auto cond, condb->Build());
-
- // Construct the loop body function,
- // def body_fun(loop_carry):
- // i, output, a, b = loop_carry
- // if transpose_a:
- // a_row = np.swapaxes(a[..., :, i:i+1], -1, -2)
- // else:
- // a_row = a[..., :, i:i+1]
- // result_row = b[..., :, i:i+1] - np.matmul(output, a_row)
- // output[..., :, i:i+1] = result_row / a[..., i:i+1, i:i+1]
- // if transpose_a:
- // return (i - 1, output, a, b)
- // else:
- // return (i + 1, output, a, b)
- // We have to do some extra FLOPs propagating zeros in the matrix multiply
- // because we can't have the size of its arguments depend on the loop
- // counter.
- std::unique_ptr<xla::XlaBuilder> bodyb =
- builder->CreateSubBuilder("TriangularSolveRightLookingWhileBody");
- {
- auto input_tuple = xla::Parameter(
- bodyb.get(), 0, tuple_shape, "TriangularSolveRightLookingWhileTuple");
-
- // i, output, a, b = loop_carry
- auto i = xla::GetTupleElement(input_tuple, 0);
- auto body_out = xla::GetTupleElement(input_tuple, 1);
- auto body_a = xla::GetTupleElement(input_tuple, 2);
- auto body_b = xla::GetTupleElement(input_tuple, 3);
- auto zero = xla::ConstantR0<int32>(bodyb.get(), 0);
-
- // result = b - np.matmul(output, a)
- // result_row = result[..., :, i:i+1]
- auto body_b_slice = DynamicSliceInMinorDims(body_b, {zero, i}, {m, 1});
- xla::XlaOp a_slice;
- if (transpose_a) {
- a_slice = DynamicSliceInMinorDims(body_a, {i, zero}, {1, n});
- } else {
- a_slice = DynamicSliceInMinorDims(body_a, {zero, i}, {n, 1});
- }
- auto b_update = body_b_slice - BatchDot(body_out, a_slice,
- /*transpose_x=*/false,
- /*transpose_y=*/transpose_a,
- /*conjugate_x=*/false,
- /*conjugate_y=*/conjugate_a);
-
- // body_out[..., :, i:i+1] = b_update / a[..., i:i+1, i:i+1]
- auto a_ii = DynamicSliceInMinorDims(body_a, {i, i}, {1, 1});
- auto a_ii_conj = MaybeConjugate(a_ii, conjugate_a);
- body_out = DynamicUpdateSliceInMinorDims(body_out, b_update / a_ii_conj,
- {zero, i});
-
- // if transpose_a:
- // return (i + 1, body_out, a, b)
- // else:
- // return (i - 1, body_out, a, b)
- auto next_i = xla::Add(
- i, xla::ConstantR0<int32>(bodyb.get(), transpose_a ? 1 : -1));
- xla::Tuple(bodyb.get(), {next_i, body_out, body_a, body_b});
- }
- TF_ASSIGN_OR_RETURN(auto body, bodyb->Build());
-
- // Construct the While loop and return the result,
- // return while_loop(cond_fun, body_fun, init)[1]
- auto triangular_solve_left_looking_while = xla::While(cond, body, init);
- return xla::GetTupleElement(triangular_solve_left_looking_while, 1);
+ return x;
});
}
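The rewritten TriangularSolve above now composes the helpers this change calls into (DiagonalBlocks, InvertDiagonalBlocks, SolveWithInvertedDiagonalBlocks): carve the diagonal blocks out of the coefficient matrix, invert them once as a batch, and then turn every block-row solve into plain matrix multiplies. Below is a minimal NumPy sketch of that strategy for one configuration (left_side=True, lower=True, transpose_a=False); the function name and random test data are illustrative only, and np.linalg.inv stands in for the batched block inversion.

    import numpy as np

    def blocked_lower_left_solve(a, b, block_size):
        # Solve a @ x = b for lower-triangular a by (1) inverting each
        # diagonal block up front and (2) sweeping forward one block row
        # at a time using only matrix multiplies.
        n = a.shape[-1]
        x = np.zeros_like(b)
        inv_blocks = []
        for i in range(0, n, block_size):
            k = min(block_size, n - i)
            inv_blocks.append(np.linalg.inv(a[i:i + k, i:i + k]))
        for idx, i in enumerate(range(0, n, block_size)):
            k = min(block_size, n - i)
            rhs = b[i:i + k] - a[i:i + k, :i] @ x[:i]
            x[i:i + k] = inv_blocks[idx] @ rhs
        return x

    a = np.tril(np.random.rand(7, 7)) + 7 * np.eye(7)
    b = np.random.rand(7, 3)
    assert np.allclose(blocked_lower_left_solve(a, b, block_size=3),
                       np.linalg.solve(a, b))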
diff --git a/tensorflow/compiler/tf2xla/lib/triangular_solve.h b/tensorflow/compiler/tf2xla/lib/triangular_solve.h
index 80c2bc4c9c..2dce620ba8 100644
--- a/tensorflow/compiler/tf2xla/lib/triangular_solve.h
+++ b/tensorflow/compiler/tf2xla/lib/triangular_solve.h
@@ -59,13 +59,7 @@ namespace tensorflow {
// blocking is used.
xla::XlaOp TriangularSolve(xla::XlaOp a, xla::XlaOp b, bool left_side,
bool lower, bool transpose_a, bool conjugate_a,
- int64 block_size = 256);
-
-xla::XlaOp TriangularSolveLeftLooking(xla::XlaOp a, xla::XlaOp b,
- bool transpose_a, bool conjugate_a);
-
-xla::XlaOp TriangularSolveRightLooking(xla::XlaOp a, xla::XlaOp b,
- bool transpose_a, bool conjugate_a);
+ int64 block_size = 128);
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/lib/triangular_solve_test.cc b/tensorflow/compiler/tf2xla/lib/triangular_solve_test.cc
index f1bff6037b..a29496dec4 100644
--- a/tensorflow/compiler/tf2xla/lib/triangular_solve_test.cc
+++ b/tensorflow/compiler/tf2xla/lib/triangular_solve_test.cc
@@ -207,6 +207,28 @@ XLA_TEST_F(TriangularSolveTest, SimpleLeftLowerNotranspose) {
xla::ErrorSpec(1e-2, 1e-2));
}
+XLA_TEST_F(TriangularSolveTest, SimpleLeftLowerNotransposeIrregularblock) {
+ xla::XlaBuilder builder(TestName());
+
+ xla::XlaOp a, b;
+ auto a_data = CreateR2Parameter<float>(AValsLower(), 0, "a", &builder, &a);
+ auto b_data = CreateR2Parameter<float>(BValsLeft(), 1, "b", &builder, &b);
+ TriangularSolve(a, b,
+ /*left_side=*/true, /*lower=*/true,
+ /*transpose_a=*/false, /*conjugate_a=*/false,
+ /*block_size=*/3);
+
+ xla::Array2D<float> expected({
+ {0.5, 1.0, 1.5},
+ {0.41666667, 0.33333333, 0.25},
+ {0.23148148, 0.18518519, 0.13888889},
+ {0.16835017, 0.13468013, 0.1010101},
+ });
+
+ ComputeAndCompareR2<float>(&builder, expected, {a_data.get(), b_data.get()},
+ xla::ErrorSpec(1e-2, 1e-2));
+}
+
XLA_TEST_F(TriangularSolveTest, SimpleLeftUpperTranspose) {
xla::XlaBuilder builder(TestName());
@@ -307,47 +329,5 @@ XLA_TEST_F(TriangularSolveTest, SimpleLeftUpperTransposeNoconjugate) {
xla::ErrorSpec(1e-2, 1e-2));
}
-XLA_TEST_F(TriangularSolveLeftLookingTest, Simple) {
- xla::XlaBuilder builder(TestName());
-
- xla::XlaOp a, b;
- auto a_data = CreateR2Parameter<float>(AValsLower(), 0, "a", &builder, &a);
- auto b_data = CreateR2Parameter<float>(BValsLeft(), 1, "b", &builder, &b);
- TriangularSolveLeftLooking(a, b,
- /*transpose_a=*/false,
- /*conjugate_a=*/false);
-
- xla::Array2D<float> expected({
- {0.5, 1.0, 1.5},
- {0.41666667, 0.33333333, 0.25},
- {0.23148148, 0.18518519, 0.13888889},
- {0.16835017, 0.13468013, 0.1010101},
- });
-
- ComputeAndCompareR2<float>(&builder, expected, {a_data.get(), b_data.get()},
- xla::ErrorSpec(1e-2, 1e-2));
-}
-
-XLA_TEST_F(TriangularSolveLeftLookingTest, NonzeroUpperTriangle) {
- xla::XlaBuilder builder(TestName());
-
- xla::XlaOp a, b;
- auto a_data = CreateR2Parameter<float>(AValsFull(), 0, "a", &builder, &a);
- auto b_data = CreateR2Parameter<float>(BValsLeft(), 1, "b", &builder, &b);
- TriangularSolveLeftLooking(a, b,
- /*transpose_a=*/false,
- /*conjugate_a=*/false);
-
- xla::Array2D<float> expected({
- {0.5, 1.0, 1.5},
- {0.41666667, 0.33333333, 0.25},
- {0.23148148, 0.18518519, 0.13888889},
- {0.16835017, 0.13468013, 0.1010101},
- });
-
- ComputeAndCompareR2<float>(&builder, expected, {a_data.get(), b_data.get()},
- xla::ErrorSpec(1e-2, 1e-2));
-}
-
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/compiler/tf2xla/xla_compiler.cc b/tensorflow/compiler/tf2xla/xla_compiler.cc
index 319cbc74e9..cb47581e36 100644
--- a/tensorflow/compiler/tf2xla/xla_compiler.cc
+++ b/tensorflow/compiler/tf2xla/xla_compiler.cc
@@ -422,16 +422,18 @@ Status BuildComputation(
// assignment will be placed on this value, which will cause the resource
// update to be returned from the same device that provided the resource.
handle = xla::GetTupleElement(xla::Tuple(builder, {handle}), 0);
-
elems.push_back(handle);
}
}
*num_computation_outputs = elems.size();
- // Builds the XLA computation.
- if (always_return_tuple || elems.size() != 1) {
- xla::Tuple(builder, elems);
+ // Builds the XLA computation. We *always* form a tuple here to ensure that
+ // the output value is the last thing added into the XLA computation, even
+ // if there is only one output value.
+ auto tuple = xla::Tuple(builder, elems);
+ if (!always_return_tuple && elems.size() == 1) {
+ xla::GetTupleElement(tuple, 0);
}
builder->ClearOpMetadata();
diff --git a/tensorflow/compiler/tf2xla/xla_compiler_test.cc b/tensorflow/compiler/tf2xla/xla_compiler_test.cc
index 6f76816a86..2fb93be01d 100644
--- a/tensorflow/compiler/tf2xla/xla_compiler_test.cc
+++ b/tensorflow/compiler/tf2xla/xla_compiler_test.cc
@@ -228,6 +228,58 @@ TEST_F(XlaCompilerTest, Simple) {
EXPECT_TRUE(xla::LiteralTestUtil::Equal(*expected_literal, *actual_literal));
}
+// Tests compilation of a graph where the _Retval node is not necessarily last
+// amongst the graph nodes in construction order, and always_return_tuple is
+// false. Regression test for a bug where the wrong value was returned.
+TEST_F(XlaCompilerTest, OutOfOrderGraph) {
+ Scope scope = Scope::NewRootScope().ExitOnError();
+ auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
+ auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
+ // The _Retval node is not last in construction order.
+ auto d = ops::_Retval(scope.WithOpName("D"), a, 0);
+ auto c = ops::Add(scope.WithOpName("C"), a, b);
+
+ std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
+ TF_ASSERT_OK(scope.ToGraph(graph.get()));
+
+ // Builds a description of the arguments.
+ std::vector<XlaCompiler::Argument> args(2);
+ args[0].kind = XlaCompiler::Argument::kParameter;
+ args[0].type = DT_INT32;
+ args[0].shape = TensorShape({2});
+ args[1].kind = XlaCompiler::Argument::kParameter;
+ args[1].type = DT_INT32;
+ args[1].shape = TensorShape({2});
+
+ // Compiles the graph.
+ XlaCompiler compiler(DefaultOptions());
+
+ XlaCompiler::CompileOptions compile_options;
+ compile_options.always_return_tuple = false;
+ XlaCompiler::CompilationResult result;
+ TF_ASSERT_OK(compiler.CompileGraph(compile_options, "add", std::move(graph),
+ args, &result));
+
+ // Tests that the generated computation works.
+ std::unique_ptr<xla::Literal> param0_literal =
+ xla::LiteralUtil::CreateR1<int32>({7, 42});
+ std::unique_ptr<xla::Literal> param1_literal =
+ xla::LiteralUtil::CreateR1<int32>({-3, 101});
+ std::unique_ptr<xla::GlobalData> param0_data =
+ client_->TransferToServer(*param0_literal).ConsumeValueOrDie();
+ std::unique_ptr<xla::GlobalData> param1_data =
+ client_->TransferToServer(*param1_literal).ConsumeValueOrDie();
+
+ std::unique_ptr<xla::GlobalData> actual =
+ client_
+ ->Execute(*result.computation, {param0_data.get(), param1_data.get()})
+ .ConsumeValueOrDie();
+ std::unique_ptr<xla::Literal> actual_literal =
+ client_->Transfer(*actual).ConsumeValueOrDie();
+
+ EXPECT_TRUE(xla::LiteralTestUtil::Equal(*param0_literal, *actual_literal));
+}
+
TEST_F(XlaCompilerTest, HasSaneErrorOnNonCompileTimeConstantInputToReshape) {
  // Builds a graph that reshapes a tensor, but with the shape not
// statically known.
diff --git a/tensorflow/compiler/tf2xla/xla_op_registry.h b/tensorflow/compiler/tf2xla/xla_op_registry.h
index 2d4593ea49..fc14834ca6 100644
--- a/tensorflow/compiler/tf2xla/xla_op_registry.h
+++ b/tensorflow/compiler/tf2xla/xla_op_registry.h
@@ -279,7 +279,7 @@ class XlaOpRegistrar {
#define REGISTER_XLA_OP_UNIQ(CTR, BUILDER, OP) \
static ::tensorflow::XlaOpRegistrar xla_op_registrar__body__##CTR##__object( \
- XlaOpRegistrationBuilder::BUILDER.Build( \
+ ::tensorflow::XlaOpRegistrationBuilder::BUILDER.Build( \
[](::tensorflow::OpKernelConstruction* context) \
-> ::tensorflow::OpKernel* { return new OP(context); }));
diff --git a/tensorflow/compiler/xla/client/client.cc b/tensorflow/compiler/xla/client/client.cc
index 3a157c69cd..8e54311bad 100644
--- a/tensorflow/compiler/xla/client/client.cc
+++ b/tensorflow/compiler/xla/client/client.cc
@@ -409,8 +409,10 @@ StatusOr<string> Client::ExecutionStatsAsString(
return string("[Execution Statistics] not available.");
}
-StatusOr<ChannelHandle> Client::CreateChannelHandle() {
+StatusOr<ChannelHandle> Client::CreateChannelHandleByType(
+ ChannelHandle::ChannelType type) {
CreateChannelHandleRequest request;
+ request.set_channel_type(type);
CreateChannelHandleResponse response;
VLOG(1) << "making create channel handle request";
@@ -424,4 +426,16 @@ StatusOr<ChannelHandle> Client::CreateChannelHandle() {
return response.channel();
}
+StatusOr<ChannelHandle> Client::CreateChannelHandle() {
+ return CreateChannelHandleByType(ChannelHandle::DEVICE_TO_DEVICE);
+}
+
+StatusOr<ChannelHandle> Client::CreateHostToDeviceChannelHandle() {
+ return CreateChannelHandleByType(ChannelHandle::HOST_TO_DEVICE);
+}
+
+StatusOr<ChannelHandle> Client::CreateDeviceToHostChannelHandle() {
+ return CreateChannelHandleByType(ChannelHandle::DEVICE_TO_HOST);
+}
+
} // namespace xla
diff --git a/tensorflow/compiler/xla/client/client.h b/tensorflow/compiler/xla/client/client.h
index 69d4d300ca..d751e183dd 100644
--- a/tensorflow/compiler/xla/client/client.h
+++ b/tensorflow/compiler/xla/client/client.h
@@ -178,10 +178,15 @@ class Client {
StatusOr<std::unique_ptr<ProgramShape>> GetComputationShape(
const XlaComputation& computation);
- // Creates a channel handle that can be used to transfer data between
- // two computations via a pair of Send and Recv instructions.
+ // Creates a channel handle that can be used to transfer data between two
+ // computations on different devices via a pair of Send and Recv instructions.
StatusOr<ChannelHandle> CreateChannelHandle();
+  // Creates a channel handle for communicating with the host via a SendToHost
+  // or RecvFromHost operation.
+ StatusOr<ChannelHandle> CreateHostToDeviceChannelHandle();
+ StatusOr<ChannelHandle> CreateDeviceToHostChannelHandle();
+
StatusOr<XlaComputation> LoadSnapshot(const HloSnapshot& module);
ServiceInterface* stub() { return stub_; }
@@ -192,6 +197,9 @@ class Client {
StatusOr<string> ExecutionStatsAsString(const XlaComputation& computation,
const ExecutionProfile& profile);
+ StatusOr<ChannelHandle> CreateChannelHandleByType(
+ ChannelHandle::ChannelType type);
+
ServiceInterface* stub_; // Stub that this client is connected on.
TF_DISALLOW_COPY_AND_ASSIGN(Client);
diff --git a/tensorflow/compiler/xla/client/lib/BUILD b/tensorflow/compiler/xla/client/lib/BUILD
index 6933e9a838..77ba474cf6 100644
--- a/tensorflow/compiler/xla/client/lib/BUILD
+++ b/tensorflow/compiler/xla/client/lib/BUILD
@@ -97,9 +97,12 @@ cc_library(
srcs = ["numeric.cc"],
hdrs = ["numeric.h"],
deps = [
+ ":arithmetic",
+ ":constants",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:xla_data_proto",
"//tensorflow/compiler/xla/client/xla_client:xla_builder",
+ "//tensorflow/core:lib",
],
)
@@ -119,6 +122,21 @@ xla_test(
)
cc_library(
+ name = "prng",
+ srcs = ["prng.cc"],
+ hdrs = ["prng.h"],
+ deps = [
+ ":constants",
+ ":math",
+ ":numeric",
+ "//tensorflow/compiler/xla:util",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_library(
name = "testing",
srcs = ["testing.cc"],
hdrs = ["testing.h"],
diff --git a/tensorflow/compiler/xla/client/lib/arithmetic.cc b/tensorflow/compiler/xla/client/lib/arithmetic.cc
index 978fc40f34..de1d785e19 100644
--- a/tensorflow/compiler/xla/client/lib/arithmetic.cc
+++ b/tensorflow/compiler/xla/client/lib/arithmetic.cc
@@ -94,16 +94,18 @@ XlaComputation CreateScalarMinComputation(PrimitiveType type,
});
}
-XlaComputation CreateScalarAndComputation(XlaBuilder* builder) {
+XlaComputation CreateScalarAndComputation(PrimitiveType type,
+ XlaBuilder* builder) {
return CreateScalarComputation(
- "and", PRED, builder,
+ "and", type, builder,
[](XlaBuilder* b, const XlaOp& lhs, const XlaOp& rhs) {
return And(lhs, rhs);
});
}
-XlaComputation CreateScalarOrComputation(XlaBuilder* builder) {
- return CreateScalarComputation("or", PRED, builder,
+XlaComputation CreateScalarOrComputation(PrimitiveType type,
+ XlaBuilder* builder) {
+ return CreateScalarComputation("or", type, builder,
[](XlaBuilder* b, const XlaOp& lhs,
const XlaOp& rhs) { return Or(lhs, rhs); });
}
@@ -112,7 +114,7 @@ XlaOp Any(XlaOp predicates) {
XlaBuilder* builder = predicates.builder();
return builder->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
auto f = ConstantR0<bool>(builder, false);
- XlaComputation logical_or = CreateScalarOrComputation(builder);
+ XlaComputation logical_or = CreateScalarOrComputation(PRED, builder);
TF_ASSIGN_OR_RETURN(const Shape& predicates_shape,
builder->GetShape(predicates));
std::vector<int64> all_dimensions(ShapeUtil::Rank(predicates_shape));
diff --git a/tensorflow/compiler/xla/client/lib/arithmetic.h b/tensorflow/compiler/xla/client/lib/arithmetic.h
index d0b916e8c8..8367e09450 100644
--- a/tensorflow/compiler/xla/client/lib/arithmetic.h
+++ b/tensorflow/compiler/xla/client/lib/arithmetic.h
@@ -45,10 +45,12 @@ XlaComputation CreateScalarMinComputation(PrimitiveType type,
XlaBuilder* builder);
// Creates a scalar logical AND computation and returns it.
-XlaComputation CreateScalarAndComputation(XlaBuilder* builder);
+XlaComputation CreateScalarAndComputation(PrimitiveType type,
+ XlaBuilder* builder);
// Creates a scalar logical OR computation and returns it.
-XlaComputation CreateScalarOrComputation(XlaBuilder* builder);
+XlaComputation CreateScalarOrComputation(PrimitiveType type,
+ XlaBuilder* builder);
// Returns whether any predicate in "predicates" is set.
//
diff --git a/tensorflow/compiler/xla/client/lib/math.cc b/tensorflow/compiler/xla/client/lib/math.cc
index 5587559040..0221de7672 100644
--- a/tensorflow/compiler/xla/client/lib/math.cc
+++ b/tensorflow/compiler/xla/client/lib/math.cc
@@ -25,11 +25,9 @@ XlaOp Sqrt(XlaOp operand) { return Pow(operand, ScalarLike(operand, 0.5)); }
XlaOp Rsqrt(XlaOp operand) { return Pow(operand, ScalarLike(operand, -0.5)); }
-XlaOp Square(XlaOp operand) { return Pow(operand, ScalarLike(operand, 2.0)); }
+XlaOp Square(XlaOp operand) { return operand * operand; }
-XlaOp Reciprocal(XlaOp operand) {
- return Pow(operand, ScalarLike(operand, -1.0));
-}
+XlaOp Reciprocal(XlaOp operand) { return ScalarLike(operand, 1.0) / operand; }
namespace {
@@ -149,4 +147,158 @@ XlaOp ErfInv(XlaOp x) {
});
}
+namespace {
+// Coefficients for the Lanczos approximation of the gamma function. The
+// coefficients are uniquely determined by the choice of g and n (kLanczosGamma
+// and kLanczosCoefficients.size() + 1). The coefficients below correspond to
+// [7, 9]. [5, 7], [7, 9], [9, 10], and [607/128.0, 15] were evaluated and [7,
+// 9] seemed to be the least sensitive to the quality of the log function. In
+// particular, [5, 7] is the only choice where -1.5e-5 <= lgamma(2) <= 1.5e-5
+// for a particularly inaccurate log function.
+static constexpr double kLanczosGamma = 7; // aka g
+static constexpr double kBaseLanczosCoeff = 0.99999999999980993227684700473478;
+static constexpr std::array<double, 8> kLanczosCoefficients = {
+ 676.520368121885098567009190444019, -1259.13921672240287047156078755283,
+ 771.3234287776530788486528258894, -176.61502916214059906584551354,
+ 12.507343278686904814458936853, -0.13857109526572011689554707,
+ 9.984369578019570859563e-6, 1.50563273514931155834e-7};
+} // namespace
+
+// Compute the Lgamma function using Lanczos' approximation from "A Precision
+// Approximation of the Gamma Function". SIAM Journal on Numerical Analysis
+// series B. Vol. 1:
+// lgamma(z + 1) = (log(2) + log(pi)) / 2 + (z + 1/2) * log(t(z)) - t(z) + log(A(z))
+// t(z) = z + kLanczosGamma + 1/2
+//  A(z) = kBaseLanczosCoeff + sigma(k = 1, n, kLanczosCoefficients[k - 1] / (z + k))
+XlaOp Lgamma(XlaOp input) {
+ XlaOp one_half = ScalarLike(input, 0.5);
+ XlaOp one = ScalarLike(input, 1);
+
+ XlaOp pi = ScalarLike(input, M_PI);
+ XlaOp log_pi = ScalarLike(input, std::log(M_PI));
+ XlaOp log_sqrt_two_pi = ScalarLike(input, (std::log(2) + std::log(M_PI)) / 2);
+
+ XlaOp lanczos_gamma_plus_one_half = ScalarLike(input, kLanczosGamma + 0.5);
+ XlaOp log_lanczos_gamma_plus_one_half =
+ ScalarLike(input, std::log(kLanczosGamma + 0.5));
+
+ XlaOp base_lanczos_coeff = ScalarLike(input, kBaseLanczosCoeff);
+
+ // If the input is less than 0.5 use Gauss's reflection formula:
+ // gamma(x) = pi / sin(pi * x) * gamma(1 - x)
+ XlaOp need_to_reflect = Lt(Real(input), one_half);
+ XlaOp z = Select(need_to_reflect, -input, input - one);
+
+ XlaOp x = base_lanczos_coeff;
+ for (int i = 0; i < kLanczosCoefficients.size(); ++i) {
+ XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]);
+ XlaOp index = ScalarLike(input, i);
+ x = x + lanczos_coefficient / (z + index + one);
+ }
+
+ // To improve accuracy on platforms with less-precise log implementations,
+ // compute log(lanczos_gamma_plus_one_half) at compile time and use log1p on
+ // the device.
+ // log(t) = log(kLanczosGamma + 0.5 + z)
+ // = log(kLanczosGamma + 0.5) + log1p(z / (kLanczosGamma + 0.5))
+ XlaOp t = lanczos_gamma_plus_one_half + z;
+ XlaOp log_t =
+ log_lanczos_gamma_plus_one_half + Log1p(z / lanczos_gamma_plus_one_half);
+
+ XlaOp log_y = log_sqrt_two_pi + (z + one_half) * log_t - t + Log(x);
+
+ XlaOp reflection = log_pi - Log(Sin(pi * input)) - log_y;
+ XlaOp result = Select(need_to_reflect, reflection, log_y);
+ return result;
+}
+
+// Compute the Digamma function using Lanczos' approximation from "A Precision
+// Approximation of the Gamma Function". SIAM Journal on Numerical Analysis
+// series B. Vol. 1:
+// digamma(z + 1) = log(t(z)) + A'(z) / A(z) - kLanczosGamma / t(z)
+// t(z) = z + kLanczosGamma + 1/2
+// A(z) = kBaseLanczosCoeff + sigma(k = 1, n, kLanczosCoefficients[k - 1] / (z + k))
+// A'(z) = -sigma(k = 1, n, kLanczosCoefficients[k - 1] / (z + k) / (z + k))
+XlaOp Digamma(XlaOp input) {
+ XlaOp zero = ScalarLike(input, 0);
+ XlaOp one_half = ScalarLike(input, 0.5);
+ XlaOp one = ScalarLike(input, 1);
+
+ XlaOp pi = ScalarLike(input, M_PI);
+
+ XlaOp lanczos_gamma = ScalarLike(input, kLanczosGamma);
+ XlaOp lanczos_gamma_plus_one_half = ScalarLike(input, kLanczosGamma + 0.5);
+ XlaOp log_lanczos_gamma_plus_one_half =
+ ScalarLike(input, std::log(kLanczosGamma + 0.5));
+
+ XlaOp base_lanczos_coeff = ScalarLike(input, kBaseLanczosCoeff);
+
+ // If the input is less than 0.5 use Gauss's reflection formula:
+ // digamma(x) = digamma(1 - x) - pi * cot(pi * x)
+ XlaOp need_to_reflect = Lt(Real(input), one_half);
+ XlaOp z = Select(need_to_reflect, -input, input - one);
+
+ XlaOp num = zero;
+ XlaOp denom = base_lanczos_coeff;
+ for (int i = 0; i < kLanczosCoefficients.size(); ++i) {
+ XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]);
+ XlaOp index = ScalarLike(input, i);
+ num = num - lanczos_coefficient / ((z + index + one) * (z + index + one));
+ denom = denom + lanczos_coefficient / (z + index + one);
+ }
+
+ // To improve accuracy on platforms with less-precise log implementations,
+ // compute log(lanczos_gamma_plus_one_half) at compile time and use log1p on
+ // the device.
+ // log(t) = log(kLanczosGamma + 0.5 + z)
+ // = log(kLanczosGamma + 0.5) + log1p(z / (kLanczosGamma + 0.5))
+ XlaOp t = lanczos_gamma_plus_one_half + z;
+ XlaOp log_t =
+ log_lanczos_gamma_plus_one_half + Log1p(z / lanczos_gamma_plus_one_half);
+
+ XlaOp y = log_t + num / denom - lanczos_gamma / t;
+ XlaOp reflection = y - pi * Cos(pi * input) / Sin(pi * input);
+ XlaOp result = Select(need_to_reflect, reflection, y);
+ return result;
+}
+
+// Trigonometric functions.
+
+// acos(x) = 2 * atan(sqrt(1 - x^2) / (1 + x))
+XlaOp Acos(XlaOp x) {
+ return ScalarLike(x, 2.0) *
+ Atan2(Sqrt(ScalarLike(x, 1.0) - x * x), ScalarLike(x, 1.0) + x);
+}
+
+// asin(x) = 2 * atan(x / (1 + sqrt(1 - x^2)))
+XlaOp Asin(XlaOp x) {
+ return ScalarLike(x, 2.0) *
+ Atan2(x, ScalarLike(x, 1.0) + Sqrt(ScalarLike(x, 1.0) - x * x));
+}
+
+XlaOp Atan(XlaOp x) { return Atan2(x, ScalarLike(x, 1.0)); }
+
+XlaOp Tan(XlaOp x) { return Sin(x) / Cos(x); }
+
+// Hyperbolic trigonometric functions.
+
+// acosh(x) = log(x + sqrt(x^2 - 1))
+// = log(x + sqrt((x+1)*(x-1)))
+XlaOp Acosh(XlaOp x) {
+ return Log(x + Sqrt((x + ScalarLike(x, 1.0)) * (x - ScalarLike(x, 1.0))));
+}
+
+// asinh(x) = log(x + sqrt(x^2 + 1))
+XlaOp Asinh(XlaOp x) { return Log(x + Sqrt(x * x + ScalarLike(x, 1.0))); }
+
+// atanh(x) = 0.5 * log((1 + x) / (1 - x))
+XlaOp Atanh(XlaOp x) {
+ return Log((ScalarLike(x, 1.0) + x) / (ScalarLike(x, 1.0) - x)) *
+ ScalarLike(x, 0.5);
+}
+
+XlaOp Cosh(XlaOp x) { return (Exp(x) + Exp(-x)) * ScalarLike(x, 0.5); }
+
+XlaOp Sinh(XlaOp x) { return (Exp(x) - Exp(-x)) * ScalarLike(x, 0.5); }
+
} // namespace xla
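The new Lgamma and Digamma above both rest on the same [7, 9] Lanczos series. The following is a short Python check of that series against math.lgamma using the same constants; it is only a sketch of the approximation, not the XLA code, and the helper name is made up here.

    import math

    LANCZOS_G = 7
    BASE_COEFF = 0.99999999999980993227684700473478
    COEFFS = [676.520368121885098567009190444019,
              -1259.13921672240287047156078755283,
              771.3234287776530788486528258894,
              -176.61502916214059906584551354,
              12.507343278686904814458936853,
              -0.13857109526572011689554707,
              9.984369578019570859563e-6,
              1.50563273514931155834e-7]

    def lanczos_lgamma(x):
        # lgamma(z + 1) = log(sqrt(2*pi)) + (z + 1/2)*log(t) - t + log(A(z)),
        # with t = z + g + 1/2; Gauss's reflection formula handles x < 0.5.
        if x < 0.5:
            return math.log(math.pi / abs(math.sin(math.pi * x))) - lanczos_lgamma(1 - x)
        z = x - 1
        a = BASE_COEFF + sum(c / (z + k) for k, c in enumerate(COEFFS, start=1))
        t = z + LANCZOS_G + 0.5
        return 0.5 * math.log(2 * math.pi) + (z + 0.5) * math.log(t) - t + math.log(a)

    for v in (0.25, 0.5, 1.5, 4.5, 11.0):
        assert abs(lanczos_lgamma(v) - math.lgamma(v)) < 1e-9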
diff --git a/tensorflow/compiler/xla/client/lib/math.h b/tensorflow/compiler/xla/client/lib/math.h
index e7c8b50273..d003d529cc 100644
--- a/tensorflow/compiler/xla/client/lib/math.h
+++ b/tensorflow/compiler/xla/client/lib/math.h
@@ -46,6 +46,43 @@ XlaOp Erf(XlaOp x);
// Computes an approximation of the inverse of the error function.
XlaOp ErfInv(XlaOp x);
+// Computes an approximation of the lgamma function.
+XlaOp Lgamma(XlaOp input);
+
+// Computes an approximation of the digamma function.
+XlaOp Digamma(XlaOp input);
+
+// Trigonometric functions
+
+// Computes the arc cosine of 'x'.
+XlaOp Acos(XlaOp x);
+
+// Computes the arc sine of 'x'.
+XlaOp Asin(XlaOp x);
+
+// Computes the arc tangent of 'x'.
+XlaOp Atan(XlaOp x);
+
+// Computes the tangent of 'x'.
+XlaOp Tan(XlaOp x);
+
+// Hyperbolic trigonometric functions
+
+// Computes the inverse hyperbolic cosine of 'x'.
+XlaOp Acosh(XlaOp x);
+
+// Computes the inverse hyperbolic sine of 'x'.
+XlaOp Asinh(XlaOp x);
+
+// Computes the inverse hyperbolic tangent of 'x'.
+XlaOp Atanh(XlaOp x);
+
+// Computes the hyperbolic cosine of 'x'.
+XlaOp Cosh(XlaOp x);
+
+// Computes the hyperbolic sine of 'x'.
+XlaOp Sinh(XlaOp x);
+
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_CLIENT_LIB_MATH_H_
diff --git a/tensorflow/compiler/xla/client/lib/math_test.cc b/tensorflow/compiler/xla/client/lib/math_test.cc
index 068cd2e586..1df287d7db 100644
--- a/tensorflow/compiler/xla/client/lib/math_test.cc
+++ b/tensorflow/compiler/xla/client/lib/math_test.cc
@@ -82,5 +82,59 @@ XLA_TEST_F(MathTest, SqrtSixValues) {
std::vector<float> expected = {4, 1, 32, 0.4, 0.4472, 111.1080};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
+
+XLA_TEST_F(MathTest, Lgamma) {
+ XlaBuilder builder(TestName());
+ auto x = ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.5, 1.5,
+ 2.5, -1.5, -3.5, -5.5});
+ Lgamma(x);
+
+ std::vector<float> expected = {
+ 0,
+ 0,
+ static_cast<float>(std::log(2)),
+ static_cast<float>(std::log(6)),
+ static_cast<float>(std::log(24)),
+ static_cast<float>(std::log(120)),
+ static_cast<float>(std::log(M_PI) / 2),
+ static_cast<float>(std::log(M_PI) / 2 - std::log(2)),
+ static_cast<float>(std::log(M_PI) / 2 - std::log(4) + std::log(3)),
+ static_cast<float>(std::log(M_PI) / 2 - std::log(3) + std::log(4)),
+ static_cast<float>(std::log(M_PI) / 2 - std::log(105) + std::log(16)),
+ static_cast<float>(std::log(M_PI) / 2 - std::log(10395) + std::log(64))};
+ error_spec_ = ErrorSpec{0.001};
+ ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
+}
+
+XLA_TEST_F(MathTest, Digamma) {
+ XlaBuilder builder(TestName());
+ auto x = ConstantR1<float>(&builder, {1.0, 0.5, 1 / 3.0, 0.25, 1 / 6.0, 0.125,
+ 2.0, 3.0, 4.0, 6.0, 8.0, 9.0});
+ Digamma(x);
+
+ constexpr double euler_mascheroni =
+ 0.57721566490153286060651209008240243104215933593992;
+ std::vector<float> expected = {
+ static_cast<float>(-euler_mascheroni),
+ static_cast<float>(-2 * std::log(2) - euler_mascheroni),
+ static_cast<float>(-M_PI / 2 / std::sqrt(3) - 3 * std::log(3) / 2 -
+ euler_mascheroni),
+ static_cast<float>(-M_PI / 2 - 3 * std::log(2) - euler_mascheroni),
+ static_cast<float>(-M_PI * std::sqrt(3) / 2 - 2 * std::log(2) -
+ 3 * std::log(3) / 2 - euler_mascheroni),
+ static_cast<float>(
+ -M_PI / 2 - 4 * std::log(2) -
+ (M_PI + std::log(2 + std::sqrt(2)) - std::log(2 - std::sqrt(2))) /
+ std::sqrt(2) -
+ euler_mascheroni),
+ static_cast<float>(1 - euler_mascheroni),
+ static_cast<float>(1.5 - euler_mascheroni),
+ static_cast<float>(11 / 6.0 - euler_mascheroni),
+ static_cast<float>(137 / 60.0 - euler_mascheroni),
+ static_cast<float>(363 / 140.0 - euler_mascheroni),
+ static_cast<float>(761 / 280.0 - euler_mascheroni)};
+ ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/client/lib/numeric.cc b/tensorflow/compiler/xla/client/lib/numeric.cc
index fd4e8fc390..1c91237ae1 100644
--- a/tensorflow/compiler/xla/client/lib/numeric.cc
+++ b/tensorflow/compiler/xla/client/lib/numeric.cc
@@ -13,11 +13,14 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include "tensorflow/compiler/xla/client/lib/numeric.h"
-
#include <numeric>
#include <vector>
+#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/core/lib/gtl/array_slice.h"
+
namespace xla {
namespace {
@@ -28,7 +31,7 @@ XlaOp MakeIota(XlaBuilder* builder, int64 size) {
for (int64 i = 0; i < size; ++i) {
values[i] = static_cast<T>(i);
}
- return xla::ConstantR1<T>(builder, values);
+ return ConstantR1<T>(builder, values);
}
} // namespace
@@ -76,4 +79,59 @@ XlaOp IdentityMatrix(XlaBuilder* builder, PrimitiveType type, int64 m,
return ConvertElementType(indicator, type);
}
+XlaOp GetMatrixDiagonal(XlaOp x) {
+ XlaBuilder* builder = x.builder();
+ return builder->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
+ const int64 n_dims = ShapeUtil::Rank(shape);
+ TF_RET_CHECK(n_dims >= 2);
+ const int64 m = shape.dimensions(n_dims - 2);
+ const int64 n = shape.dimensions(n_dims - 1);
+ tensorflow::gtl::ArraySlice<int64> major_dims(
+ AsInt64Slice(shape.dimensions()), /*pos=*/0, /*len=*/n_dims - 2);
+ auto a = Iota(builder, U32, n);
+ auto b = Iota(builder, U32, m);
+ auto indicator = Eq(b, Broadcast(a, {m}), /*broadcast_dimensions=*/{0});
+ auto mask = Broadcast(indicator, major_dims);
+
+    // TPUs don't support S64 add reduction at the moment. But fortunately
+    // OR-reductions work just as well for integers here, since every
+    // masked-off entry is zero.
+ XlaComputation reducer =
+ primitive_util::IsIntegralType(shape.element_type())
+ ? CreateScalarOrComputation(shape.element_type(), builder)
+ : CreateScalarAddComputation(shape.element_type(), builder);
+
+ return Reduce(Select(mask, x, Zeros(builder, shape)), ScalarLike(x, 0),
+ reducer, {m >= n ? n_dims - 2 : n_dims - 1});
+ });
+}
+
+XlaOp Triangle(XlaOp x, bool lower) {
+ XlaBuilder* builder = x.builder();
+ return builder->ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
+ const int64 n_dims = ShapeUtil::Rank(shape);
+ TF_RET_CHECK(n_dims >= 2);
+ const int64 m = shape.dimensions(n_dims - 2);
+ const int64 n = shape.dimensions(n_dims - 1);
+ tensorflow::gtl::ArraySlice<int64> major_dims(
+ AsInt64Slice(shape.dimensions()), /*pos=*/0, /*len=*/n_dims - 2);
+ auto a = Iota(builder, U32, n);
+ auto b = Iota(builder, U32, m);
+ xla::XlaOp indicator;
+ if (lower) {
+ indicator = Ge(b, Broadcast(a, {m}), /*broadcast_dimensions=*/{0});
+ } else {
+ indicator = Le(b, Broadcast(a, {m}), /*broadcast_dimensions=*/{0});
+ }
+ auto mask = Broadcast(indicator, major_dims);
+
+ return Select(mask, x, Zeros(builder, shape));
+ });
+}
+
+XlaOp UpperTriangle(XlaOp x) { return Triangle(x, false); }
+
+XlaOp LowerTriangle(XlaOp x) { return Triangle(x, true); }
+
} // namespace xla
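GetMatrixDiagonal and Triangle above share one idea: build a boolean mask by comparing two iotas, then either select-and-reduce (diagonal) or just select (triangle). Here is a small NumPy sketch of that masking trick, matching the expected values used by the tests later in this diff; the function names are illustrative, not the XLA API.

    import numpy as np

    def get_matrix_diagonal(x):
        # Mask everything off the diagonal, then sum away the longer of the
        # two trailing dims, leaving shape [..., min(m, n)].
        m, n = x.shape[-2:]
        mask = np.arange(m)[:, None] == np.arange(n)[None, :]
        masked = np.where(mask, x, 0)
        return masked.sum(axis=-2 if m >= n else -1)

    def triangle(x, lower):
        # Keep the lower (or upper) triangle of the last two dims, zero the rest.
        m, n = x.shape[-2:]
        row, col = np.arange(m)[:, None], np.arange(n)[None, :]
        mask = row >= col if lower else row <= col
        return np.where(mask, x, 0)

    x = np.arange(24).reshape(2, 3, 4)
    print(get_matrix_diagonal(x))        # [[ 0  5 10] [12 17 22]]
    print(triangle(x, lower=True)[0])    # rows [0 0 0 0], [4 5 0 0], [8 9 10 0]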
diff --git a/tensorflow/compiler/xla/client/lib/numeric.h b/tensorflow/compiler/xla/client/lib/numeric.h
index 79707007b2..212f658313 100644
--- a/tensorflow/compiler/xla/client/lib/numeric.h
+++ b/tensorflow/compiler/xla/client/lib/numeric.h
@@ -29,6 +29,20 @@ XlaOp Iota(XlaBuilder* builder, PrimitiveType type, int64 size);
// else.
XlaOp IdentityMatrix(XlaBuilder* builder, PrimitiveType type, int64 m, int64 n);
+// Get the diagonals of the last two dimensions. If 'x' has shape
+// [..., M, N], then the output has shape [..., min(M, N)], containing the
+// diagonal elements (i.e., with indices [..., i, i]).
+XlaOp GetMatrixDiagonal(XlaOp x);
+
+// Get the upper or lower triangle part of the last two dimensions.
+XlaOp Triangle(XlaOp x, bool lower);
+
+// Get the upper triangle part of the last two dimensions.
+XlaOp UpperTriangle(XlaOp x);
+
+// Get the lower triangle part of the last two dimensions.
+XlaOp LowerTriangle(XlaOp x);
+
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_CLIENT_LIB_NUMERIC_H_
diff --git a/tensorflow/compiler/xla/client/lib/numeric_test.cc b/tensorflow/compiler/xla/client/lib/numeric_test.cc
index bc8a73e9d7..f56cadc547 100644
--- a/tensorflow/compiler/xla/client/lib/numeric_test.cc
+++ b/tensorflow/compiler/xla/client/lib/numeric_test.cc
@@ -24,8 +24,15 @@ limitations under the License.
namespace xla {
namespace {
-using NumericTest = ClientLibraryTestBase;
+class NumericTest : public ClientLibraryTestBase {
+ protected:
+ template <typename T>
+ void TestMatrixDiagonal();
+};
+// TODO(b/64798317): Delete this test case once xla::IotaGen is converted to
+// xla::Iota. This test is already implemented for xla::IotaGen in
+// xla/tests/iota_test.cc.
XLA_TEST_F(NumericTest, Iota) {
XlaBuilder builder(TestName());
Iota(&builder, S32, 10);
@@ -33,5 +40,39 @@ XLA_TEST_F(NumericTest, Iota) {
ComputeAndCompareR1<int32>(&builder, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, {});
}
+XLA_TEST_F(NumericTest, Triangle) {
+ XlaBuilder builder(TestName());
+ Array3D<int32> input(2, 3, 4);
+ input.FillIota(0);
+
+ XlaOp a;
+ auto a_data = CreateR3Parameter<int32>(input, 0, "a", &builder, &a);
+ LowerTriangle(a);
+ Array3D<int32> expected({{{0, 0, 0, 0}, {4, 5, 0, 0}, {8, 9, 10, 0}},
+ {{12, 0, 0, 0}, {16, 17, 0, 0}, {20, 21, 22, 0}}});
+
+ ComputeAndCompareR3<int32>(&builder, expected, {a_data.get()});
+}
+
+template <typename T>
+void NumericTest::TestMatrixDiagonal() {
+ XlaBuilder builder("GetMatrixDiagonal");
+ Array3D<T> input(2, 3, 4);
+ input.FillIota(0);
+
+ XlaOp a;
+ auto a_data = CreateR3Parameter<T>(input, 0, "a", &builder, &a);
+ GetMatrixDiagonal(a);
+ Array2D<T> expected({{0, 5, 10}, {12, 17, 22}});
+
+ ComputeAndCompareR2<T>(&builder, expected, {a_data.get()});
+}
+
+XLA_TEST_F(NumericTest, GetMatrixDiagonal_S32) { TestMatrixDiagonal<int32>(); }
+
+XLA_TEST_F(NumericTest, GetMatrixDiagonal_S64) { TestMatrixDiagonal<int64>(); }
+
+XLA_TEST_F(NumericTest, GetMatrixDiagonal_F32) { TestMatrixDiagonal<float>(); }
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/client/lib/prng.cc b/tensorflow/compiler/xla/client/lib/prng.cc
new file mode 100644
index 0000000000..299a6ac2b6
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/prng.cc
@@ -0,0 +1,150 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <cmath>
+
+#include "tensorflow/compiler/xla/client/lib/constants.h"
+#include "tensorflow/compiler/xla/client/lib/math.h"
+#include "tensorflow/compiler/xla/client/lib/numeric.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/util.h"
+#include "tensorflow/core/lib/core/casts.h"
+
+namespace xla {
+namespace {
+
+// Rotates a 32-bit integer 'v' left by 'distance' bits.
+XlaOp RotateLeftS32(XlaOp v, int distance) {
+ return (v << ConstantR0<int32>(v.builder(), distance)) |
+ ShiftRightLogical(v, ConstantR0<int32>(v.builder(), 32 - distance));
+}
+
+using ThreeFry2x32State = std::array<XlaOp, 2>;
+
+// Implements the ThreeFry counter-based PRNG algorithm.
+// Salmon et al. SC 2011. Parallel random numbers: as easy as 1, 2, 3.
+// http://www.thesalmons.org/john/random123/papers/random123sc11.pdf
+ThreeFry2x32State ThreeFry2x32(ThreeFry2x32State input, ThreeFry2x32State key) {
+ XlaBuilder* builder = input[0].builder();
+ // Rotation distances specified by the Threefry2x32 algorithm.
+ constexpr std::array<int, 8> rotations = {13, 15, 26, 6, 17, 29, 16, 24};
+ ThreeFry2x32State x;
+
+ std::array<XlaOp, 3> ks;
+ // 0x1BD11BDA is a parity constant specified by the ThreeFry2x32 algorithm.
+ ks[2] = ConstantR0<int32>(builder, 0x1BD11BDA);
+ for (int i = 0; i < 2; ++i) {
+ ks[i] = key[i];
+ x[i] = input[i];
+ ks[2] = ks[2] ^ key[i];
+ }
+
+ x[0] = x[0] + ks[0];
+ x[1] = x[1] + ks[1];
+
+ // Performs a single round of the Threefry2x32 algorithm, with a rotation
+ // amount 'rotation'.
+ auto round = [builder](ThreeFry2x32State v, int rotation) {
+ v[0] = v[0] + v[1];
+ v[1] = RotateLeftS32(v[1], rotation);
+ v[1] = v[0] ^ v[1];
+ return v;
+ };
+
+ // There are no known statistical flaws with 13 rounds of Threefry2x32.
+ // We are conservative and use 20 rounds.
+ x = round(x, rotations[0]);
+ x = round(x, rotations[1]);
+ x = round(x, rotations[2]);
+ x = round(x, rotations[3]);
+ x[0] = x[0] + ks[1];
+ x[1] = x[1] + ks[2] + ConstantR0<int32>(builder, 1);
+
+ x = round(x, rotations[4]);
+ x = round(x, rotations[5]);
+ x = round(x, rotations[6]);
+ x = round(x, rotations[7]);
+ x[0] = x[0] + ks[2];
+ x[1] = x[1] + ks[0] + ConstantR0<int32>(builder, 2);
+
+ x = round(x, rotations[0]);
+ x = round(x, rotations[1]);
+ x = round(x, rotations[2]);
+ x = round(x, rotations[3]);
+ x[0] = x[0] + ks[0];
+ x[1] = x[1] + ks[1] + ConstantR0<int32>(builder, 3);
+
+ x = round(x, rotations[4]);
+ x = round(x, rotations[5]);
+ x = round(x, rotations[6]);
+ x = round(x, rotations[7]);
+ x[0] = x[0] + ks[1];
+ x[1] = x[1] + ks[2] + ConstantR0<int32>(builder, 4);
+
+ x = round(x, rotations[0]);
+ x = round(x, rotations[1]);
+ x = round(x, rotations[2]);
+ x = round(x, rotations[3]);
+ x[0] = x[0] + ks[2];
+ x[1] = x[1] + ks[0] + ConstantR0<int32>(builder, 5);
+
+ return x;
+}
+
+} // namespace
+
+XlaOp StatelessRngUniform(std::array<XlaOp, 2> seeds, const Shape& shape,
+ XlaOp minval, XlaOp maxval) {
+ XlaBuilder* builder = seeds[0].builder();
+ if (shape.element_type() != F32) {
+ return builder->ReportError(Unimplemented(
+ "Types other than F32 are not implemented by StatelessRngUniform."));
+ }
+ ThreeFry2x32State key = seeds;
+ const int64 size = ShapeUtil::ElementsIn(shape);
+
+ const int64 half_size = CeilOfRatio<int64>(size, 2);
+ const bool size_is_odd = (half_size * 2 != size);
+
+ // Fill the generator inputs with unique counter values.
+ ThreeFry2x32State inputs;
+ inputs[0] = Iota(builder, S32, half_size);
+ inputs[1] = inputs[0] + ConstantR0<int32>(builder, half_size);
+ ThreeFry2x32State outputs = ThreeFry2x32(inputs, key);
+
+ if (size_is_odd) {
+ outputs[1] = Slice(outputs[1], {0}, {half_size - 1}, {1});
+ }
+
+ auto bits = Reshape(ConcatInDim(builder, outputs, 0),
+ AsInt64Slice(shape.dimensions()));
+
+  // Keep the exponent bits of 1.0f and fill the 23 mantissa bits with random
+  // bits, giving a float uniformly distributed in [1.0, 2.0).
+ constexpr int kFloatBits = 32;
+ constexpr int kMantissaBits = 23;
+ bits = ShiftRightLogical(
+ bits, ConstantR0<int32>(builder, kFloatBits - kMantissaBits)) |
+ ConstantR0<int32>(builder, tensorflow::bit_cast<int32>(1.0f));
+ auto floats = BitcastConvertType(bits, F32);
+
+ // We have a floating point number in the range [1.0, 2.0).
+ // Subtract 1.0f to shift to the range [0.0, 1.0)
+ floats = floats - ConstantR0<float>(builder, 1.0f);
+ // Multiply and add to shift to the range [minval, maxval).
+ return floats * (maxval - minval) + minval;
+}
+
+} // namespace xla
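In StatelessRngUniform above, ThreeFry2x32 supplies counter-based random 32-bit words, and the bit manipulation at the end converts them to uniform floats: keep the exponent of 1.0f, drop 23 random bits into the mantissa to get a value in [1.0, 2.0), subtract 1, then scale into [minval, maxval). Below is a Python sketch of just that conversion step; the random words come from Python's random module purely for illustration, not from ThreeFry.

    import random
    import struct

    def bits_to_unit_float(bits32):
        # OR 23 random mantissa bits with the bit pattern of 1.0f (0x3F800000),
        # giving a float in [1.0, 2.0); subtracting 1.0 maps it to [0.0, 1.0).
        one_bits = struct.unpack('<I', struct.pack('<f', 1.0))[0]
        pattern = (bits32 >> (32 - 23)) | one_bits
        return struct.unpack('<f', struct.pack('<I', pattern))[0] - 1.0

    def scale(unit, minval, maxval):
        return unit * (maxval - minval) + minval

    words = [random.getrandbits(32) for _ in range(5)]
    samples = [scale(bits_to_unit_float(w), -2.0, 3.0) for w in words]
    assert all(-2.0 <= s < 3.0 for s in samples)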
diff --git a/tensorflow/compiler/xla/client/lib/prng.h b/tensorflow/compiler/xla/client/lib/prng.h
new file mode 100644
index 0000000000..ac86390239
--- /dev/null
+++ b/tensorflow/compiler/xla/client/lib/prng.h
@@ -0,0 +1,34 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_CLIENT_LIB_PRNG_H_
+#define TENSORFLOW_COMPILER_XLA_CLIENT_LIB_PRNG_H_
+
+#include <array>
+
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+
+namespace xla {
+
+// Returns a tensor of the given 'shape' filled with random values uniformly
+// distributed in the range [minval, maxval). Requires two 32-bit integer
+// seeds. Currently only shapes with element type F32 are implemented.
+XlaOp StatelessRngUniform(std::array<XlaOp, 2> seeds, const Shape& shape,
+ XlaOp minval, XlaOp maxval);
+
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_CLIENT_LIB_PRNG_H_
diff --git a/tensorflow/compiler/xla/client/xla_client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_client/xla_builder.cc
index aac7df4383..a9a4b3bc5d 100644
--- a/tensorflow/compiler/xla/client/xla_client/xla_builder.cc
+++ b/tensorflow/compiler/xla/client/xla_client/xla_builder.cc
@@ -1845,10 +1845,6 @@ XlaOp XlaBuilder::CrossReplicaSum(
tensorflow::gtl::ArraySlice<int64> replica_group_ids,
const tensorflow::gtl::optional<ChannelHandle>& channel_id) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
- if (channel_id.has_value()) {
- return Unimplemented("channel_id is not supported in AllReduce");
- }
-
HloInstructionProto instr;
TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
TF_ASSIGN_OR_RETURN(
@@ -1858,6 +1854,10 @@ XlaOp XlaBuilder::CrossReplicaSum(
instr.add_replica_group_ids(replica_group_id);
}
+ if (channel_id.has_value()) {
+ instr.set_all_reduce_id(channel_id->handle());
+ }
+
AddCalledComputation(computation, &instr);
return AddInstruction(std::move(instr), HloOpcode::kCrossReplicaSum,
@@ -1940,28 +1940,17 @@ void XlaBuilder::Send(const XlaOp& operand, const ChannelHandle& handle) {
TF_ASSIGN_OR_RETURN(XlaOp token, AddInstruction(std::move(token_instr),
HloOpcode::kAfterAll, {}));
- // Send instruction produces a tuple of {aliased operand, U32 context,
- // token}.
- HloInstructionProto send_instr;
- TF_ASSIGN_OR_RETURN(const Shape& shape, GetShape(operand));
- *send_instr.mutable_shape() = ShapeUtil::MakeTupleShape(
- {shape, ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()});
- send_instr.set_channel_id(handle.handle());
- TF_ASSIGN_OR_RETURN(XlaOp send,
- AddInstruction(std::move(send_instr), HloOpcode::kSend,
- {operand, token}));
-
- HloInstructionProto send_done_instr;
- *send_done_instr.mutable_shape() = ShapeUtil::MakeTokenShape();
- send_done_instr.set_channel_id(handle.handle());
- return AddInstruction(std::move(send_done_instr), HloOpcode::kSendDone,
- {send});
+ return SendWithToken(operand, token, handle);
});
}
XlaOp XlaBuilder::SendWithToken(const XlaOp& operand, const XlaOp& token,
const ChannelHandle& handle) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ if (handle.type() != ChannelHandle::DEVICE_TO_DEVICE) {
+ return InvalidArgument("Send must use a device-to-device channel");
+ }
+
// Send instruction produces a tuple of {aliased operand, U32 context,
// token}.
HloInstructionProto send_instr;
@@ -1992,6 +1981,27 @@ XlaOp XlaBuilder::Recv(const Shape& shape, const ChannelHandle& handle) {
TF_ASSIGN_OR_RETURN(XlaOp token, AddInstruction(std::move(token_instr),
HloOpcode::kAfterAll, {}));
+ XlaOp recv = RecvWithToken(token, shape, handle);
+
+ // The RecvDone instruction produces a tuple of the data and a token
+ // type. Return XLA op containing the data.
+ // TODO(b/80000000): Remove this when clients have been updated to handle
+ // tokens.
+ HloInstructionProto recv_data;
+ *recv_data.mutable_shape() = shape;
+ recv_data.set_tuple_index(0);
+ return AddInstruction(std::move(recv_data), HloOpcode::kGetTupleElement,
+ {recv});
+ });
+}
+
+XlaOp XlaBuilder::RecvWithToken(const XlaOp& token, const Shape& shape,
+ const ChannelHandle& handle) {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ if (handle.type() != ChannelHandle::DEVICE_TO_DEVICE) {
+ return InvalidArgument("Recv must use a device-to-device channel");
+ }
+
// Recv instruction produces a tuple of {receive buffer, U32 context,
// token}.
HloInstructionProto recv_instr;
@@ -2005,31 +2015,81 @@ XlaOp XlaBuilder::Recv(const Shape& shape, const ChannelHandle& handle) {
*recv_done_instr.mutable_shape() =
ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()});
recv_done_instr.set_channel_id(handle.handle());
- TF_ASSIGN_OR_RETURN(XlaOp recv_done,
- AddInstruction(std::move(recv_done_instr),
- HloOpcode::kRecvDone, {recv}));
+ return AddInstruction(std::move(recv_done_instr), HloOpcode::kRecvDone,
+ {recv});
+ });
+}
- // The RecvDone instruction produces a tuple of the data and a token
- // type. Return XLA op containing the data.
- // TODO(b/80000000): Remove this when clients have been updated to handle
- // tokens.
- HloInstructionProto recv_data;
- *recv_data.mutable_shape() = shape;
- recv_data.set_tuple_index(0);
- return AddInstruction(std::move(recv_data), HloOpcode::kGetTupleElement,
- {recv_done});
+XlaOp XlaBuilder::SendToHost(const XlaOp& operand, const XlaOp& token,
+ const Shape& shape_with_layout,
+ const ChannelHandle& handle) {
+ return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ if (!LayoutUtil::HasLayout(shape_with_layout)) {
+ return InvalidArgument("Shape passed to SendToHost must have a layout");
+ }
+ TF_ASSIGN_OR_RETURN(const Shape& operand_shape, GetShape(operand));
+ if (!ShapeUtil::Compatible(operand_shape, shape_with_layout)) {
+ return InvalidArgument(
+ "SendToHost shape %s must be compatible with operand shape %s",
+ ShapeUtil::HumanStringWithLayout(shape_with_layout).c_str(),
+ ShapeUtil::HumanStringWithLayout(operand_shape).c_str());
+ }
+ // TODO(b/111544877): Support tuple shapes.
+ if (!ShapeUtil::IsArray(operand_shape)) {
+ return InvalidArgument("SendToHost only supports array shapes, shape: %s",
+ ShapeUtil::HumanString(operand_shape).c_str());
+ }
+
+ if (handle.type() != ChannelHandle::DEVICE_TO_HOST) {
+ return InvalidArgument("SendToHost must use a device-to-host channel");
+ }
+
+ // Send instruction produces a tuple of {aliased operand, U32 context,
+ // token}.
+ HloInstructionProto send_instr;
+ *send_instr.mutable_shape() = ShapeUtil::MakeTupleShape(
+ {shape_with_layout, ShapeUtil::MakeShape(U32, {}),
+ ShapeUtil::MakeTokenShape()});
+ send_instr.set_channel_id(handle.handle());
+ send_instr.set_is_host_transfer(true);
+ TF_ASSIGN_OR_RETURN(XlaOp send,
+ AddInstruction(std::move(send_instr), HloOpcode::kSend,
+ {operand, token}));
+
+ HloInstructionProto send_done_instr;
+ *send_done_instr.mutable_shape() = ShapeUtil::MakeTokenShape();
+ send_done_instr.set_channel_id(handle.handle());
+ send_done_instr.set_is_host_transfer(true);
+ return AddInstruction(std::move(send_done_instr), HloOpcode::kSendDone,
+ {send});
});
}
-XlaOp XlaBuilder::RecvWithToken(const XlaOp& token, const Shape& shape,
- const ChannelHandle& handle) {
+XlaOp XlaBuilder::RecvFromHost(const XlaOp& token, const Shape& shape,
+ const ChannelHandle& handle) {
return ReportErrorOrReturn([&]() -> StatusOr<XlaOp> {
+ if (!LayoutUtil::HasLayout(shape)) {
+ return InvalidArgument("Shape passed to RecvFromHost must have a layout");
+ }
+
+ // TODO(b/111544877): Support tuple shapes.
+ if (!ShapeUtil::IsArray(shape)) {
+ return InvalidArgument(
+ "RecvFromHost only supports array shapes, shape: %s",
+ ShapeUtil::HumanString(shape).c_str());
+ }
+
+ if (handle.type() != ChannelHandle::HOST_TO_DEVICE) {
+ return InvalidArgument("RecvFromHost must use a host-to-device channel");
+ }
+
// Recv instruction produces a tuple of {receive buffer, U32 context,
// token}.
HloInstructionProto recv_instr;
*recv_instr.mutable_shape() = ShapeUtil::MakeTupleShape(
{shape, ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()});
recv_instr.set_channel_id(handle.handle());
+ recv_instr.set_is_host_transfer(true);
TF_ASSIGN_OR_RETURN(XlaOp recv, AddInstruction(std::move(recv_instr),
HloOpcode::kRecv, {token}));
@@ -2037,6 +2097,7 @@ XlaOp XlaBuilder::RecvWithToken(const XlaOp& token, const Shape& shape,
*recv_done_instr.mutable_shape() =
ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeTokenShape()});
recv_done_instr.set_channel_id(handle.handle());
+ recv_done_instr.set_is_host_transfer(true);
return AddInstruction(std::move(recv_done_instr), HloOpcode::kRecvDone,
{recv});
});
@@ -2760,6 +2821,17 @@ XlaOp RecvWithToken(const XlaOp& token, const Shape& shape,
return token.builder()->RecvWithToken(token, shape, handle);
}
+XlaOp SendToHost(const XlaOp& operand, const XlaOp& token,
+ const Shape& shape_with_layout, const ChannelHandle& handle) {
+ return operand.builder()->SendToHost(operand, token, shape_with_layout,
+ handle);
+}
+
+XlaOp RecvFromHost(const XlaOp& token, const Shape& shape,
+ const ChannelHandle& handle) {
+ return token.builder()->RecvFromHost(token, shape, handle);
+}
+
XlaOp InfeedWithToken(const XlaOp& token, const Shape& shape,
const string& config) {
return token.builder()->InfeedWithToken(token, shape, config);
@@ -2801,4 +2873,11 @@ XlaOp BatchNormGrad(const XlaOp& operand, const XlaOp& scale,
grad_output, epsilon, feature_index);
}
+XlaOp IotaGen(XlaBuilder* builder, PrimitiveType type, int64 size) {
+ HloInstructionProto instr;
+ *instr.mutable_shape() = ShapeUtil::MakeShape(type, {size});
+ return builder->ReportErrorOrReturn(
+ builder->AddInstruction(std::move(instr), HloOpcode::kIota));
+}
+
} // namespace xla
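The new SendToHost/RecvFromHost entry points only accept typed channel handles (DEVICE_TO_HOST and HOST_TO_DEVICE respectively) and shapes that carry a layout. A minimal sketch of how the pieces fit together, assuming the xla namespace and hand-built channel handles with illustrative values (in practice handles come from the service-side ChannelTracker touched later in this diff); the builder and parameter names are placeholders:

    XlaBuilder b("host_transfer_sketch");
    XlaOp token = b.CreateToken();
    XlaOp operand = b.Parameter(0, ShapeUtil::MakeShape(F32, {3}), "x");
    // SendToHost/RecvFromHost require an explicit layout on the transferred shape.
    Shape host_shape = ShapeUtil::MakeShapeWithLayout(F32, {3}, {0});

    ChannelHandle to_host;  // illustrative handle values only
    to_host.set_type(ChannelHandle::DEVICE_TO_HOST);
    to_host.set_handle(1);
    ChannelHandle from_host;
    from_host.set_type(ChannelHandle::HOST_TO_DEVICE);
    from_host.set_handle(2);

    // SendToHost returns the token produced by the SendDone, which can order a
    // later RecvFromHost; RecvFromHost returns a {data, token} tuple.
    XlaOp send_done = b.SendToHost(operand, token, host_shape, to_host);
    XlaOp recv_done = b.RecvFromHost(send_done, host_shape, from_host);
    XlaOp data = b.GetTupleElement(recv_done, 0);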
diff --git a/tensorflow/compiler/xla/client/xla_client/xla_builder.h b/tensorflow/compiler/xla/client/xla_client/xla_builder.h
index 2be6f4a553..8359d936b7 100644
--- a/tensorflow/compiler/xla/client/xla_client/xla_builder.h
+++ b/tensorflow/compiler/xla/client/xla_client/xla_builder.h
@@ -54,7 +54,16 @@ class XlaOp {
}
~XlaOp() = default;
- XlaBuilder* builder() const { return builder_; }
+ // Precondition: !IsUninitialized().
+ //
+ // It's very common to do foo.builder()->bar(). Without this precondition, if
+ // foo.builder() is null, the call to bar will segfault at some point possibly
+ // deep in the callstack when we finally dereference `this`. The precondition
+ // lets us avoid this tricky-to-debug problem.
+ XlaBuilder* builder() const {
+ CHECK(builder_ != nullptr);
+ return builder_;
+ }
  // Returns true if the XlaOp represents a valid, non-erroneous value.
bool valid() const { return handle_ >= 0; }
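The practical effect of the new CHECK, sketched with a hypothetical snippet that is not part of the patch: a default-constructed XlaOp has a null builder, so misuse now fails at the builder() call site instead of segfaulting later inside the op-building code.

    XlaOp uninitialized;  // default-constructed: builder_ is null
    // Before: Add() would walk into XlaBuilder code and crash only when the
    // null builder was eventually dereferenced. After: CHECK(builder_ != nullptr)
    // fires immediately here.
    Add(uninitialized, uninitialized);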
@@ -848,12 +857,21 @@ class XlaBuilder {
const GatherDimensionNumbers& dimension_numbers,
tensorflow::gtl::ArraySlice<int64> window_bounds);
- // Enqueues a Send node onto the computation, to send the given operand to
- // a Recv instruction that shares the same channel handle.
+ // Enqueues a Send node onto the computation for device-to-device
+ // communication, to send the given operand to a Recv instruction that shares
+ // the same channel handle.
void Send(const XlaOp& operand, const ChannelHandle& handle);
XlaOp SendWithToken(const XlaOp& operand, const XlaOp& token,
const ChannelHandle& handle);
+ // Enqueues a Send node which sends data to the host.
+ XlaOp SendToHost(const XlaOp& operand, const XlaOp& token,
+ const Shape& shape_with_layout, const ChannelHandle& handle);
+
+ // Enqueues a Recv node which receives data from the host.
+ XlaOp RecvFromHost(const XlaOp& token, const Shape& shape,
+ const ChannelHandle& handle);
+
// Enqueues an AfterAll operation with no operands producing a token-shaped
// value.
XlaOp CreateToken();
@@ -1244,6 +1262,9 @@ class XlaBuilder {
friend XlaOp Pow(const XlaOp& lhs, const XlaOp& rhs,
tensorflow::gtl::ArraySlice<int64> broadcast_dimensions);
friend XlaOp IsFinite(const XlaOp& operand);
+ // TODO(b/64798317): Finish CPU & GPU implementation, then replace xla::Iota
+ // in xla/client/lib/numeric.h with this (renamed to xla::Iota).
+ friend XlaOp IotaGen(XlaBuilder* builder, PrimitiveType type, int64 size);
friend XlaOp ConvertElementType(const XlaOp& operand,
PrimitiveType new_element_type);
friend XlaOp BitcastConvertType(const XlaOp& operand,
@@ -1293,6 +1314,11 @@ class XlaBuilder {
const ChannelHandle& handle);
friend XlaOp RecvWithToken(const XlaOp& token, const Shape& shape,
const ChannelHandle& handle);
+ friend XlaOp SendToHost(const XlaOp& operand, const XlaOp& token,
+ const Shape& shape_with_layout,
+ const ChannelHandle& handle);
+ friend XlaOp RecvFromHost(const XlaOp& token, const Shape& shape,
+ const ChannelHandle& handle);
friend XlaOp InfeedWithToken(const XlaOp& token, const Shape& shape,
const string& config);
friend XlaOp OutfeedWithToken(const XlaOp& operand, const XlaOp& token,
@@ -1951,8 +1977,10 @@ XlaOp Gather(const XlaOp& input, const XlaOp& gather_indices,
const GatherDimensionNumbers& dimension_numbers,
tensorflow::gtl::ArraySlice<int64> window_bounds);
-// Enqueues a Send node onto the computation, to send the given operand to
-// a Recv instruction that shares the same channel handle.
+// Enqueues a Send node onto the computation for device-to-device
+// communication. This operation sends the given operand to
+// a Recv instruction in a different computation that shares the same channel
+// handle.
void Send(const XlaOp& operand, const ChannelHandle& handle);
// Variant of Send which takes a token-shaped operand and produces a
@@ -1961,9 +1989,10 @@ void Send(const XlaOp& operand, const ChannelHandle& handle);
XlaOp SendWithToken(const XlaOp& operand, const XlaOp& token,
const ChannelHandle& handle);
-// Enqueues a Recv node onto the computation. The data comes from a Send
-// instruction that shares the same channel handle and its shape must
-// be the same as the given shape.
+// Enqueues a Recv node onto the computation for device-to-device
+// communication. The data comes from a Send instruction in a different
+// computation that shares the same channel handle and its shape must be the
+// same as the given shape.
XlaOp Recv(XlaBuilder* builder, const Shape& shape,
const ChannelHandle& handle);
@@ -1974,6 +2003,20 @@ XlaOp Recv(XlaBuilder* builder, const Shape& shape,
XlaOp RecvWithToken(const XlaOp& token, const Shape& shape,
const ChannelHandle& handle);
+// Enqueues a Send node which transfers data from the device to the host. The
+// 'shape_with_layout' argument defines the layout of the data transferred; its
+// shape must be compatible with the shape of the operand. The operand must be
+// array-shaped.
+// TODO(b/111544877): Support tuple shapes.
+XlaOp SendToHost(const XlaOp& operand, const XlaOp& token,
+ const Shape& shape_with_layout, const ChannelHandle& handle);
+
+// Enqueues a Recv node which transfers data from the host to the device. The
+// given shape must contain a layout and must be an array.
+// TODO(b/111544877): Support tuple shapes.
+XlaOp RecvFromHost(const XlaOp& token, const Shape& shape,
+ const ChannelHandle& handle);
+
// Enqueues an operation (AfterAll) with no operands that produces a
// token-shaped value. Tokens are used for ordering side-effecting operations.
// This is a separate method from AfterAll to facilitate the removal of
diff --git a/tensorflow/compiler/xla/literal.cc b/tensorflow/compiler/xla/literal.cc
index 5db124b5a2..0545deb096 100644
--- a/tensorflow/compiler/xla/literal.cc
+++ b/tensorflow/compiler/xla/literal.cc
@@ -1775,7 +1775,9 @@ void LiteralBase::Piece::WriteToProto(LiteralProto* proto) const {
// Nothing to do but assign the shape which is done above.
return;
default:
- LOG(FATAL) << "Unhandled primitive type " << subshape().element_type();
+ // TODO(b/111551621): Support serializing more PrimitiveTypes.
+ LOG(FATAL) << "Unhandled primitive type "
+ << PrimitiveType_Name(subshape().element_type());
}
}
diff --git a/tensorflow/compiler/xla/python/local_computation_builder.cc b/tensorflow/compiler/xla/python/local_computation_builder.cc
index be55d50b23..8aefc4cd5e 100644
--- a/tensorflow/compiler/xla/python/local_computation_builder.cc
+++ b/tensorflow/compiler/xla/python/local_computation_builder.cc
@@ -486,6 +486,11 @@ LocalOp LocalComputationBuilder::ConvertElementType(
return xla::ConvertElementType(operand.op(), new_element_type);
}
+LocalOp LocalComputationBuilder::BitcastConvertType(
+ const LocalOp& operand, PrimitiveType new_element_type) {
+ return xla::BitcastConvertType(operand.op(), new_element_type);
+}
+
LocalOp LocalComputationBuilder::Call(
const LocalComputation& local_computation,
tensorflow::gtl::ArraySlice<LocalOp> operands) {
@@ -614,6 +619,11 @@ _FORWARD_BINOP(Min)
_FORWARD_BINOP(And)
_FORWARD_BINOP(Or)
_FORWARD_BINOP(Xor)
+_FORWARD_BINOP(ShiftLeft)
+_FORWARD_BINOP(ShiftRightArithmetic)
+_FORWARD_BINOP(ShiftRightLogical)
+_FORWARD_BINOP(Atan2)
+_FORWARD_BINOP(Pow)
_FORWARD_UNOP(Not)
_FORWARD_UNOP(Abs)
_FORWARD_UNOP(Exp)
@@ -627,13 +637,27 @@ _FORWARD_UNOP(Sign)
_FORWARD_UNOP(Cos)
_FORWARD_UNOP(Sin)
_FORWARD_UNOP(Tanh)
-_FORWARD_UNOP(Sqrt)
-_FORWARD_UNOP(Square)
-_FORWARD_BINOP(Pow)
_FORWARD_UNOP(IsFinite)
-_FORWARD_UNOP(Reciprocal)
_FORWARD_UNOP(Neg)
_FORWARD_UNOP(Sort)
+_FORWARD_UNOP(Sqrt)
+_FORWARD_UNOP(Rsqrt)
+_FORWARD_UNOP(Square)
+_FORWARD_UNOP(Reciprocal)
+_FORWARD_UNOP(Erfc)
+_FORWARD_UNOP(Erf)
+_FORWARD_UNOP(ErfInv)
+_FORWARD_UNOP(Lgamma)
+_FORWARD_UNOP(Digamma)
+_FORWARD_UNOP(Acos)
+_FORWARD_UNOP(Asin)
+_FORWARD_UNOP(Atan)
+_FORWARD_UNOP(Tan)
+_FORWARD_UNOP(Acosh)
+_FORWARD_UNOP(Asinh)
+_FORWARD_UNOP(Atanh)
+_FORWARD_UNOP(Cosh)
+_FORWARD_UNOP(Sinh)
#undef _FORWARD
#undef _FORWARD_UNOP
diff --git a/tensorflow/compiler/xla/python/local_computation_builder.h b/tensorflow/compiler/xla/python/local_computation_builder.h
index 690ff277e8..dd9e2fbe72 100644
--- a/tensorflow/compiler/xla/python/local_computation_builder.h
+++ b/tensorflow/compiler/xla/python/local_computation_builder.h
@@ -259,6 +259,9 @@ class LocalComputationBuilder {
LocalOp ConvertElementType(const LocalOp& operand,
PrimitiveType new_element_type);
+ LocalOp BitcastConvertType(const LocalOp& operand,
+ PrimitiveType new_element_type);
+
LocalOp Call(const LocalComputation& local_computation,
tensorflow::gtl::ArraySlice<LocalOp> operands);
@@ -333,6 +336,11 @@ class LocalComputationBuilder {
_FORWARD_BINOP(And)
_FORWARD_BINOP(Or)
_FORWARD_BINOP(Xor)
+ _FORWARD_BINOP(ShiftLeft)
+ _FORWARD_BINOP(ShiftRightArithmetic)
+ _FORWARD_BINOP(ShiftRightLogical)
+ _FORWARD_BINOP(Atan2)
+ _FORWARD_BINOP(Pow)
_FORWARD_UNOP(Not)
_FORWARD_UNOP(Abs)
_FORWARD_UNOP(Exp)
@@ -346,13 +354,27 @@ class LocalComputationBuilder {
_FORWARD_UNOP(Cos)
_FORWARD_UNOP(Sin)
_FORWARD_UNOP(Tanh)
- _FORWARD_UNOP(Sqrt)
- _FORWARD_UNOP(Square)
- _FORWARD_BINOP(Pow)
_FORWARD_UNOP(IsFinite)
- _FORWARD_UNOP(Reciprocal)
_FORWARD_UNOP(Neg)
_FORWARD_UNOP(Sort)
+ _FORWARD_UNOP(Sqrt)
+ _FORWARD_UNOP(Rsqrt)
+ _FORWARD_UNOP(Square)
+ _FORWARD_UNOP(Reciprocal)
+ _FORWARD_UNOP(Erfc)
+ _FORWARD_UNOP(Erf)
+ _FORWARD_UNOP(ErfInv)
+ _FORWARD_UNOP(Lgamma)
+ _FORWARD_UNOP(Digamma)
+ _FORWARD_UNOP(Acos)
+ _FORWARD_UNOP(Asin)
+ _FORWARD_UNOP(Atan)
+ _FORWARD_UNOP(Tan)
+ _FORWARD_UNOP(Acosh)
+ _FORWARD_UNOP(Asinh)
+ _FORWARD_UNOP(Atanh)
+ _FORWARD_UNOP(Cosh)
+ _FORWARD_UNOP(Sinh)
#undef _FORWARD
#undef _FORWARD_UNOP
diff --git a/tensorflow/compiler/xla/python/local_computation_builder.i b/tensorflow/compiler/xla/python/local_computation_builder.i
index afdea88cb7..9b8b0aa7f2 100644
--- a/tensorflow/compiler/xla/python/local_computation_builder.i
+++ b/tensorflow/compiler/xla/python/local_computation_builder.i
@@ -957,6 +957,7 @@ tensorflow::ImportNumpy();
%unignore xla::swig::LocalComputationBuilder::Tuple;
%unignore xla::swig::LocalComputationBuilder::GetTupleElement;
%unignore xla::swig::LocalComputationBuilder::ConvertElementType;
+%unignore xla::swig::LocalComputationBuilder::BitcastConvertType;
%unignore xla::swig::LocalComputationBuilder::Call;
%unignore xla::swig::LocalComputationBuilder::Transpose;
%unignore xla::swig::LocalComputationBuilder::Rev;
@@ -989,6 +990,9 @@ tensorflow::ImportNumpy();
%unignore xla::swig::LocalComputationBuilder::And;
%unignore xla::swig::LocalComputationBuilder::Or;
%unignore xla::swig::LocalComputationBuilder::Xor;
+%unignore xla::swig::LocalComputationBuilder::ShiftLeft;
+%unignore xla::swig::LocalComputationBuilder::ShiftRightArithmetic;
+%unignore xla::swig::LocalComputationBuilder::ShiftRightLogical;
%unignore xla::swig::LocalComputationBuilder::Not;
%unignore xla::swig::LocalComputationBuilder::Abs;
%unignore xla::swig::LocalComputationBuilder::Exp;
@@ -1002,13 +1006,29 @@ tensorflow::ImportNumpy();
%unignore xla::swig::LocalComputationBuilder::Cos;
%unignore xla::swig::LocalComputationBuilder::Sin;
%unignore xla::swig::LocalComputationBuilder::Tanh;
-%unignore xla::swig::LocalComputationBuilder::Sqrt;
-%unignore xla::swig::LocalComputationBuilder::Square;
-%unignore xla::swig::LocalComputationBuilder::Pow;
+%unignore xla::swig::LocalComputationBuilder::Atan2;
%unignore xla::swig::LocalComputationBuilder::IsFinite;
-%unignore xla::swig::LocalComputationBuilder::Reciprocal;
+%unignore xla::swig::LocalComputationBuilder::Pow;
%unignore xla::swig::LocalComputationBuilder::Neg;
%unignore xla::swig::LocalComputationBuilder::Sort;
+%unignore xla::swig::LocalComputationBuilder::Sqrt;
+%unignore xla::swig::LocalComputationBuilder::Rsqrt;
+%unignore xla::swig::LocalComputationBuilder::Square;
+%unignore xla::swig::LocalComputationBuilder::Reciprocal;
+%unignore xla::swig::LocalComputationBuilder::Erfc;
+%unignore xla::swig::LocalComputationBuilder::Erf;
+%unignore xla::swig::LocalComputationBuilder::ErfInv;
+%unignore xla::swig::LocalComputationBuilder::Lgamma;
+%unignore xla::swig::LocalComputationBuilder::Digamma;
+%unignore xla::swig::LocalComputationBuilder::Acos;
+%unignore xla::swig::LocalComputationBuilder::Asin;
+%unignore xla::swig::LocalComputationBuilder::Atan;
+%unignore xla::swig::LocalComputationBuilder::Tan;
+%unignore xla::swig::LocalComputationBuilder::Acosh;
+%unignore xla::swig::LocalComputationBuilder::Asinh;
+%unignore xla::swig::LocalComputationBuilder::Atanh;
+%unignore xla::swig::LocalComputationBuilder::Cosh;
+%unignore xla::swig::LocalComputationBuilder::Sinh;
%unignore xla::swig::DestructureLocalShapedBufferTuple;
%unignore xla::swig::DeleteLocalShapedBuffer;
%unignore xla::swig::DeleteLocalComputation;
diff --git a/tensorflow/compiler/xla/python/xla_client.py b/tensorflow/compiler/xla/python/xla_client.py
index e2b6eaa096..c0105b385b 100644
--- a/tensorflow/compiler/xla/python/xla_client.py
+++ b/tensorflow/compiler/xla/python/xla_client.py
@@ -99,12 +99,27 @@ _UNARY_OPS = [
'Cos',
'Sin',
'Tanh',
+ 'IsFinite',
'Sqrt',
+ 'Rsqrt',
'Square',
- 'IsFinite',
'Reciprocal',
'Neg',
'Sort',
+ 'Erf',
+ 'Erfc',
+ 'ErfInv',
+ 'Lgamma',
+ 'Digamma',
+ 'Acos',
+ 'Asin',
+ 'Atan',
+ 'Tan',
+ 'Acosh',
+ 'Asinh',
+ 'Atanh',
+ 'Cosh',
+ 'Sinh',
]
_BINARY_OPS = [
@@ -125,6 +140,10 @@ _BINARY_OPS = [
'Or',
'Xor',
'Pow',
+ 'ShiftLeft',
+ 'ShiftRightArithmetic',
+ 'ShiftRightLogical',
+ 'Atan2',
]
@@ -702,6 +721,18 @@ class ComputationBuilder(object):
"""
return self._client.ConvertElementType(operand, new_element_type)
+ def BitcastConvertType(self, operand, new_element_type):
+ """Enqueues a bitcast type conversion operation onto the computation.
+
+ Args:
+ operand: the operand to convert.
+ new_element_type: the target primitive type.
+
+ Returns:
+ A LocalOp representing the added conversion op.
+ """
+ return self._client.BitcastConvertType(operand, new_element_type)
+
def GetShape(self, operand):
return _wrap_shape(self._client.GetShape(operand))
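For reference, BitcastConvertType reinterprets the stored bits of each element rather than converting values, so the source and destination element types must have the same bit width; this mirrors numpy's ndarray.view used in the test file below. A small illustrative sketch at the C++ builder level (hypothetical, not part of the patch; the builder and parameter names are placeholders):

    XlaBuilder b("bitcast_sketch");
    XlaOp x = b.Parameter(0, ShapeUtil::MakeShape(F32, {4}), "x");
    // Reinterpret each 32-bit float pattern as a signed 32-bit integer.
    XlaOp bits = BitcastConvertType(x, S32);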
diff --git a/tensorflow/compiler/xla/python/xla_client_test.py b/tensorflow/compiler/xla/python/xla_client_test.py
index 0564ddcb85..fd98e19457 100644
--- a/tensorflow/compiler/xla/python/xla_client_test.py
+++ b/tensorflow/compiler/xla/python/xla_client_test.py
@@ -171,6 +171,24 @@ class ComputationsWithConstantsTest(LocalComputationTest):
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
+ def testShiftLeft(self):
+ c = self._NewComputation()
+ c.ShiftLeft(c.Constant(NumpyArrayS32([3])),
+ c.Constant(NumpyArrayS32([2])))
+ self._ExecuteAndCompareClose(c, expected=[12])
+
+ def testShiftRightArithmetic(self):
+ c = self._NewComputation()
+ c.ShiftRightArithmetic(c.Constant(NumpyArrayS32([-2])),
+ c.Constant(NumpyArrayS32([1])))
+ self._ExecuteAndCompareClose(c, expected=[-1])
+
+ def testShiftRightLogical(self):
+ c = self._NewComputation()
+ c.ShiftRightLogical(c.Constant(NumpyArrayS32([-1])),
+ c.Constant(NumpyArrayS32([1])))
+ self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
+
def testGetProto(self):
c = self._NewComputation()
c.Add(
@@ -471,6 +489,34 @@ class SingleOpTest(LocalComputationTest):
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
+ def testBitcastConvertType(self):
+ xla_x32_types = {
+ np.int32: xla_client.xla_data_pb2.S32,
+ np.float32: xla_client.xla_data_pb2.F32,
+ }
+
+ xla_x64_types = {
+ np.int64: xla_client.xla_data_pb2.S64,
+ np.float64: xla_client.xla_data_pb2.F64,
+ }
+
+ def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
+ c = self._NewComputation()
+ x = c.Constant(np.array(template, dtype=src_dtype))
+ c.BitcastConvertType(x, dst_etype)
+
+ result = c.Build().Compile().Execute()
+ expected = np.array(template, src_dtype).view(dst_dtype)
+
+ self.assertEqual(result.shape, expected.shape)
+ self.assertEqual(result.dtype, expected.dtype)
+ np.testing.assert_equal(result, expected)
+
+ x = [0, 1, 0, 0, 1]
+ for xla_types in [xla_x32_types, xla_x64_types]:
+ for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
+ _ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
+
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
diff --git a/tensorflow/compiler/xla/service/BUILD b/tensorflow/compiler/xla/service/BUILD
index 85c6c632cd..cba7883fde 100644
--- a/tensorflow/compiler/xla/service/BUILD
+++ b/tensorflow/compiler/xla/service/BUILD
@@ -182,6 +182,7 @@ tf_cc_test(
name = "shape_inference_test",
srcs = ["shape_inference_test.cc"],
deps = [
+ ":hlo",
":shape_inference",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:test",
@@ -1056,6 +1057,7 @@ cc_library(
hdrs = ["hlo_module_group_metadata.h"],
deps = [
":hlo",
+ ":hlo_casting_utils",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status",
"//tensorflow/compiler/xla:status_macros",
@@ -2009,6 +2011,7 @@ cc_library(
deps = [
":computation_layout",
":hlo",
+ ":hlo_casting_utils",
":hlo_dce",
":hlo_graph_dumper",
":hlo_pass",
diff --git a/tensorflow/compiler/xla/service/algebraic_simplifier.cc b/tensorflow/compiler/xla/service/algebraic_simplifier.cc
index af7728da54..26a8a67601 100644
--- a/tensorflow/compiler/xla/service/algebraic_simplifier.cc
+++ b/tensorflow/compiler/xla/service/algebraic_simplifier.cc
@@ -1156,6 +1156,19 @@ Status AlgebraicSimplifierVisitor::HandleMultiply(HloInstruction* multiply) {
return Status::OK();
}
+  // 0*A => 0. Only applies to integral types because in floating point
+  // 0*NaN is NaN.
+ if (IsAll(lhs, 0) &&
+ primitive_util::IsIntegralType(multiply->shape().element_type()) &&
+ ReplaceInstructionIfSameShape(multiply, lhs)) {
+ return Status::OK();
+ }
+ // A*0 => 0
+ if (IsAll(rhs, 0) &&
+ primitive_util::IsIntegralType(multiply->shape().element_type()) &&
+ ReplaceInstructionIfSameShape(multiply, rhs)) {
+ return Status::OK();
+ }
+
// exp(A) * exp(B) => exp(A+B)
if (Match(multiply, m::Multiply(m::Exp(m::Op(&lhs)), m::Exp(m::Op(&rhs))))) {
auto add = computation_->AddInstruction(HloInstruction::CreateBinary(
@@ -1731,6 +1744,25 @@ Status AlgebraicSimplifierVisitor::HandleSlice(HloInstruction* slice) {
if (ReplaceInstructionIfSameShape(slice, slice->mutable_operand(0))) {
return Status::OK();
}
+
+ auto is_unstrided_slice = [](const HloInstruction* hlo) {
+ return c_all_of(hlo->slice_strides(),
+ [](int64 stride) { return stride == 1; });
+ };
+ if (slice->operand(0)->opcode() == HloOpcode::kSlice &&
+ is_unstrided_slice(slice) && is_unstrided_slice(slice->operand(0))) {
+ HloInstruction* operand_slice = slice->mutable_operand(0);
+ std::vector<int64> new_slice_starts = slice->slice_starts();
+ std::vector<int64> new_slice_limits = slice->slice_limits();
+ for (int64 i = 0; i < new_slice_starts.size(); ++i) {
+ new_slice_starts[i] += operand_slice->slice_starts(i);
+ new_slice_limits[i] += operand_slice->slice_starts(i);
+ }
+ return ReplaceWithNewInstruction(
+ slice, HloInstruction::CreateSlice(
+ slice->shape(), operand_slice->mutable_operand(0),
+ new_slice_starts, new_slice_limits, slice->slice_strides()));
+ }
return Status::OK();
}
@@ -1891,6 +1923,26 @@ Status AlgebraicSimplifierVisitor::HandleReduce(HloInstruction* reduce) {
new_reduce_dimensions, function));
}
}
+  // Convert Reduce(concat({a,b,...})) to
+  //        map(...map(map(reduce(a), reduce(b)), reduce(c))...)
+ //
+ // This should make fusion easier or use less memory bandwidth in the unfused
+ // case.
+ if (arg->opcode() == HloOpcode::kConcatenate &&
+ c_linear_search(reduce->dimensions(), arg->concatenate_dimension())) {
+ HloInstruction* old_reduce = nullptr;
+ for (HloInstruction* operand : arg->operands()) {
+ HloInstruction* new_reduce = computation_->AddInstruction(
+ HloInstruction::CreateReduce(reduce->shape(), operand, init_value,
+ reduce->dimensions(), function));
+ if (old_reduce != nullptr) {
+ new_reduce = computation_->AddInstruction(HloInstruction::CreateMap(
+ reduce->shape(), {old_reduce, new_reduce}, function));
+ }
+ old_reduce = new_reduce;
+ }
+ return ReplaceInstruction(reduce, old_reduce);
+ }
return Status::OK();
}
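A worked example of the reduce-of-concat rewrite with an add reduction: reduce_add(concat([1, 2], [3, 4])) = 10, and map(add)(reduce_add([1, 2]), reduce_add([3, 4])) = add(3, 7) = 10, so each concat operand can be reduced (and potentially fused) independently without changing the result. The 0*A and A*0 rewrites above are deliberately limited to integral element types because in floating point 0 * NaN is NaN, so substituting the zero constant would change results. The slice-of-slice fold composes two unstrided slices by adding the inner slice's start indices to the outer slice's starts and limits, which is what the SliceOfSliceToSlice test in the next file checks.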
diff --git a/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc b/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
index 92bbcbd740..ddf0a513c0 100644
--- a/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
+++ b/tensorflow/compiler/xla/service/algebraic_simplifier_test.cc
@@ -74,6 +74,26 @@ TEST_F(AlgebraicSimplifierTest, AddZero) {
EXPECT_EQ(root, param0);
}
+// Test that A * 0 is simplified to 0
+TEST_F(AlgebraicSimplifierTest, MulZero) {
+ Shape r0s32 = ShapeUtil::MakeShape(S32, {});
+ HloComputation::Builder builder(TestName());
+ HloInstruction* param0 = builder.AddInstruction(
+ HloInstruction::CreateParameter(0, r0s32, "param0"));
+ HloInstruction* zero = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32>(0)));
+ builder.AddInstruction(
+ HloInstruction::CreateBinary(r0s32, HloOpcode::kMultiply, param0, zero));
+
+ auto computation = module().AddEntryComputation(builder.Build());
+ HloInstruction* root = computation->root_instruction();
+ EXPECT_EQ(root->opcode(), HloOpcode::kMultiply);
+ AlgebraicSimplifier simplifier(/*is_layout_sensitive=*/false,
+ non_bitcasting_callback());
+ ASSERT_TRUE(simplifier.Run(&module()).ValueOrDie());
+ EXPECT_EQ(computation->root_instruction(), zero);
+}
+
// Test that Reduce(Reduce(A)) -> Reduce(A)
TEST_F(AlgebraicSimplifierTest, TwoReducesToOne) {
HloComputation::Builder builder(TestName());
@@ -1230,6 +1250,55 @@ TEST_F(AlgebraicSimplifierTest, RemoveEmptyConcatenateOperands) {
op::Concatenate(param0, param0, param1));
}
+// Test that reduce of concat is simplified.
+TEST_F(AlgebraicSimplifierTest, SimplifyReduceOfConcat) {
+ const int kParamLength = 100;
+ Shape r3f32 =
+ ShapeUtil::MakeShape(F32, {kParamLength, kParamLength, kParamLength});
+ HloComputation::Builder builder(TestName());
+ HloInstruction* param0 = builder.AddInstruction(
+ HloInstruction::CreateParameter(0, r3f32, "param0"));
+ HloInstruction* param1 = builder.AddInstruction(
+ HloInstruction::CreateParameter(1, r3f32, "param1"));
+ HloInstruction* param2 = builder.AddInstruction(
+ HloInstruction::CreateParameter(2, r3f32, "param2"));
+ Shape concat_shape =
+ ShapeUtil::MakeShape(F32, {kParamLength, 3 * kParamLength, kParamLength});
+ HloInstruction* Concatenate =
+ builder.AddInstruction(HloInstruction::CreateConcatenate(
+ concat_shape, {param0, param1, param2}, 1));
+ HloComputation* add_computation = nullptr;
+ {
+ HloComputation::Builder builder(TestName() + ".add");
+ const Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
+ HloInstruction* p0 = builder.AddInstruction(
+ HloInstruction::CreateParameter(0, scalar_shape, "p0"));
+ HloInstruction* p1 = builder.AddInstruction(
+ HloInstruction::CreateParameter(1, scalar_shape, "p1"));
+ builder.AddInstruction(
+ HloInstruction::CreateBinary(scalar_shape, HloOpcode::kAdd, p0, p1));
+ add_computation = module().AddEmbeddedComputation(builder.Build());
+ }
+ Shape r4f32 = ShapeUtil::MakeShape(F32, {4, 5, 6, 7});
+ Shape reduce_shape = ShapeUtil::MakeShape(F32, {kParamLength});
+
+ HloInstruction* zero = builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0)));
+ builder.AddInstruction(HloInstruction::CreateReduce(
+ reduce_shape, Concatenate, zero, {1, 2}, add_computation));
+
+ auto computation = module().AddEntryComputation(builder.Build());
+
+ AlgebraicSimplifier simplifier(/*is_layout_sensitive=*/false,
+ non_bitcasting_callback());
+ ASSERT_TRUE(simplifier.Run(&module()).ValueOrDie());
+
+ EXPECT_THAT(
+ computation->root_instruction(),
+ op::Map(op::Map(op::Reduce(param0, zero), op::Reduce(param1, zero)),
+ op::Reduce(param2, zero)));
+}
+
// Test a concatenate with only empty operands is removed.
TEST_F(AlgebraicSimplifierTest, OnlyEmptyConcatenateOperands) {
const int kParamLength = 100;
@@ -1839,6 +1908,39 @@ TEST_F(AlgebraicSimplifierTest, RemoveNoopSlice) {
EXPECT_THAT(computation->root_instruction(), param);
}
+TEST_F(AlgebraicSimplifierTest, SliceOfSliceToSlice) {
+ HloComputation::Builder builder(TestName());
+ const int64 dim0 = 11;
+ const int64 dim1 = 12;
+ HloInstruction* param =
+ builder.AddInstruction(HloInstruction::CreateParameter(
+ 0, ShapeUtil::MakeShape(F32, {dim0, dim1}), "param"));
+ HloInstruction* original_slice =
+ builder.AddInstruction(HloInstruction::CreateSlice(
+ ShapeUtil::MakeShape(F32, {dim0 - 2, dim1 - 4}), param,
+ /*start_indices=*/{1, 2},
+ /*limit_indices=*/{dim0 - 1, dim1 - 2}, /*strides=*/{1, 1}));
+
+ builder.AddInstruction(HloInstruction::CreateSlice(
+ ShapeUtil::MakeShape(F32, {dim0 - 5, dim1 - 9}), original_slice,
+ /*start_indices=*/{2, 3},
+ /*limit_indices=*/{dim0 - 3, dim1 - 6}, /*strides=*/{1, 1}));
+ auto module = CreateNewModule();
+ HloComputation* computation = module->AddEntryComputation(builder.Build());
+
+ EXPECT_THAT(computation->root_instruction(), op::Slice(op::Slice(param)));
+
+ AlgebraicSimplifier simplifier(/*is_layout_sensitive=*/false,
+ non_bitcasting_callback());
+ ASSERT_TRUE(simplifier.Run(module).ValueOrDie());
+
+ EXPECT_THAT(computation->root_instruction(), op::Slice(param));
+ EXPECT_EQ(computation->root_instruction()->slice_starts(0), 3);
+ EXPECT_EQ(computation->root_instruction()->slice_starts(1), 5);
+ EXPECT_EQ(computation->root_instruction()->slice_limits(0), dim0 - 2);
+ EXPECT_EQ(computation->root_instruction()->slice_limits(1), dim1 - 4);
+}
+
TEST_F(AlgebraicSimplifierTest, ConvertConvToMatmul) {
struct ConvTestOptions {
int in_batch = 10;
diff --git a/tensorflow/compiler/xla/service/buffer_assignment.cc b/tensorflow/compiler/xla/service/buffer_assignment.cc
index afe4b2e142..b4c7cf0dd8 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment.cc
@@ -270,7 +270,7 @@ BufferAllocationProto BufferAllocation::ToProto() const {
proto.set_index(index_);
proto.set_size(size_);
proto.set_is_thread_local(is_thread_local_);
- proto.set_is_reusable(is_reusable_);
+ proto.set_is_tuple(is_tuple_);
proto.set_color(color_.value());
if (is_entry_computation_parameter_) {
proto.set_is_entry_computation_parameter(true);
@@ -279,6 +279,7 @@ BufferAllocationProto BufferAllocation::ToProto() const {
}
proto.set_parameter_number(parameter_number_);
}
+ proto.set_is_constant(is_constant_);
proto.set_maybe_live_out(maybe_live_out_);
for (const auto& buffer_offset_size : assigned_buffers_) {
BufferAllocationProto::Assigned* proto_assigned = proto.add_assigned();
@@ -304,6 +305,9 @@ string BufferAllocation::ToString() const {
StrAppend(&output, ", parameter ", parameter_number(), " at ShapeIndex ",
param_shape_index().ToString());
}
+ if (is_constant()) {
+ StrAppend(&output, ", constant");
+ }
if (is_thread_local()) {
StrAppend(&output, ", thread-local");
}
@@ -491,20 +495,16 @@ BufferAssignment::GetUniqueTopLevelOutputSlice() const {
}
BufferAllocation* BufferAssignment::NewEmptyAllocation(
- int64 size, bool is_thread_local, bool is_reusable,
- LogicalBuffer::Color color) {
+ int64 size, LogicalBuffer::Color color) {
BufferAllocation::Index index = allocations_.size();
- allocations_.emplace_back(index, size, is_thread_local, is_reusable, color);
+ allocations_.emplace_back(index, size, color);
BufferAllocation* allocation = &allocations_.back();
return allocation;
}
BufferAllocation* BufferAssignment::NewAllocation(const LogicalBuffer& buffer,
- int64 size,
- bool is_thread_local,
- bool is_reusable) {
- BufferAllocation* allocation =
- NewEmptyAllocation(size, is_thread_local, is_reusable, buffer.color());
+ int64 size) {
+ BufferAllocation* allocation = NewEmptyAllocation(size, buffer.color());
AddAssignment(allocation, buffer, /*offset=*/0, size);
allocation->peak_buffers_.push_back(&buffer);
return allocation;
@@ -517,7 +517,8 @@ void BufferAssignment::AddAssignment(BufferAllocation* allocation,
CHECK_EQ(0, allocation_index_for_buffer_.count(&buffer))
<< "LogicalBuffer " << buffer << " already has an allocation.";
CHECK(allocation->is_reusable() || allocation->assigned_buffers().empty())
- << "Non-reusable allocation already assigned a buffer";
+ << "Non-reusable allocation already assigned a buffer: "
+ << allocation->ToString();
TF_CHECK_OK(points_to_analysis().VerifyBuffer(buffer));
@@ -609,6 +610,10 @@ Status BufferAssignment::ComputeSummaryStats() {
stats_.parameter_allocation_count++;
stats_.parameter_allocation_bytes += allocation.size();
}
+ if (allocation.is_constant()) {
+ stats_.constant_allocation_count++;
+ stats_.constant_allocation_bytes += allocation.size();
+ }
if (allocation.maybe_live_out()) {
stats_.maybe_live_out_allocation_count++;
stats_.maybe_live_out_allocation_bytes += allocation.size();
@@ -645,6 +650,8 @@ string BufferAssignment::Stats::ToString() const {
Appendf(&s, "BufferAssignment stats:\n");
Appendf(&s, " parameter allocation: %10s\n",
HumanReadableNumBytes(parameter_allocation_bytes).c_str());
+ Appendf(&s, " constant allocation: %10s\n",
+ HumanReadableNumBytes(constant_allocation_bytes).c_str());
Appendf(&s, " maybe_live_out allocation: %10s\n",
HumanReadableNumBytes(maybe_live_out_allocation_bytes).c_str());
Appendf(&s, " preallocated temp allocation: %10s\n",
@@ -722,8 +729,10 @@ StatusOr<std::unique_ptr<BufferAssignment>> BufferAssigner::Run(
const HloModule* module, std::unique_ptr<HloOrdering> hlo_ordering,
LogicalBuffer::SizeFunction buffer_size,
LogicalBuffer::AlignmentFunction color_alignment,
- bool allow_input_output_aliasing, BufferLiveness::Colorer colorer) {
- BufferAssigner assigner(allow_input_output_aliasing, std::move(colorer));
+ bool allow_input_output_aliasing, bool allocate_buffers_for_constants,
+ BufferLiveness::Colorer colorer) {
+ BufferAssigner assigner(allow_input_output_aliasing,
+ allocate_buffers_for_constants, std::move(colorer));
return assigner.CreateAssignment(module, std::move(hlo_ordering),
std::move(buffer_size),
std::move(color_alignment));
@@ -751,8 +760,8 @@ bool BufferAssigner::MaybeAssignBuffer(BufferAllocation* allocation,
return false;
}
- if (allocation->is_entry_computation_parameter()) {
- VLOG(4) << "Can't assign: allocation holds parameter";
+ if (allocation->is_readonly()) {
+ VLOG(4) << "Can't assign: allocation is readonly";
return false;
}
@@ -905,15 +914,19 @@ Status BufferAssigner::AssignBuffersForComputation(
TF_RET_CHECK(!assignment->HasAllocation(*buffer));
const HloInstruction* instruction = buffer->instruction();
+ const int64 buffer_size = assignment->buffer_size_(*buffer);
+
if (instruction->opcode() == HloOpcode::kConstant) {
- // No BufferAllocations for constants.
- // TODO(b/32248867): For consistency, constants should get allocations.
- VLOG(3) << "Skipping constant: " << *buffer;
+ if (allocate_buffers_for_constants_) {
+ BufferAllocation* allocation =
+ assignment->NewAllocation(*buffer, buffer_size);
+ allocation->set_constant(true);
+ VLOG(3) << "New allocation #" << allocation->index() << " for constant "
+ << *buffer;
+ }
continue;
}
- const int64 buffer_size = assignment->buffer_size_(*buffer);
-
const bool is_entry_parameter =
instruction->opcode() == HloOpcode::kParameter &&
computation == computation->parent()->entry_computation();
@@ -923,9 +936,7 @@ Status BufferAssigner::AssignBuffersForComputation(
// computations do not need special allocations because they live inside
// callers.
BufferAllocation* allocation =
- assignment->NewAllocation(*buffer, buffer_size,
- /*is_thread_local=*/false,
- /*is_reusable=*/false);
+ assignment->NewAllocation(*buffer, buffer_size);
allocation->set_entry_computation_parameter(
instruction->parameter_number(), buffer->index());
VLOG(3) << "New allocation #" << allocation->index()
@@ -934,20 +945,18 @@ Status BufferAssigner::AssignBuffersForComputation(
}
if (is_thread_local) {
- // We do not reuse thread-local buffers for now, because they are
- // dynamically allocated and their lifetimes are hard to compute.
- BufferAllocation* allocation = assignment->NewAllocation(
- *buffer, buffer_size, is_thread_local, /*is_reusable=*/false);
+ BufferAllocation* allocation =
+ assignment->NewAllocation(*buffer, buffer_size);
+ allocation->set_is_thread_local(true);
VLOG(3) << "New allocation #" << allocation->index()
<< " for thread-local: " << *buffer;
continue;
}
if (ShapeUtil::IsTuple(buffer->shape())) {
- // TODO(b/34669761): Don't reuse tuple buffers because the GPU backend
- // assumes longer buffer liveness than indicated by the analysis.
- BufferAllocation* allocation = assignment->NewAllocation(
- *buffer, buffer_size, is_thread_local, /*is_reusable=*/false);
+ BufferAllocation* allocation =
+ assignment->NewAllocation(*buffer, buffer_size);
+ allocation->set_is_tuple(true);
VLOG(3) << "New allocation #" << allocation->index()
<< " for tuple-shaped buffer: " << *buffer;
continue;
@@ -1030,8 +1039,8 @@ Status BufferAssigner::AssignBuffersForComputation(
}
if (!assignment->HasAllocation(*buffer)) {
- BufferAllocation* allocation = assignment->NewAllocation(
- *buffer, buffer_size, is_thread_local, /*is_reusable=*/true);
+ BufferAllocation* allocation =
+ assignment->NewAllocation(*buffer, buffer_size);
allocation_indices.push_back(allocation->index());
VLOG(3) << "New allocation #" << allocation->index()
<< " for: " << *buffer;
@@ -1085,6 +1094,7 @@ Status BufferAssigner::AssignBuffersWithSequentialOrdering(
VLOG(2) << "Simulating heap for color " << color;
int64 alignment = assignment->color_alignment_(color);
HeapSimulator::Options options;
+ options.alloc_constants = allocate_buffers_for_constants_;
BufferValueFlatSet buffer_value_set =
ToBufferValueFlatSet(single_colored_set.second);
options.buffers_to_assign = &buffer_value_set;
@@ -1227,8 +1237,8 @@ void BufferAssigner::AssignBuffersFromHeapSimulator(
result.fragmentation_size;
}
- BufferAllocation* allocation = assignment->NewEmptyAllocation(
- result.heap_size, /*is_thread_local=*/false, /*is_reusable=*/true, color);
+ BufferAllocation* allocation =
+ assignment->NewEmptyAllocation(result.heap_size, color);
for (const auto& buffer_chunk : result.chunk_map) {
// TODO(lauj) Remove this down_cast after downstream users of
// BufferAllocation::assigned_buffers() are updated to use BufferValue.
@@ -1444,8 +1454,23 @@ void BufferAssigner::BuildColocatedBufferSets(
});
} else if (opcode == HloOpcode::kCall) {
const HloInstruction* call_hlo = instruction;
- const HloInstruction* root_hlo =
- call_hlo->to_apply()->root_instruction();
+ const HloComputation* callee = call_hlo->to_apply();
+ const HloInstruction* root_hlo = callee->root_instruction();
+ for (int64 i = 0; i < call_hlo->operand_count(); i++) {
+ const HloInstruction* call_param = callee->parameter_instruction(i);
+ const HloInstruction* call_operand = call_hlo->operand(i);
+ ShapeUtil::ForEachSubshape(
+ call_operand->shape(),
+ [&](const Shape& /*subshape*/, const ShapeIndex& index) {
+ std::vector<const LogicalBuffer*> colocated_set;
+ AddBufferToColocatedSet(call_param, index, points_to_analysis,
+ &colocated_set);
+ AddBufferToColocatedSet(call_operand, index, points_to_analysis,
+ &colocated_set);
+ AddSetToColocatedBufferSets(colocated_set,
+ colocated_buffer_sets);
+ });
+ }
ShapeUtil::ForEachSubshape(
call_hlo->shape(),
[this, call_hlo, root_hlo, &points_to_analysis,
@@ -1551,6 +1576,7 @@ void BufferAssigner::AssignColocatedBufferSets(
// param in 'colocated_buffer_set'.
int64 entry_parameter_number = -1;
const ShapeIndex* entry_parameter_shape_idx = nullptr;
+ bool is_constant = false;
for (const LogicalBuffer* buffer : colocated_buffer_set) {
const HloInstruction* instruction = buffer->instruction();
const HloComputation* computation = instruction->parent();
@@ -1558,10 +1584,14 @@ void BufferAssigner::AssignColocatedBufferSets(
computation == computation->parent()->entry_computation()) {
entry_parameter_number = instruction->parameter_number();
entry_parameter_shape_idx = &buffer->index();
- break;
+ } else if (instruction->opcode() == HloOpcode::kConstant) {
+ is_constant = true;
}
}
+ CHECK(!is_constant || entry_parameter_number == -1)
+ << "Copy insertion should have inserted copies to prevent this.";
+
for (const LogicalBuffer* buffer : colocated_buffer_set) {
const int64 buffer_size = assignment->buffer_size_(*buffer);
if (allocation == nullptr) {
@@ -1569,18 +1599,14 @@ void BufferAssigner::AssignColocatedBufferSets(
// allocations for each colocated buffer set. When liveness has
// module-level scope, we can allow buffers to be shared across
// computations (in some cases).
- allocation = assignment->NewAllocation(*buffer, buffer_size,
- /*is_thread_local=*/false,
- /*is_reusable=*/true);
+ allocation = assignment->NewAllocation(*buffer, buffer_size);
if (entry_parameter_number >= 0) {
- // This colocated buffer set contains an entry parameter and other
- // logical buffers which use the parameter as read-only in a while
- // body computation (which updates in place).
- // Set 'entry_computation_parameter' to indicate that it contains
- // an entry parameter, and to prevent reuse in MaybeAssignBuffer.
allocation->set_entry_computation_parameter(
entry_parameter_number, *entry_parameter_shape_idx);
}
+ if (is_constant) {
+ allocation->set_constant(true);
+ }
colocated_allocations->insert(allocation->index());
} else {
CHECK_EQ(buffer_size, allocation->size())
diff --git a/tensorflow/compiler/xla/service/buffer_assignment.h b/tensorflow/compiler/xla/service/buffer_assignment.h
index ad0b0bf7c2..4fcf1fc73d 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment.h
+++ b/tensorflow/compiler/xla/service/buffer_assignment.h
@@ -58,13 +58,8 @@ class BufferAllocation {
// contiguously and can be used as array indexes.
using Index = int64;
- BufferAllocation(Index index, int64 size, bool is_thread_local,
- bool is_reusable, LogicalBuffer::Color color)
- : index_(index),
- size_(size),
- is_thread_local_(is_thread_local),
- is_reusable_(is_reusable),
- color_(color) {}
+ BufferAllocation(Index index, int64 size, LogicalBuffer::Color color)
+ : index_(index), size_(size), color_(color) {}
~BufferAllocation() {}
// Returns the index of this allocation.
@@ -74,9 +69,28 @@ class BufferAllocation {
// inside of a map or reduce computation. Such allocations need to be thread
// local.
bool is_thread_local() const { return is_thread_local_; }
+ void set_is_thread_local(bool is_thread_local) {
+ is_thread_local_ = is_thread_local;
+ }
// Whether this allocation can be used by more than one logical buffer.
- bool is_reusable() const { return is_reusable_; }
+ bool is_reusable() const {
+ // We do not reuse thread-local buffers for now, because they are
+ // dynamically allocated and their lifetimes are hard to compute.
+ //
+ // TODO(b/34669761): Don't reuse tuple buffers because the GPU backend
+ // assumes longer buffer liveness than indicated by the analysis.
+ return !is_thread_local() && !is_tuple();
+ }
+
+  // Whether this allocation is read-only, i.e. backed by memory we cannot
+  // write to.
+ bool is_readonly() const {
+ return is_entry_computation_parameter() || is_constant();
+ }
+
+ bool is_tuple() const { return is_tuple_; }
+ void set_is_tuple(bool is_tuple) { is_tuple_ = is_tuple; }
// Whether this allocation holds a LogicalBuffer from a parameter of the entry
// computation. These buffers have lifetimes which may be longer than the
@@ -84,6 +98,13 @@ class BufferAllocation {
bool is_entry_computation_parameter() const {
return is_entry_computation_parameter_;
}
+
+ // Whether this allocation holds a constant. On the CPU and GPU backends
+  // constant allocations are not allocated dynamically; instead, we resolve
+  // references to these buffer allocations to a global in the read-only section
+ // of the binary.
+ bool is_constant() const { return is_constant_; }
+
// If this allocation holds a Buffer from a parameter of the entry
  // computation, this method returns the parameter number. CHECKs otherwise.
int64 parameter_number() const {
@@ -189,7 +210,9 @@ class BufferAllocation {
// of the computation.
!maybe_live_out() &&
// Thread-local buffers are allocated using `alloca`s.
- !is_thread_local();
+ !is_thread_local() &&
+ // Constant buffers are allocated as global values.
+ !is_constant();
}
// Add a heap trace which was used to assign slices to logical buffers in this
@@ -245,6 +268,8 @@ class BufferAllocation {
parameter_number_ = parameter_number;
param_shape_index_ = std::move(param_shape_index);
}
+
+ void set_constant(bool is_constant) { is_constant_ = is_constant; }
void set_maybe_live_out(bool value) { maybe_live_out_ = value; }
void set_index(Index index) { index_ = index; }
void set_size(int64 size) { size_ = size; }
@@ -256,10 +281,10 @@ class BufferAllocation {
int64 size_;
// Whether this buffer needs to be thread-local.
- bool is_thread_local_;
+ bool is_thread_local_ = false;
- // Whether this buffer is usable by more than one logical buffer.
- bool is_reusable_;
+ // Whether this buffer holds a tuple.
+ bool is_tuple_ = false;
// Color of the allocation.
LogicalBuffer::Color color_;
@@ -283,6 +308,9 @@ class BufferAllocation {
// might not actually escape.
bool maybe_live_out_ = false;
+ // See comment on the is_constant() accessor.
+ bool is_constant_ = false;
+
// Mapping from the set of buffers assigned to this allocation to their
// logical offsets and sizes.
tensorflow::gtl::FlatMap<const LogicalBuffer*, OffsetSize> assigned_buffers_;
@@ -398,6 +426,8 @@ class BufferAssignment {
struct Stats {
int64 parameter_allocation_count = 0;
int64 parameter_allocation_bytes = 0;
+ int64 constant_allocation_count = 0;
+ int64 constant_allocation_bytes = 0;
int64 maybe_live_out_allocation_count = 0;
int64 maybe_live_out_allocation_bytes = 0;
int64 preallocated_temp_allocation_count = 0;
@@ -426,14 +456,11 @@ class BufferAssignment {
// Creates and returns a new BufferAllocation, with no assigned
// LogicalBuffers. Ownership is maintained internally.
- BufferAllocation* NewEmptyAllocation(int64 size, bool is_thread_local,
- bool is_reusable,
- LogicalBuffer::Color color);
+ BufferAllocation* NewEmptyAllocation(int64 size, LogicalBuffer::Color color);
// Helper that calls NewEmptyAllocation and AddAssignment in one call,
// creating an allocation containing a single LogicalBuffer.
- BufferAllocation* NewAllocation(const LogicalBuffer& buffer, int64 size,
- bool is_thread_local, bool is_reusable);
+ BufferAllocation* NewAllocation(const LogicalBuffer& buffer, int64 size);
// Adds a LogicalBuffer to the set assigned to the given allocation.
void AddAssignment(BufferAllocation* allocation, const LogicalBuffer& buffer,
@@ -493,12 +520,15 @@ class BufferAssigner {
LogicalBuffer::SizeFunction buffer_size,
LogicalBuffer::AlignmentFunction color_alignment,
bool allow_input_output_aliasing = false,
+ bool allocate_buffers_for_constants = false,
BufferLiveness::Colorer colorer = BufferLiveness::DefaultColorer());
private:
BufferAssigner(bool allow_input_output_aliasing,
+ bool allocate_buffers_for_constants,
BufferLiveness::Colorer colorer)
: allow_input_output_aliasing_(allow_input_output_aliasing),
+ allocate_buffers_for_constants_(allocate_buffers_for_constants),
colorer_(colorer) {}
virtual ~BufferAssigner() = default;
@@ -595,6 +625,9 @@ class BufferAssigner {
// buffers can be shared if their sizes match.
bool allow_input_output_aliasing_;
+ // If true, allocate buffers for constant instructions.
+ bool allocate_buffers_for_constants_;
+
// Functor used to assign colors to newly allocated logical buffers.
BufferLiveness::Colorer colorer_;
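With the is_thread_local/is_reusable constructor arguments gone, allocations are created first and then tagged, and reusability is derived from the tags. A minimal sketch of the new pattern, mirroring the AssignBuffersForComputation changes in buffer_assignment.cc above (with assignment, buffer, and buffer_size as in that function):

    BufferAllocation* allocation =
        assignment->NewAllocation(*buffer, buffer_size);
    allocation->set_is_thread_local(true);
    // Reusability is no longer a stored flag: thread-local and tuple
    // allocations are never reused, and read-only allocations (entry
    // parameters, constants) never get additional buffers assigned.
    CHECK(!allocation->is_reusable());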
diff --git a/tensorflow/compiler/xla/service/buffer_assignment_test.cc b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
index 125ade2a11..dea855d39a 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
@@ -89,7 +89,20 @@ class BufferAssignmentTest : public HloTestBase {
return BufferAssigner::Run(
module, xla::MakeUnique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
- [alignment](LogicalBuffer::Color) { return alignment; })
+ [alignment](LogicalBuffer::Color) { return alignment; },
+ /*allow_input_output_aliasing=*/false,
+ /*allocate_buffers_for_constants=*/true)
+ .ConsumeValueOrDie();
+ }
+
+ std::unique_ptr<BufferAssignment> RunBufferAssignmentNoBuffersForConstants(
+ HloModule* module, int64 alignment = 1) {
+ return BufferAssigner::Run(
+ module, xla::MakeUnique<DependencyHloOrdering>(module),
+ backend().compiler()->BufferSizeBytesFunction(),
+ [alignment](LogicalBuffer::Color) { return alignment; },
+ /*allow_input_output_aliasing=*/false,
+ /*allocate_buffers_for_constants=*/false)
.ConsumeValueOrDie();
}
@@ -98,8 +111,9 @@ class BufferAssignmentTest : public HloTestBase {
return BufferAssigner::Run(
module, xla::MakeUnique<DependencyHloOrdering>(module),
backend().compiler()->BufferSizeBytesFunction(),
- [alignment](LogicalBuffer::Color) { return alignment; }, false,
- std::move(colorer))
+ [alignment](LogicalBuffer::Color) { return alignment; },
+ /*allow_input_output_aliasing=*/false,
+ /*allocate_buffers_for_constants=*/true, std::move(colorer))
.ConsumeValueOrDie();
}
@@ -115,7 +129,9 @@ class BufferAssignmentTest : public HloTestBase {
module,
xla::MakeUnique<SequentialHloOrdering>(module, module_sequence),
backend().compiler()->BufferSizeBytesFunction(),
- [alignment](LogicalBuffer::Color) { return alignment; })
+ [alignment](LogicalBuffer::Color) { return alignment; },
+ /*allow_input_output_aliasing=*/false,
+ /*allocate_buffers_for_constants=*/true)
.ConsumeValueOrDie();
}
@@ -294,9 +310,15 @@ TEST_F(BufferAssignmentTest, ScalarConstant) {
auto module = CreateNewModule();
module->AddEntryComputation(builder.Build());
- auto buffers = RunBufferAssignment(module.get());
- // Check that the constant does not have a buffer assigned.
- EXPECT_FALSE(buffers->HasTopLevelAllocation(const0));
+ {
+ auto buffers = RunBufferAssignment(module.get());
+ EXPECT_TRUE(buffers->HasTopLevelAllocation(const0));
+ }
+
+ {
+ auto buffers = RunBufferAssignmentNoBuffersForConstants(module.get());
+ EXPECT_FALSE(buffers->HasTopLevelAllocation(const0));
+ }
}
TEST_F(BufferAssignmentTest, BufferForConst) {
@@ -312,12 +334,18 @@ TEST_F(BufferAssignmentTest, BufferForConst) {
auto module = CreateNewModule();
module->AddEntryComputation(builder.Build());
- auto buffers = RunBufferAssignment(module.get());
- // The two constant nodes have no buffers assigned.
- EXPECT_FALSE(buffers->HasTopLevelAllocation(const0));
- EXPECT_FALSE(buffers->HasTopLevelAllocation(const1));
- // The add node has an output buffer.
- GetAssignedOutputAllocation(*buffers, add);
+ {
+ auto buffers = RunBufferAssignment(module.get());
+ EXPECT_TRUE(buffers->HasTopLevelAllocation(const0));
+ EXPECT_TRUE(buffers->HasTopLevelAllocation(const1));
+ GetAssignedOutputAllocation(*buffers, add);
+ }
+ {
+ auto buffers = RunBufferAssignmentNoBuffersForConstants(module.get());
+ EXPECT_FALSE(buffers->HasTopLevelAllocation(const0));
+ EXPECT_FALSE(buffers->HasTopLevelAllocation(const1));
+ GetAssignedOutputAllocation(*buffers, add);
+ }
}
TEST_F(BufferAssignmentTest, HasAllocationAt) {
@@ -1094,7 +1122,7 @@ TEST_F(BufferAssignmentTest, EmbeddedComputationBuffers) {
// Allocations for the call computation should not be thread-local.
auto& call_param_alloc = GetTopLevelAllocation(*assignment, call_param);
- EXPECT_FALSE(call_param_alloc.is_entry_computation_parameter());
+ EXPECT_TRUE(call_param_alloc.is_entry_computation_parameter());
EXPECT_FALSE(call_param_alloc.maybe_live_out());
EXPECT_FALSE(call_param_alloc.is_thread_local());
@@ -1196,7 +1224,7 @@ TEST_F(BufferAssignmentTest, ElementOfNestedTupleParameterAsOutput) {
// TODO(b/32248867): Enable when buffer assignment gives allocations to
// constants.
-TEST_F(BufferAssignmentTest, DISABLED_TupleConstantAsOutput) {
+TEST_F(BufferAssignmentTest, TupleConstantAsOutput) {
// Test that a tuple constant which is forwarded to the computation output
// is properly handled.
auto builder = HloComputation::Builder(TestName());
@@ -1253,16 +1281,18 @@ TEST_F(BufferAssignmentTest, TupleCallAsOutput) {
auto assignment = RunBufferAssignment(module.get());
- EXPECT_EQ(3, assignment->Allocations().size());
+ EXPECT_EQ(2, assignment->Allocations().size());
// Buffers for call are colocated with the sub-computation.
EXPECT_EQ(GetAllocation(*assignment, call, /*index=*/{}),
GetAllocation(*assignment, sub_tuple, /*index=*/{}));
EXPECT_EQ(GetAllocation(*assignment, call, /*index=*/{0}),
GetAllocation(*assignment, sub_param, /*index=*/{}));
- // The parameter isn't aliased with anything.
+
+ // The parameter isn't aliased with the result tuple, but it is aliased with
+ // the call operand.
EXPECT_NE(GetTopLevelAllocation(*assignment, param),
GetTopLevelAllocation(*assignment, sub_tuple));
- EXPECT_NE(GetTopLevelAllocation(*assignment, param),
+ EXPECT_EQ(GetTopLevelAllocation(*assignment, param),
GetTopLevelAllocation(*assignment, sub_param));
}
@@ -1326,13 +1356,15 @@ TEST_F(BufferAssignmentTest, TupleChainedCallAsOutput) {
GetAllocation(*assignment, c_call, /*index=*/{0}));
EXPECT_EQ(GetAllocation(*assignment, c_call, /*index=*/{0}),
GetAllocation(*assignment, d_param, /*index=*/{0}));
- // The parameters aren't aliased with anything.
+
EXPECT_TRUE(BuffersDistinct({a_param}, {b_param}, *assignment));
EXPECT_TRUE(BuffersDistinct({a_param}, {c_param}, *assignment));
EXPECT_TRUE(BuffersDistinct({a_param}, {d_param}, *assignment));
- EXPECT_TRUE(BuffersDistinct({b_param}, {c_param}, *assignment));
- EXPECT_TRUE(BuffersDistinct({b_param}, {d_param}, *assignment));
- EXPECT_TRUE(BuffersDistinct({c_param}, {d_param}, *assignment));
+
+ EXPECT_EQ(GetAllocation(*assignment, b_param, /*index=*/{0}),
+ GetAllocation(*assignment, c_param, /*index=*/{0}));
+ EXPECT_EQ(GetAllocation(*assignment, c_param, /*index=*/{0}),
+ GetAllocation(*assignment, d_param, /*index=*/{0}));
}
TEST_F(BufferAssignmentTest, BitcastAsOutput) {
@@ -1640,6 +1672,66 @@ TEST_F(BufferAssignmentTest, PeakBuffersWhile) {
nonbcast_buffer->instruction() == condition->parameter_instruction(0));
}
+TEST_F(BufferAssignmentTest, ConstantBuffersAreNotReused) {
+ const char* hlo_text = R"(
+HloModule Module
+
+True {
+ ROOT x.0.1 = f32[] parameter(0)
+}
+
+False {
+ x.0.0 = f32[] parameter(0)
+ ROOT copy.1 = f32[] copy(x.0.0)
+}
+
+ENTRY main {
+ pred.1.0 = pred[] parameter(0)
+ constant.1.1 = f32[] constant(56)
+ copy.2 = f32[] copy(constant.1.1)
+ constant.1.2 = f32[] constant(12)
+ ROOT conditional.1.3 = f32[] conditional(pred.1.0, copy.2, constant.1.2),
+ true_computation=True, false_computation=False
+}
+)";
+
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
+ ParseHloString(hlo_text));
+
+ HloInstruction* constant_1 =
+ module->entry_computation()->GetInstructionWithName("constant.1.1");
+ HloInstruction* constant_2 =
+ module->entry_computation()->GetInstructionWithName("constant.1.2");
+
+ auto buffers = RunBufferAssignment(module.get());
+
+ {
+ const BufferAllocation& allocation_for_const_1 =
+ GetTopLevelAllocation(*buffers, constant_1);
+ EXPECT_TRUE(allocation_for_const_1.is_constant());
+ for (const auto& buffer_offset_pair :
+ allocation_for_const_1.assigned_buffers()) {
+ EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),
+ HloOpcode::kCopy);
+ EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),
+ HloOpcode::kConditional);
+ }
+ }
+
+ {
+ const BufferAllocation& allocation_for_const_2 =
+ GetTopLevelAllocation(*buffers, constant_2);
+ EXPECT_TRUE(allocation_for_const_2.is_constant());
+ for (const auto& buffer_offset_pair :
+ allocation_for_const_2.assigned_buffers()) {
+ EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),
+ HloOpcode::kCopy);
+ EXPECT_NE(buffer_offset_pair.first->instruction()->opcode(),
+ HloOpcode::kConditional);
+ }
+ }
+}
+
class WhileBufferAssignmentTest : public HloTestBase {
protected:
std::unique_ptr<HloComputation> BuildWhileConditionComputation(
@@ -1679,7 +1771,9 @@ class WhileBufferAssignmentTest : public HloTestBase {
return BufferAssigner::Run(
module, xla::MakeUnique<SequentialHloOrdering>(module, sequence),
ByteSizeOf,
- [alignment](LogicalBuffer::Color) { return alignment; })
+ [alignment](LogicalBuffer::Color) { return alignment; },
+ /*allow_input_output_aliasing=*/false,
+ /*allocate_buffers_for_constants=*/true)
.ConsumeValueOrDie();
}
@@ -1923,7 +2017,9 @@ TEST_F(WhileBufferAssignmentTest, ColocatedBuffers) {
module.get(),
xla::MakeUnique<SequentialHloOrdering>(module.get(), sequence),
backend().compiler()->BufferSizeBytesFunction(),
- [](LogicalBuffer::Color) { return 1; }));
+ [](LogicalBuffer::Color) { return 1; },
+ /*allow_input_output_aliasing=*/false,
+ /*allocate_buffers_for_constants=*/true));
// The result tuple elements must be assigned with different buffers.
TF_ASSERT_OK_AND_ASSIGN(auto slice0, assignment->GetUniqueSlice(tuple, {0}));
@@ -2031,6 +2127,56 @@ TEST_F(BufferAssignmentTest, TwoCalls) {
EXPECT_TRUE(BuffersDistinct({call1}, {call2}, *assignment));
}
+TEST_F(BufferAssignmentTest, CallParamCoAllocation) {
+ const char* hlo_text = R"(
+HloModule CallParamCoAllocation
+
+Callee {
+ param0 = (f32[100],(f32[200],f32[300])) parameter(0)
+ param1 = s32[20] parameter(1)
+ ROOT constant = f32[] constant(1)
+}
+
+ENTRY Main {
+ entry_param0 = f32[100] parameter(0)
+ entry_param1 = s32[20] parameter(1)
+ custom_call = (f32[200],f32[300]) custom-call(), custom_call_target="call-target"
+ call_op0 = (f32[100],(f32[200],f32[300])) tuple(entry_param0, custom_call)
+ ROOT call_result = f32[] call(call_op0, entry_param1), to_apply=Callee
+}
+)";
+
+ TF_ASSERT_OK_AND_ASSIGN(
+ std::unique_ptr<HloModule> module,
+ HloRunner::CreateModuleFromString(
+ hlo_text, legacy_flags::GetDebugOptionsFromFlags()));
+
+ auto buffers = RunBufferAssignment(module.get());
+
+ HloComputation* main = module->entry_computation();
+ HloComputation* callee = module->GetComputationWithName("Callee");
+ EXPECT_NE(callee, nullptr);
+
+ HloInstruction* param0 = callee->parameter_instruction(0);
+ HloInstruction* param1 = callee->parameter_instruction(1);
+
+ HloInstruction* entry_param0 = main->parameter_instruction(0);
+ HloInstruction* entry_param1 = main->parameter_instruction(1);
+ HloInstruction* custom_call = main->GetInstructionWithName("custom_call");
+
+ EXPECT_EQ(GetAllocation(*buffers, entry_param0, {}),
+ GetAllocation(*buffers, param0, {0}));
+ EXPECT_EQ(GetAllocation(*buffers, entry_param1, {}),
+ GetAllocation(*buffers, param1, {}));
+
+ EXPECT_EQ(GetAllocation(*buffers, custom_call, {}),
+ GetAllocation(*buffers, param0, {1}));
+ EXPECT_EQ(GetAllocation(*buffers, custom_call, {0}),
+ GetAllocation(*buffers, param0, {1, 0}));
+ EXPECT_EQ(GetAllocation(*buffers, custom_call, {1}),
+ GetAllocation(*buffers, param0, {1, 1}));
+}
+
static bool IsPostOrderTraversal(
const std::vector<const HloInstruction*>& sequence) {
tensorflow::gtl::FlatSet<const HloInstruction*> seen_so_far;
@@ -2127,7 +2273,9 @@ TEST_F(WhileBufferAssignmentTest, WhileLoopsInterferingResultRange) {
BufferAssigner::Run(
module.get(),
xla::MakeUnique<SequentialHloOrdering>(module.get(), sequence),
- ByteSizeOf, [](LogicalBuffer::Color) { return 1; })
+ ByteSizeOf, [](LogicalBuffer::Color) { return 1; },
+ /*allow_input_output_aliasing=*/false,
+ /*allocate_buffers_for_constants=*/true)
.ConsumeValueOrDie();
EXPECT_TRUE(BuffersDistinct({while0}, {while1}, *assignment));
diff --git a/tensorflow/compiler/xla/service/channel_tracker.cc b/tensorflow/compiler/xla/service/channel_tracker.cc
index a5b392cbc3..13008efed1 100644
--- a/tensorflow/compiler/xla/service/channel_tracker.cc
+++ b/tensorflow/compiler/xla/service/channel_tracker.cc
@@ -31,16 +31,23 @@ namespace xla {
ChannelTracker::ChannelTracker() : next_channel_(1) {}
-ChannelHandle ChannelTracker::NewChannel() {
+StatusOr<ChannelHandle> ChannelTracker::NewChannel(
+ ChannelHandle::ChannelType type) {
+ if (type != ChannelHandle::DEVICE_TO_DEVICE &&
+ type != ChannelHandle::HOST_TO_DEVICE &&
+ type != ChannelHandle::DEVICE_TO_HOST) {
+ return InvalidArgument("Invalid channel type: %d", type);
+ }
tensorflow::mutex_lock lock(channel_mutex_);
// Create a new channel handle with a unique value.
- const ChannelHandle new_handle = AllocateHandle();
+ ChannelHandle new_handle = AllocateHandle(type);
// Register a channel object associated with the handle.
Channel channel;
channel.has_sender = false;
channel.receiver_count = 0;
+ channel.type = type;
opaque_to_channel_[new_handle.handle()] = channel;
return new_handle;
@@ -56,10 +63,11 @@ Status ChannelTracker::RegisterRecv(const ChannelHandle& handle) {
return RegisterRecvInternal(handle);
}
-ChannelHandle ChannelTracker::AllocateHandle() {
+ChannelHandle ChannelTracker::AllocateHandle(ChannelHandle::ChannelType type) {
int64 handle_value = next_channel_++;
ChannelHandle result;
result.set_handle(handle_value);
+ result.set_type(type);
return result;
}
@@ -68,6 +76,13 @@ Status ChannelTracker::RegisterSendInternal(const ChannelHandle& handle) {
return NotFound("channel handle not found: %lld", handle.handle());
}
Channel& channel = opaque_to_channel_[handle.handle()];
+ if (channel.type == ChannelHandle::HOST_TO_DEVICE) {
+ return FailedPrecondition(
+ "host-to-device channels cannot be used with a Send operation; "
+ "channel handle: %lld",
+ handle.handle());
+ }
+
if (channel.has_sender) {
return FailedPrecondition(
"when registering send, passed a channel handle that is already used "
@@ -83,6 +98,13 @@ Status ChannelTracker::RegisterRecvInternal(const ChannelHandle& handle) {
return NotFound("channel handle not found: %lld", handle.handle());
}
Channel& channel = opaque_to_channel_[handle.handle()];
+ if (channel.type == ChannelHandle::DEVICE_TO_HOST) {
+ return FailedPrecondition(
+ "device-to-host channels cannot be used with a Recv operation; "
+ "channel handle: %lld",
+ handle.handle());
+ }
+
  // TODO(b/33942691): Allow more than one receiver for broadcast.
if (channel.receiver_count >= 1) {
return FailedPrecondition(
diff --git a/tensorflow/compiler/xla/service/channel_tracker.h b/tensorflow/compiler/xla/service/channel_tracker.h
index fac0afd672..d773558c28 100644
--- a/tensorflow/compiler/xla/service/channel_tracker.h
+++ b/tensorflow/compiler/xla/service/channel_tracker.h
@@ -48,11 +48,12 @@ class ChannelTracker {
struct Channel {
bool has_sender;
int64 receiver_count;
+ ChannelHandle::ChannelType type;
};
// Creates a new Channel object and returns the corresponding
// ChannelHandle for it.
- ChannelHandle NewChannel();
+ StatusOr<ChannelHandle> NewChannel(ChannelHandle::ChannelType type);
// Informs that the given channel handle is used for a Send operation.
// Returns an error status if the handle is already used by another Send.
@@ -65,7 +66,8 @@ class ChannelTracker {
private:
// Bumps the next_channel_ number and returns the allocated number
// wrapped in a ChannelHandle.
- ChannelHandle AllocateHandle() EXCLUSIVE_LOCKS_REQUIRED(channel_mutex_);
+ ChannelHandle AllocateHandle(ChannelHandle::ChannelType type)
+ EXCLUSIVE_LOCKS_REQUIRED(channel_mutex_);
Status RegisterSendInternal(const ChannelHandle& handle)
EXCLUSIVE_LOCKS_REQUIRED(channel_mutex_);
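// Editorial sketch, not part of the patch: with the typed channels introduced
// above, a client is expected to pick the channel kind when it asks for a
// handle, and the tracker rejects a mismatched registration. Method names
// follow this diff; error handling is elided and RegisterSend is assumed to
// mirror RegisterRecv.
ChannelTracker tracker;
ChannelHandle handle =
    tracker.NewChannel(ChannelHandle::HOST_TO_DEVICE).ValueOrDie();
TF_CHECK_OK(tracker.RegisterRecv(handle));          // receive on host-to-device: allowed
Status send_status = tracker.RegisterSend(handle);  // rejected with FailedPrecondition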
diff --git a/tensorflow/compiler/xla/service/copy_insertion.cc b/tensorflow/compiler/xla/service/copy_insertion.cc
index ab3d846403..36fb9b43aa 100644
--- a/tensorflow/compiler/xla/service/copy_insertion.cc
+++ b/tensorflow/compiler/xla/service/copy_insertion.cc
@@ -76,15 +76,6 @@ SpecialCaseCopyPolicy GetSpecialCaseCopyPolicy(const CallGraphNode& node,
policy.copy_parameters_and_constants = true;
policy.copy_root_replicated_buffers = true;
}
- for (const CallSite& site : node.caller_callsites()) {
- // The AddCopiesForConditional() already adds copies, but the copy remover
- // removes them, so we re-add them by returning the policy here. But really
- // the copy remover should not be removing them.
- if (site.instruction()->opcode() == HloOpcode::kConditional) {
- policy.copy_parameters_and_constants = true;
- policy.copy_root_replicated_buffers = true;
- }
- }
return policy;
}
@@ -360,26 +351,6 @@ Status StripControlDependenciesFrom(HloInstruction* instruction) {
return Status::OK();
}
-// Add kCopy instructions to the given module to guarantee there is no
-// live-range interference. Generally interference can only occur around kWhile
-// instructions which have update-in-place semantics.
-Status AddCopiesToResolveInterference(HloModule* module) {
- TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
- HloAliasAnalysis::Run(module));
-
- for (HloComputation* computation : module->computations()) {
- for (HloInstruction* instruction : computation->instructions()) {
- if (instruction->opcode() == HloOpcode::kWhile) {
- TF_RETURN_IF_ERROR(AddCopiesForWhile(*alias_analysis, instruction));
- } else if (instruction->opcode() == HloOpcode::kConditional) {
- TF_RETURN_IF_ERROR(
- AddCopiesForConditional(*alias_analysis, instruction));
- }
- }
- }
- return Status::OK();
-}
-
// Class for removing unnecessary copies from the module.
//
// kCopy instructions are added conservatively to guarantee no live range
@@ -954,6 +925,36 @@ class CopyRemover {
BufferValueTracker buffer_value_tracker_;
};
+void MaybeDumpModule(const string& message, const HloModule& module) {
+ if (VLOG_IS_ON(3)) {
+ VLOG(3) << message;
+ XLA_VLOG_LINES(3, module.ToString());
+ hlo_graph_dumper::MaybeDumpHloModule(module, message);
+ }
+}
+
+} // namespace
+
+// Add kCopy instructions to the given module to guarantee there is no
+// live-range interference. Generally interference can only occur around kWhile
+// instructions which have update-in-place semantics.
+Status CopyInsertion::AddCopiesToResolveInterference(HloModule* module) {
+ TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
+ HloAliasAnalysis::Run(module, fusion_can_share_buffer_));
+
+ for (HloComputation* computation : module->computations()) {
+ for (HloInstruction* instruction : computation->instructions()) {
+ if (instruction->opcode() == HloOpcode::kWhile) {
+ TF_RETURN_IF_ERROR(AddCopiesForWhile(*alias_analysis, instruction));
+ } else if (instruction->opcode() == HloOpcode::kConditional) {
+ TF_RETURN_IF_ERROR(
+ AddCopiesForConditional(*alias_analysis, instruction));
+ }
+ }
+ }
+ return Status::OK();
+}
+
// Add copies to address special constraints on the roots of computations not
// related to live range interference:
//
@@ -964,9 +965,10 @@ class CopyRemover {
//
// (3) Constants and parameters cannot be live out of the entry computation
//
-Status AddSpecialCaseCopies(const CallGraph& call_graph, HloModule* module) {
+Status CopyInsertion::AddSpecialCaseCopies(const CallGraph& call_graph,
+ HloModule* module) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
- HloAliasAnalysis::Run(module));
+ HloAliasAnalysis::Run(module, fusion_can_share_buffer_));
// Identify which shape indices of which instructions need to be copied. Store
// these results in 'instructions_to_copy'.
@@ -1074,32 +1076,20 @@ Status AddSpecialCaseCopies(const CallGraph& call_graph, HloModule* module) {
return Status::OK();
}
-Status VerifyNoLiveRangeInterference(HloModule* module) {
+Status CopyInsertion::VerifyNoLiveRangeInterference(HloModule* module) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
- HloAliasAnalysis::Run(module));
+ HloAliasAnalysis::Run(module, fusion_can_share_buffer_));
DependencyHloOrdering ordering(module);
TF_RET_CHECK(!alias_analysis->HasLiveRangeInterference(ordering));
return Status::OK();
}
-void MaybeDumpModule(const string& message, const HloModule& module) {
- if (VLOG_IS_ON(3)) {
- VLOG(3) << message;
- XLA_VLOG_LINES(3, module.ToString());
- hlo_graph_dumper::MaybeDumpHloModule(module, message);
- }
-}
-
-} // namespace
-
-Status RemoveUnnecessaryCopies(
- const HloOrdering& ordering, HloModule* module,
- const HloDataflowAnalysis::FusionCanShareBufferFunction&
- fusion_can_share_buffer) {
+Status CopyInsertion::RemoveUnnecessaryCopies(const HloOrdering& ordering,
+ HloModule* module) {
MaybeDumpModule("after adding copies to resolve interference", *module);
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
- HloAliasAnalysis::Run(module, fusion_can_share_buffer));
+ HloAliasAnalysis::Run(module, fusion_can_share_buffer_));
CopyRemover copy_remover(*alias_analysis, ordering, module);
XLA_VLOG_LINES(3, copy_remover.ToString());
diff --git a/tensorflow/compiler/xla/service/copy_insertion.h b/tensorflow/compiler/xla/service/copy_insertion.h
index e1973db928..5ba64b78a3 100644
--- a/tensorflow/compiler/xla/service/copy_insertion.h
+++ b/tensorflow/compiler/xla/service/copy_insertion.h
@@ -71,20 +71,26 @@ class CopyInsertion : public HloPassInterface {
// TODO(b/62548313): Remove this when buffer assignment is module-scoped.
static StatusOr<bool> AddCopiesForBufferAssignment(HloModule* module);
+ // Try to remove as many copies from the module as possible without
+ // introducing live range interference. Only copy instructions that are
+ // eligible for copy elision are considered for removal.
+ Status RemoveUnnecessaryCopies(const HloOrdering& ordering,
+ HloModule* module);
+
private:
+  // Verifies that no HLO values have interfering live ranges assuming the
+ // ordering used by copy insertion.
+ Status VerifyNoLiveRangeInterference(HloModule* module);
+
+ Status AddCopiesToResolveInterference(HloModule* module);
+
+ Status AddSpecialCaseCopies(const CallGraph& call_graph, HloModule* module);
+
// Backend specific function that decides whether a fusion can share buffer
// with its operand.
HloDataflowAnalysis::FusionCanShareBufferFunction fusion_can_share_buffer_;
};
-// Try to remove as many copies from the module as possible without introducing
-// live range interference. Only copy instructions that are eligible for
-// copy elision are considered for removal.
-Status RemoveUnnecessaryCopies(
- const HloOrdering& ordering, HloModule* module,
- const HloDataflowAnalysis::FusionCanShareBufferFunction&
- fusion_can_share_buffer = nullptr);
-
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_COPY_INSERTION_H_
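// Editorial sketch, not part of the patch: RemoveUnnecessaryCopies is now a
// CopyInsertion member, so a caller that previously passed the
// fusion_can_share_buffer callback to the free function is assumed to hand it
// to the pass instead (the constructor itself is outside this hunk). Inside a
// function returning Status:
CopyInsertion copy_insertion(fusion_can_share_buffer);
DependencyHloOrdering ordering(module);  // `module` is an HloModule*
TF_RETURN_IF_ERROR(copy_insertion.RemoveUnnecessaryCopies(ordering, module));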
diff --git a/tensorflow/compiler/xla/service/cpu/BUILD b/tensorflow/compiler/xla/service/cpu/BUILD
index c45d914e93..ace9f96cfb 100644
--- a/tensorflow/compiler/xla/service/cpu/BUILD
+++ b/tensorflow/compiler/xla/service/cpu/BUILD
@@ -252,12 +252,12 @@ cc_library(
"//tensorflow/compiler/xla/service:hlo_module_config",
"//tensorflow/compiler/xla/service:name_uniquer",
"//tensorflow/compiler/xla/service/llvm_ir:alias_analysis",
+ "//tensorflow/compiler/xla/service/llvm_ir:dynamic_update_slice_util",
"//tensorflow/compiler/xla/service/llvm_ir:fused_ir_emitter",
"//tensorflow/compiler/xla/service/llvm_ir:ir_array",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_loop",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
"//tensorflow/compiler/xla/service/llvm_ir:loop_emitter",
- "//tensorflow/compiler/xla/service/llvm_ir:ops",
"//tensorflow/compiler/xla/service/llvm_ir:tuple_ops",
"//tensorflow/core:lib",
"@llvm//:code_gen",
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
index 6dfc666f09..593575c0fd 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
@@ -39,13 +39,14 @@ class CpuTransferManager : public GenericTransferManager {
Status TransferLiteralToInfeed(se::StreamExecutor* executor,
const LiteralSlice& literal) override;
- Status TransferBufferToInfeed(se::StreamExecutor* executor, int64 size,
- const void* source) override;
Status TransferLiteralFromOutfeed(se::StreamExecutor* executor,
const Shape& literal_shape,
Literal* literal) override;
private:
+ Status TransferBufferToInfeed(se::StreamExecutor* executor, int64 size,
+ const void* source);
+
// Transfers infeed data to device. InfeedBuffer->Done() must be
// called to clean up the memory allocated for InfeedBuffer.
StatusOr<cpu::runtime::XfeedBuffer*> TransferBufferToInfeedInternal(
diff --git a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
index 58228180ca..1fdeceb860 100644
--- a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.cc
@@ -49,15 +49,15 @@ class MemoryTile {
// `tile_size_along_major_dim` vectors from the matrix `matrix`, starting at
// `major_dim_offset` in the major dimension. The tile size along the minor
// dimension is the vector size, and that is implicitly determined by `vsl`.
- MemoryTile(VectorSupportLibrary* vsl, llvm::IRBuilder<>* ir_builder,
+ MemoryTile(VectorSupportLibrary* vsl, llvm::IRBuilder<>* b,
llvm::Value* matrix, int64 matrix_size_along_minor_dim,
llvm::Value* major_dim_offset, int64 tile_size_along_major_dim)
- : vsl_(vsl), ir_builder_(ir_builder) {
+ : vsl_(vsl), b_(b) {
pointers_.reserve(tile_size_along_major_dim);
for (int64 i = 0; i < tile_size_along_major_dim; i++) {
- llvm::Value* total_offset = ir_builder->CreateMul(
- ir_builder->getInt64(matrix_size_along_minor_dim),
- ir_builder->CreateAdd(ir_builder->getInt64(i), major_dim_offset));
+ llvm::Value* total_offset =
+ b->CreateMul(b->getInt64(matrix_size_along_minor_dim),
+ b->CreateAdd(b->getInt64(i), major_dim_offset));
pointers_.push_back(vsl_->ComputeOffsetPointer(matrix, total_offset));
}
}
@@ -101,8 +101,7 @@ class MemoryTile {
for (int64 i = 0; i < pointers_.size(); i++) {
for (int64 j = 0; j < tile_size_along_middle_dim; j++) {
result[i].push_back(vsl_->LoadBroadcast(
- pointers_[i], ir_builder_->CreateAdd(minor_dim_offset,
- ir_builder_->getInt64(j))));
+ pointers_[i], b_->CreateAdd(minor_dim_offset, b_->getInt64(j))));
}
}
return result;
@@ -110,7 +109,7 @@ class MemoryTile {
private:
VectorSupportLibrary* vsl_;
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
std::vector<llvm::Value*> pointers_;
};
@@ -249,16 +248,15 @@ class ColumnMajorMatrixVectorProductEmitter
ColumnMajorMatrixVectorProductEmitter(const Config& config, llvm::Value* lhs,
llvm::Value* rhs, llvm::Value* addend,
llvm::Value* result,
- llvm::IRBuilder<>* ir_builder)
+ llvm::IRBuilder<>* b)
: config_(config),
lhs_(lhs),
rhs_(rhs),
addend_(addend),
result_(result),
- ir_builder_(ir_builder),
- ksl_(ir_builder_),
- vsl_(config.scalar_type(), /*vector_size=*/config.tile_rows(),
- ir_builder_, "") {
+ b_(b),
+ ksl_(b_),
+ vsl_(config.scalar_type(), /*vector_size=*/config.tile_rows(), b_, "") {
CHECK(tile_rows() > 0 && IsPowerOfTwo(static_cast<uint64>(tile_rows())));
CHECK(!has_addend() || addend != nullptr);
}
@@ -272,7 +270,7 @@ class ColumnMajorMatrixVectorProductEmitter
bool is_first_column);
MemoryTile GetLhsMemoryTile(llvm::Value* column_start, int64 column_count) {
- return MemoryTile(&vsl_, ir_builder_, /*matrix=*/lhs_,
+ return MemoryTile(&vsl_, b_, /*matrix=*/lhs_,
/*matrix_size_along_minor_dim=*/m(),
/*major_dim_offset=*/column_start,
/*tile_size_along_major_dim=*/column_count);
@@ -302,7 +300,7 @@ class ColumnMajorMatrixVectorProductEmitter
llvm::Value* rhs_;
llvm::Value* addend_;
llvm::Value* result_;
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
KernelSupportLibrary ksl_;
VectorSupportLibrary vsl_;
};
@@ -331,7 +329,7 @@ void ColumnMajorMatrixVectorProductEmitter::Emit() {
});
if (column_remainder != 0) {
- EmitOuterLoopBody(ir_builder_->getInt64(column_limit), column_remainder,
+ EmitOuterLoopBody(b_->getInt64(column_limit), column_remainder,
column_limit == 0);
}
}
@@ -364,7 +362,7 @@ void ColumnMajorMatrixVectorProductEmitter::EmitInnerLoopEpilogue(
return;
}
- llvm::Value* columns_llvm = ir_builder_->getInt64(columns);
+ llvm::Value* columns_llvm = b_->getInt64(columns);
// for (col = current_tile_col; col < (columns + current_tile_col); col++)
//   for (row = row_start; row < m_; row++) {
@@ -375,12 +373,11 @@ void ColumnMajorMatrixVectorProductEmitter::EmitInnerLoopEpilogue(
ksl_.ForReturnVoid(
"dot.inner.epilg.outer", /*start=*/current_tile_col,
- /*end=*/ir_builder_->CreateAdd(columns_llvm, current_tile_col),
+ /*end=*/b_->CreateAdd(columns_llvm, current_tile_col),
/*step=*/1, /*peel_first_iteration=*/false,
[&](llvm::Value* col, llvm::Value* is_first_scalar_col) {
llvm::Value* rhs_element = vsl_.LoadScalar(rhs_, col);
- llvm::Value* total_offset =
- ir_builder_->CreateMul(col, ir_builder_->getInt64(m()));
+ llvm::Value* total_offset = b_->CreateMul(col, b_->getInt64(m()));
llvm::Value* lhs_base_pointer =
vsl_.ComputeOffsetPointer(lhs_, total_offset);
ksl_.ForReturnVoid(
@@ -388,9 +385,8 @@ void ColumnMajorMatrixVectorProductEmitter::EmitInnerLoopEpilogue(
/*step=*/1, [&](llvm::Value* scalar_row) {
llvm::Value* product = vsl_.Mul(
vsl_.LoadScalar(lhs_base_pointer, scalar_row), rhs_element);
- llvm::Value* setting_result_first_time = ir_builder_->CreateAnd(
- is_first_scalar_col,
- ir_builder_->getInt1(is_first_tiled_column));
+ llvm::Value* setting_result_first_time = b_->CreateAnd(
+ is_first_scalar_col, b_->getInt1(is_first_tiled_column));
ksl_.IfReturnVoid(
setting_result_first_time,
/*true_block_generator=*/
@@ -478,16 +474,15 @@ class RowMajorMatrixVectorProductEmitter
RowMajorMatrixVectorProductEmitter(const Config& config, llvm::Value* lhs,
llvm::Value* rhs, llvm::Value* addend,
- llvm::Value* result,
- llvm::IRBuilder<>* ir_builder)
+ llvm::Value* result, llvm::IRBuilder<>* b)
: config_(config),
lhs_(lhs),
rhs_(rhs),
addend_(addend),
result_(result),
- ir_builder_(ir_builder),
- ksl_(ir_builder_),
- vsl_(scalar_type(), /*vector_size=*/tile_cols(), ir_builder_, "") {
+ b_(b),
+ ksl_(b_),
+ vsl_(scalar_type(), /*vector_size=*/tile_cols(), b_, "") {
CHECK(tile_cols() > 0 && IsPowerOfTwo(static_cast<uint64>(tile_cols())));
CHECK(!has_addend() || addend != nullptr);
}
@@ -498,7 +493,7 @@ class RowMajorMatrixVectorProductEmitter
private:
MemoryTile GetLhsMemoryTile(llvm::Value* row_start, int64 row_count) {
- return MemoryTile(&vsl_, ir_builder_, /*matrix=*/lhs_,
+ return MemoryTile(&vsl_, b_, /*matrix=*/lhs_,
/*matrix_size_along_minor_dim=*/k(),
/*major_dim_offset=*/row_start,
/*tile_size_along_major_dim=*/row_count);
@@ -517,7 +512,7 @@ class RowMajorMatrixVectorProductEmitter
llvm::Value* rhs_;
llvm::Value* addend_;
llvm::Value* result_;
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
KernelSupportLibrary ksl_;
VectorSupportLibrary vsl_;
};
@@ -559,7 +554,7 @@ void RowMajorMatrixVectorProductEmitter::EmitOuterLoopBody(llvm::Value* row,
for (int i = 0; i < row_count; i++) {
llvm::Value* result_value =
vsl_.Add(horizontal_sums[i], scalar_accumulators[i].Get());
- llvm::Value* offset = ir_builder_->CreateAdd(ir_builder_->getInt64(i), row);
+ llvm::Value* offset = b_->CreateAdd(b_->getInt64(i), row);
if (addend_ && row_count != vsl_.vector_size()) {
result_value = vsl_.Add(vsl_.LoadScalar(addend_, offset), result_value);
}
@@ -578,7 +573,7 @@ void RowMajorMatrixVectorProductEmitter::Emit() {
[&](llvm::Value* row) { EmitOuterLoopBody(row, tile_rows()); });
if (row_remainder != 0) {
- EmitOuterLoopBody(ir_builder_->getInt64(row_limit), row_remainder);
+ EmitOuterLoopBody(b_->getInt64(row_limit), row_remainder);
}
}
@@ -609,9 +604,8 @@ void RowMajorMatrixVectorProductEmitter::EmitInnerLoopEpilogue(
}
for (int r = 0; r < rows; r++) {
- llvm::Value* total_offset = ir_builder_->CreateMul(
- ir_builder_->CreateAdd(ir_builder_->getInt64(r), current_tile_row),
- ir_builder_->getInt64(k()));
+ llvm::Value* total_offset = b_->CreateMul(
+ b_->CreateAdd(b_->getInt64(r), current_tile_row), b_->getInt64(k()));
llvm::Value* lhs_base_pointer =
vsl_.ComputeOffsetPointer(lhs_, total_offset);
ksl_.ForReturnVoid(
@@ -722,13 +716,13 @@ class MatrixMatrixBlockPanelEmitter {
// `lhs` with `rhs` and stores the result in `result`.
explicit MatrixMatrixBlockPanelEmitter(Config config, llvm::Value* lhs,
llvm::Value* rhs, llvm::Value* result,
- llvm::IRBuilder<>* ir_builder)
+ llvm::IRBuilder<>* b)
: lhs_(lhs),
rhs_(rhs),
result_(result),
config_(config),
- ir_builder_(ir_builder),
- ksl_(ir_builder_) {
+ b_(b),
+ ksl_(b_) {
CHECK(max_vectorization_width() > 0 &&
IsPowerOfTwo(static_cast<uint64>(max_vectorization_width())));
CHECK_GT(max_vector_count(), 0);
@@ -761,7 +755,7 @@ class MatrixMatrixBlockPanelEmitter {
int64 tile_size_m, llvm::Value* m_start,
llvm::Value* m_end);
- llvm::Value* GetInt64(int64 value) { return ir_builder_->getInt64(value); }
+ llvm::Value* GetInt64(int64 value) { return b_->getInt64(value); }
Config config() const { return config_; }
Dimensions dims() const { return config().dims(); }
@@ -782,7 +776,7 @@ class MatrixMatrixBlockPanelEmitter {
llvm::Value* result_;
Config config_;
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
KernelSupportLibrary ksl_;
};
@@ -804,8 +798,8 @@ void MatrixMatrixBlockPanelEmitter::HandleResiduesOnN() {
current_vectorization_width >= min_vectorization_width()) {
int64 n_end = dims().n() - (dims().n() % current_vectorization_width);
if (n_start != n_end) {
- VectorSupportLibrary vsl(scalar_type(), current_vectorization_width,
- ir_builder_, "gebp");
+ VectorSupportLibrary vsl(scalar_type(), current_vectorization_width, b_,
+ "gebp");
HandleResiduesOnK(&vsl, GetInt64(n_start), GetInt64(n_end));
n_start = n_end;
}
@@ -819,10 +813,9 @@ void MatrixMatrixBlockPanelEmitter::HandleResiduesOnN() {
}
if (n_start != dims().n()) {
- VectorSupportLibrary vsl(scalar_type(), 1, ir_builder_, "gebp");
+ VectorSupportLibrary vsl(scalar_type(), 1, b_, "gebp");
ksl_.ForReturnVoid("epi.n", n_start, dims().n(), 1, [&](llvm::Value* n_i) {
- llvm::Value* n_i_next =
- ir_builder_->CreateAdd(n_i, ir_builder_->getInt64(1));
+ llvm::Value* n_i_next = b_->CreateAdd(n_i, b_->getInt64(1));
HandleResiduesOnK(&vsl, n_i, n_i_next);
});
}
@@ -935,11 +928,11 @@ void MatrixMatrixBlockPanelEmitter::EmitTiledGemm(
ksl_.ForReturnVoid(
"dot.m", m_start, m_end, tile_size_m, [&](llvm::Value* m_i) {
MemoryTile result_memory_tile(
- vsl, ir_builder_, /*matrix=*/result_,
+ vsl, b_, /*matrix=*/result_,
/*matrix_size_along_minor_dim=*/dims().n(),
/*major_dim_offset=*/m_i,
/*tile_size_along_major_dim=*/tile_size_m);
- MemoryTile lhs_memory_tile(vsl, ir_builder_, /*matrix=*/lhs_,
+ MemoryTile lhs_memory_tile(vsl, b_, /*matrix=*/lhs_,
/*matrix_size_along_minor_dim=*/dims().k(),
/*major_dim_offset=*/m_i,
/*tile_size_along_major_dim=*/tile_size_m);
@@ -949,8 +942,8 @@ void MatrixMatrixBlockPanelEmitter::EmitTiledGemm(
result_memory_tile.LoadTile(n_i));
ksl_.ForReturnVoid(
"dot.k", k_start, k_end, tile_size_k, [&](llvm::Value* k_i) {
- MemoryTile rhs_memory_tile(vsl, ir_builder_, rhs_,
- dims().n(), k_i, tile_size_k);
+ MemoryTile rhs_memory_tile(vsl, b_, rhs_, dims().n(), k_i,
+ tile_size_k);
std::vector<std::vector<llvm::Value*>> lhs_tile =
lhs_memory_tile.LoadBroadcastTile(k_i, tile_size_k);
std::vector<llvm::Value*> rhs_tile =
@@ -980,7 +973,7 @@ DotOpEmitter::DotOpEmitter(const HloInstruction& dot,
const llvm_ir::IrArray& rhs_array,
const llvm_ir::IrArray* addend_array,
llvm::Value* executable_run_options_value,
- llvm::IRBuilder<>* ir_builder,
+ llvm::IRBuilder<>* b,
const HloModuleConfig& hlo_module_config,
const TargetMachineFeatures& target_machine_features)
: dot_(dot),
@@ -989,7 +982,7 @@ DotOpEmitter::DotOpEmitter(const HloInstruction& dot,
rhs_array_(rhs_array),
addend_array_(addend_array),
executable_run_options_value_(executable_run_options_value),
- ir_builder_(ir_builder),
+ b_(b),
hlo_module_config_(hlo_module_config),
target_machine_features_(target_machine_features) {}
@@ -997,15 +990,14 @@ DotOpEmitter::DotOpEmitter(const HloInstruction& dot,
const HloInstruction& dot, const llvm_ir::IrArray& target_array,
const llvm_ir::IrArray& lhs_array, const llvm_ir::IrArray& rhs_array,
const llvm_ir::IrArray* addend_array,
- llvm::Value* executable_run_options_value, llvm::IRBuilder<>* ir_builder,
+ llvm::Value* executable_run_options_value, llvm::IRBuilder<>* b,
const HloModuleConfig& hlo_module_config,
const TargetMachineFeatures& target_machine_features) {
PrimitiveType type = target_array.GetShape().element_type();
TF_RET_CHECK(F16 == type || F32 == type || F64 == type || C64 == type);
DotOpEmitter dot_emitter(dot, target_array, lhs_array, rhs_array,
- addend_array, executable_run_options_value,
- ir_builder, hlo_module_config,
- target_machine_features);
+ addend_array, executable_run_options_value, b,
+ hlo_module_config, target_machine_features);
return dot_emitter.Emit();
}
@@ -1050,13 +1042,13 @@ bool DotOpEmitter::EmitExperimentalGebpDotIfEnabled(
}
int64 size_bytes = m * n * ShapeUtil::ByteSizeOfPrimitiveType(primitive_type);
- ir_builder_->CreateMemSet(
- target, ir_builder_->getInt8(0), size_bytes,
+ b_->CreateMemSet(
+ target, b_->getInt8(0), size_bytes,
target_machine_features_.minimum_alignment_for_allocation(size_bytes));
int64 max_target_vector_width =
target_machine_features_.vector_register_num_elements(
- *ir_builder_->GetInsertBlock()->getParent(), primitive_type);
+ *b_->GetInsertBlock()->getParent(), primitive_type);
int64 tile_size_m, tile_size_k, tile_size_n_in_vector_width;
std::tie(tile_size_m, tile_size_k, tile_size_n_in_vector_width) =
@@ -1080,12 +1072,12 @@ bool DotOpEmitter::EmitExperimentalGebpDotIfEnabled(
KernelSupportLibrary::EmitAndCallOutlinedKernel(
/*enable_fast_math=*/enable_fast_math,
- /*optimize_for_size=*/optimize_for_size, ir_builder_,
- config.GetCacheKey(), lhs, rhs, target,
+ /*optimize_for_size=*/optimize_for_size, b_, config.GetCacheKey(), lhs,
+ rhs, target,
[this, config](llvm::Value* lhs, llvm::Value* rhs, llvm::Value* target) {
- MatrixMatrixBlockPanelEmitter gebp_emitter(
- config, /*lhs=*/lhs, /*rhs=*/rhs,
- /*result=*/target, ir_builder_);
+ MatrixMatrixBlockPanelEmitter gebp_emitter(config, /*lhs=*/lhs,
+ /*rhs=*/rhs,
+ /*result=*/target, b_);
gebp_emitter.Emit();
});
@@ -1163,7 +1155,7 @@ bool DotOpEmitter::EmitLlvmIrDotIfProfitable() {
const int target_vector_register_element_size =
target_machine_features_.vector_register_num_elements(
- *ir_builder_->GetInsertBlock()->getParent(), primitive_type);
+ *b_->GetInsertBlock()->getParent(), primitive_type);
// We may not always know the vector register size for the target we're
// compiling against, in which case target_vector_register_element_size is 0.
@@ -1184,13 +1176,13 @@ bool DotOpEmitter::EmitLlvmIrDotIfProfitable() {
KernelSupportLibrary::EmitAndCallOutlinedKernel(
/*enable_fast_math=*/enable_fast_math,
- /*optimize_for_size=*/optimize_for_size, ir_builder_,
- config.GetCacheKey(), lhs_op, rhs_op,
+ /*optimize_for_size=*/optimize_for_size, b_, config.GetCacheKey(),
+ lhs_op, rhs_op,
addend_array_ ? addend_array_->GetBasePointer() : nullptr, result_op,
[this, config](llvm::Value* lhs_op, llvm::Value* rhs_op,
llvm::Value* addend_op, llvm::Value* result_op) {
ColumnMajorMatrixVectorProductEmitter emitter(
- config, lhs_op, rhs_op, addend_op, result_op, ir_builder_);
+ config, lhs_op, rhs_op, addend_op, result_op, b_);
emitter.Emit();
});
} else {
@@ -1203,13 +1195,13 @@ bool DotOpEmitter::EmitLlvmIrDotIfProfitable() {
KernelSupportLibrary::EmitAndCallOutlinedKernel(
/*enable_fast_math=*/enable_fast_math,
- /*optimize_for_size=*/optimize_for_size, ir_builder_,
- config.GetCacheKey(), lhs_op, rhs_op,
+ /*optimize_for_size=*/optimize_for_size, b_, config.GetCacheKey(),
+ lhs_op, rhs_op,
addend_array_ ? addend_array_->GetBasePointer() : nullptr, result_op,
[this, config](llvm::Value* lhs_op, llvm::Value* rhs_op,
llvm::Value* addend_op, llvm::Value* result_op) {
- RowMajorMatrixVectorProductEmitter emitter(
- config, lhs_op, rhs_op, addend_op, result_op, ir_builder_);
+ RowMajorMatrixVectorProductEmitter emitter(config, lhs_op, rhs_op,
+ addend_op, result_op, b_);
emitter.Emit();
});
}
@@ -1285,7 +1277,7 @@ Status DotOpEmitter::Emit() {
// Create loop nests which loop through the LHS operand dimensions and the RHS
// operand dimensions. The reduction dimension of the LHS and RHS are handled
// in a separate innermost loop which performs the sum of products.
- llvm_ir::ForLoopNest loop_nest(llvm_ir::IrName(&dot_), ir_builder_);
+ llvm_ir::ForLoopNest loop_nest(llvm_ir::IrName(&dot_), b_);
llvm_ir::IrArray::Index lhs_index = EmitOperandArrayLoopNest(
&loop_nest, lhs_array_, lhs_reduction_dimension, "lhs");
llvm_ir::IrArray::Index rhs_index = EmitOperandArrayLoopNest(
@@ -1319,62 +1311,55 @@ Status DotOpEmitter::Emit() {
// Function entry basic block.
// - Emit alloca for accumulator
llvm::Function* func = reduction_loop->GetPreheaderBasicBlock()->getParent();
- SetToFirstInsertPoint(&func->getEntryBlock(), ir_builder_);
+ SetToFirstInsertPoint(&func->getEntryBlock(), b_);
llvm::Type* accum_type = target_array_.GetElementLlvmType();
- llvm::Value* accum_address = ir_builder_->CreateAlloca(
- accum_type, /*ArraySize=*/nullptr, "accum_address");
+ llvm::Value* accum_address =
+ b_->CreateAlloca(accum_type, /*ArraySize=*/nullptr, "accum_address");
// Preheader basic block of reduction loop:
// - Initialize accumulator to zero.
llvm::BasicBlock* preheader_bb = reduction_loop->GetPreheaderBasicBlock();
- ir_builder_->SetInsertPoint(preheader_bb->getTerminator());
+ b_->SetInsertPoint(preheader_bb->getTerminator());
- ir_builder_->CreateStore(llvm::Constant::getNullValue(accum_type),
- accum_address);
+ b_->CreateStore(llvm::Constant::getNullValue(accum_type), accum_address);
// Body basic block of reduction loop:
// - Load elements from lhs and rhs array.
// - Multiply lhs-element and rhs-element.
// - Load accumulator and add to product.
// - Store sum back into accumulator.
- SetToFirstInsertPoint(reduction_loop->GetBodyBasicBlock(), ir_builder_);
+ SetToFirstInsertPoint(reduction_loop->GetBodyBasicBlock(), b_);
- llvm::Value* lhs_element =
- lhs_array_.EmitReadArrayElement(lhs_index, ir_builder_);
- llvm::Value* rhs_element =
- rhs_array_.EmitReadArrayElement(rhs_index, ir_builder_);
+ llvm::Value* lhs_element = lhs_array_.EmitReadArrayElement(lhs_index, b_);
+ llvm::Value* rhs_element = rhs_array_.EmitReadArrayElement(rhs_index, b_);
- llvm::Value* accum = ir_builder_->CreateLoad(accum_address);
+ llvm::Value* accum = b_->CreateLoad(accum_address);
llvm::Value* updated_accum;
if (ShapeUtil::ElementIsComplex(lhs_shape)) {
- auto real = [&](llvm::Value* x) {
- return ir_builder_->CreateExtractValue(x, {0});
- };
- auto imag = [&](llvm::Value* x) {
- return ir_builder_->CreateExtractValue(x, {1});
- };
- llvm::Value* product_real = ir_builder_->CreateFSub(
- ir_builder_->CreateFMul(real(lhs_element), real(rhs_element)),
- ir_builder_->CreateFMul(imag(lhs_element), imag(rhs_element)));
- llvm::Value* product_imag = ir_builder_->CreateFAdd(
- ir_builder_->CreateFMul(real(lhs_element), imag(rhs_element)),
- ir_builder_->CreateFMul(imag(lhs_element), real(rhs_element)));
- updated_accum = ir_builder_->CreateInsertValue(
- accum, ir_builder_->CreateFAdd(real(accum), product_real), {0});
- updated_accum = ir_builder_->CreateInsertValue(
- updated_accum, ir_builder_->CreateFAdd(imag(accum), product_imag), {1});
+ auto real = [&](llvm::Value* x) { return b_->CreateExtractValue(x, {0}); };
+ auto imag = [&](llvm::Value* x) { return b_->CreateExtractValue(x, {1}); };
+ llvm::Value* product_real =
+ b_->CreateFSub(b_->CreateFMul(real(lhs_element), real(rhs_element)),
+ b_->CreateFMul(imag(lhs_element), imag(rhs_element)));
+ llvm::Value* product_imag =
+ b_->CreateFAdd(b_->CreateFMul(real(lhs_element), imag(rhs_element)),
+ b_->CreateFMul(imag(lhs_element), real(rhs_element)));
+ updated_accum = b_->CreateInsertValue(
+ accum, b_->CreateFAdd(real(accum), product_real), {0});
+ updated_accum = b_->CreateInsertValue(
+ updated_accum, b_->CreateFAdd(imag(accum), product_imag), {1});
} else {
- llvm::Value* product = ir_builder_->CreateFMul(lhs_element, rhs_element);
- updated_accum = ir_builder_->CreateFAdd(accum, product);
+ llvm::Value* product = b_->CreateFMul(lhs_element, rhs_element);
+ updated_accum = b_->CreateFAdd(accum, product);
}
- ir_builder_->CreateStore(updated_accum, accum_address);
+ b_->CreateStore(updated_accum, accum_address);
// Exit basic block of reduction loop.
// - Load accumulator value (the result).
// - Store into output array.
- SetToFirstInsertPoint(reduction_loop->GetExitBasicBlock(), ir_builder_);
+ SetToFirstInsertPoint(reduction_loop->GetExitBasicBlock(), b_);
- llvm::Value* result = ir_builder_->CreateLoad(accum_address);
+ llvm::Value* result = b_->CreateLoad(accum_address);
// Create index into target address. The target index is the concatenation of
// the rhs and lhs indexes with the reduction dimensions removed. The terms
@@ -1392,11 +1377,11 @@ Status DotOpEmitter::Emit() {
}
}
- target_array_.EmitWriteArrayElement(target_index, result, ir_builder_);
+ target_array_.EmitWriteArrayElement(target_index, result, b_);
// Set the IR builder insert point to the exit basic block of the outermost
// loop.
- ir_builder_->SetInsertPoint(loop_nest.GetOuterLoopExitBasicBlock());
+ b_->SetInsertPoint(loop_nest.GetOuterLoopExitBasicBlock());
return Status::OK();
}
@@ -1405,31 +1390,30 @@ Status DotOpEmitter::EmitScalarDot() {
// A scalar dot is just a scalar multiply.
llvm::Value* result;
// Use the same index_type for all tensor accesses in the same kernel.
- llvm::Type* index_type = ir_builder_->getInt64Ty();
+ llvm::Type* index_type = b_->getInt64Ty();
llvm_ir::IrArray::Index element_index(index_type);
llvm::Value* lhs_value =
- lhs_array_.EmitReadArrayElement(/*index=*/element_index, ir_builder_);
+ lhs_array_.EmitReadArrayElement(/*index=*/element_index, b_);
llvm::Value* rhs_value =
- rhs_array_.EmitReadArrayElement(/*index=*/element_index, ir_builder_);
+ rhs_array_.EmitReadArrayElement(/*index=*/element_index, b_);
if (ShapeUtil::ElementIsComplex(lhs_array_.GetShape())) {
-#define REAL(x) ir_builder_->CreateExtractValue(x, {0})
-#define IMAG(x) ir_builder_->CreateExtractValue(x, {1})
- llvm::Value* real = ir_builder_->CreateFSub(
- ir_builder_->CreateFMul(REAL(lhs_value), REAL(rhs_value)),
- ir_builder_->CreateFMul(IMAG(lhs_value), IMAG(rhs_value)));
- llvm::Value* imag = ir_builder_->CreateFAdd(
- ir_builder_->CreateFMul(REAL(lhs_value), IMAG(rhs_value)),
- ir_builder_->CreateFMul(IMAG(lhs_value), REAL(rhs_value)));
+#define REAL(x) b_->CreateExtractValue(x, {0})
+#define IMAG(x) b_->CreateExtractValue(x, {1})
+ llvm::Value* real =
+ b_->CreateFSub(b_->CreateFMul(REAL(lhs_value), REAL(rhs_value)),
+ b_->CreateFMul(IMAG(lhs_value), IMAG(rhs_value)));
+ llvm::Value* imag =
+ b_->CreateFAdd(b_->CreateFMul(REAL(lhs_value), IMAG(rhs_value)),
+ b_->CreateFMul(IMAG(lhs_value), REAL(rhs_value)));
#undef IMAG
#undef REAL
result = llvm::ConstantAggregateZero::get(lhs_array_.GetElementLlvmType());
- result = ir_builder_->CreateInsertValue(result, real, {0});
- result = ir_builder_->CreateInsertValue(result, imag, {1});
+ result = b_->CreateInsertValue(result, real, {0});
+ result = b_->CreateInsertValue(result, imag, {1});
} else {
- result = ir_builder_->CreateFMul(lhs_value, rhs_value);
+ result = b_->CreateFMul(lhs_value, rhs_value);
}
- target_array_.EmitWriteArrayElement(/*index=*/element_index, result,
- ir_builder_);
+ target_array_.EmitWriteArrayElement(/*index=*/element_index, result, b_);
return Status::OK();
}
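// Editorial note, not part of the patch: the REAL/IMAG expressions in
// EmitScalarDot compute the standard complex product
// (a + bi)(c + di) = (ac - bd) + (ad + bc)i before the result is written back
// as a {real, imag} aggregate.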
@@ -1452,7 +1436,7 @@ Status DotOpEmitter::EmitCallToRuntime() {
fn_name = multi_threaded
? runtime::kEigenMatMulF16SymbolName
: runtime::kEigenSingleThreadedMatMulF16SymbolName;
- float_type = ir_builder_->getHalfTy();
+ float_type = b_->getHalfTy();
break;
case F32:
fn_name = multi_threaded
@@ -1461,7 +1445,7 @@ Status DotOpEmitter::EmitCallToRuntime() {
: (use_mkl_dnn
? runtime::kMKLSingleThreadedMatMulF32SymbolName
: runtime::kEigenSingleThreadedMatMulF32SymbolName);
- float_type = ir_builder_->getFloatTy();
+ float_type = b_->getFloatTy();
break;
case F64:
fn_name = multi_threaded
@@ -1470,7 +1454,7 @@ Status DotOpEmitter::EmitCallToRuntime() {
: (use_mkl_dnn
? runtime::kMKLSingleThreadedMatMulF64SymbolName
: runtime::kEigenSingleThreadedMatMulF64SymbolName);
- float_type = ir_builder_->getDoubleTy();
+ float_type = b_->getDoubleTy();
break;
default:
return Unimplemented("Invalid type %s for dot operation",
@@ -1478,16 +1462,16 @@ Status DotOpEmitter::EmitCallToRuntime() {
}
llvm::Type* float_ptr_type = float_type->getPointerTo();
- llvm::Type* int64_type = ir_builder_->getInt64Ty();
- llvm::Type* int32_type = ir_builder_->getInt32Ty();
- llvm::Type* int8_ptr_type = ir_builder_->getInt8Ty()->getPointerTo();
+ llvm::Type* int64_type = b_->getInt64Ty();
+ llvm::Type* int32_type = b_->getInt32Ty();
+ llvm::Type* int8_ptr_type = b_->getInt8Ty()->getPointerTo();
llvm::FunctionType* matmul_type = llvm::FunctionType::get(
- ir_builder_->getVoidTy(),
+ b_->getVoidTy(),
{int8_ptr_type, float_ptr_type, float_ptr_type, float_ptr_type,
int64_type, int64_type, int64_type, int32_type, int32_type},
/*isVarArg=*/false);
- llvm::Function* function = ir_builder_->GetInsertBlock()->getParent();
+ llvm::Function* function = b_->GetInsertBlock()->getParent();
llvm::Module* module = function->getParent();
llvm::Function* matmul_func = llvm::cast<llvm::Function>(
@@ -1522,18 +1506,15 @@ Status DotOpEmitter::EmitCallToRuntime() {
std::swap(transpose_lhs, transpose_rhs);
}
- ir_builder_->CreateCall(
+ b_->CreateCall(
matmul_func,
- {ir_builder_->CreateBitCast(executable_run_options_value_, int8_ptr_type),
- ir_builder_->CreateBitCast(target_array_.GetBasePointer(),
- float_ptr_type),
- ir_builder_->CreateBitCast(lhs->GetBasePointer(), float_ptr_type),
- ir_builder_->CreateBitCast(rhs->GetBasePointer(), float_ptr_type),
- ir_builder_->getInt64(mat_mult_dims.m),
- ir_builder_->getInt64(mat_mult_dims.n),
- ir_builder_->getInt64(mat_mult_dims.k),
- ir_builder_->getInt32(transpose_lhs),
- ir_builder_->getInt32(transpose_rhs)});
+ {b_->CreateBitCast(executable_run_options_value_, int8_ptr_type),
+ b_->CreateBitCast(target_array_.GetBasePointer(), float_ptr_type),
+ b_->CreateBitCast(lhs->GetBasePointer(), float_ptr_type),
+ b_->CreateBitCast(rhs->GetBasePointer(), float_ptr_type),
+ b_->getInt64(mat_mult_dims.m), b_->getInt64(mat_mult_dims.n),
+ b_->getInt64(mat_mult_dims.k), b_->getInt32(transpose_lhs),
+ b_->getInt32(transpose_rhs)});
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.h b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.h
index ed2a18976a..c2eeb0a1f9 100644
--- a/tensorflow/compiler/xla/service/cpu/dot_op_emitter.h
+++ b/tensorflow/compiler/xla/service/cpu/dot_op_emitter.h
@@ -61,7 +61,7 @@ class DotOpEmitter {
const HloInstruction& dot, const llvm_ir::IrArray& target_array,
const llvm_ir::IrArray& lhs_array, const llvm_ir::IrArray& rhs_array,
const llvm_ir::IrArray* addend_array,
- llvm::Value* executable_run_options_value, llvm::IRBuilder<>* ir_builder,
+ llvm::Value* executable_run_options_value, llvm::IRBuilder<>* b,
const HloModuleConfig& hlo_module_config,
const TargetMachineFeatures& target_machine_features);
@@ -70,8 +70,7 @@ class DotOpEmitter {
const llvm_ir::IrArray& lhs_array,
const llvm_ir::IrArray& rhs_array,
const llvm_ir::IrArray* addend_array,
- llvm::Value* executable_run_options_value,
- llvm::IRBuilder<>* ir_builder,
+ llvm::Value* executable_run_options_value, llvm::IRBuilder<>* b,
const HloModuleConfig& hlo_module_config,
const TargetMachineFeatures& target_machine_features);
@@ -171,7 +170,7 @@ class DotOpEmitter {
const llvm_ir::IrArray& rhs_array_;
const llvm_ir::IrArray* addend_array_;
llvm::Value* executable_run_options_value_;
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
const HloModuleConfig& hlo_module_config_;
const TargetMachineFeatures& target_machine_features_;
};
diff --git a/tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.cc
index e97113dfa0..cf955a8add 100644
--- a/tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.cc
@@ -38,8 +38,7 @@ StatusOr<llvm::Value*> CpuElementalIrEmitter::EmitFloatUnaryOp(
switch (element_type) {
case F16:
cast_result_to_fp16 = true;
- operand_value = ir_builder_->CreateFPCast(operand_value,
- ir_builder_->getFloatTy());
+ operand_value = b_->CreateFPCast(operand_value, b_->getFloatTy());
TF_FALLTHROUGH_INTENDED;
case F32:
function_name = "tanhf";
@@ -59,9 +58,9 @@ StatusOr<llvm::Value*> CpuElementalIrEmitter::EmitFloatUnaryOp(
function->setDoesNotThrow();
function->setDoesNotAccessMemory();
// Create an instruction to call the function.
- llvm::Value* result = ir_builder_->CreateCall(function, operand_value);
+ llvm::Value* result = b_->CreateCall(function, operand_value);
if (cast_result_to_fp16) {
- result = ir_builder_->CreateFPCast(result, ir_builder_->getHalfTy());
+ result = b_->CreateFPCast(result, b_->getHalfTy());
}
return result;
}
@@ -77,8 +76,8 @@ StatusOr<llvm::Value*> CpuElementalIrEmitter::EmitAtan2(
switch (prim_type) {
case F16:
cast_result_to_fp16 = true;
- lhs = ir_builder_->CreateFPCast(lhs, ir_builder_->getFloatTy());
- rhs = ir_builder_->CreateFPCast(rhs, ir_builder_->getFloatTy());
+ lhs = b_->CreateFPCast(lhs, b_->getFloatTy());
+ rhs = b_->CreateFPCast(rhs, b_->getFloatTy());
TF_FALLTHROUGH_INTENDED;
case F32:
function_name = "atan2f";
@@ -98,9 +97,9 @@ StatusOr<llvm::Value*> CpuElementalIrEmitter::EmitAtan2(
function->setDoesNotThrow();
function->setDoesNotAccessMemory();
// Create an instruction to call the function.
- llvm::Value* result = ir_builder_->CreateCall(function, {lhs, rhs});
+ llvm::Value* result = b_->CreateCall(function, {lhs, rhs});
if (cast_result_to_fp16) {
- result = ir_builder_->CreateFPCast(result, ir_builder_->getHalfTy());
+ result = b_->CreateFPCast(result, b_->getHalfTy());
}
return result;
}
diff --git a/tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.h b/tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.h
index 4446dfd282..9598a886ab 100644
--- a/tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.h
+++ b/tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.h
@@ -31,7 +31,7 @@ class CpuElementalIrEmitter : public ElementalIrEmitter {
public:
CpuElementalIrEmitter(const HloModuleConfig& module_config,
IrEmitter* ir_emitter, llvm::Module* module)
- : ElementalIrEmitter(module_config, module, ir_emitter->ir_builder()),
+ : ElementalIrEmitter(module_config, module, ir_emitter->b()),
ir_emitter_(ir_emitter) {}
llvm_ir::ElementGenerator MakeElementGenerator(
diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
index 2ad41374d3..d4ac35a604 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.cc
@@ -51,10 +51,10 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_casting_utils.h"
#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
-#include "tensorflow/compiler/xla/service/llvm_ir/ops.h"
#include "tensorflow/compiler/xla/service/llvm_ir/tuple_ops.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -89,14 +89,14 @@ IrEmitter::IrEmitter(
: assignment_(assignment),
module_(llvm_module),
arch_type_(llvm::Triple(llvm_module->getTargetTriple()).getArch()),
- ir_builder_(llvm_module->getContext()),
+ b_(llvm_module->getContext()),
instruction_to_profile_idx_(std::move(instruction_to_profile_idx)),
computation_to_profile_idx_(std::move(computation_to_profile_idx)),
alias_analysis_(hlo_module, assignment, &llvm_module->getContext()),
hlo_module_config_(hlo_module.config()),
is_top_level_computation_(false),
target_machine_features_(*target_machine_features) {
- ir_builder_.setFastMathFlags(llvm_ir::GetFastMathFlags(
+ b_.setFastMathFlags(llvm_ir::GetFastMathFlags(
/*fast_math_enabled=*/hlo_module_config_.debug_options()
.xla_enable_fast_math()));
}
@@ -146,7 +146,7 @@ void IrEmitter::InitializeIrFunction(const string& function_name) {
new IrFunction(function_name, linkage,
options::OptimizeForSizeRequested(hlo_module_config_),
hlo_module_config_.debug_options().xla_enable_fast_math(),
- module_, &ir_builder_, num_dynamic_loop_bounds_));
+ module_, &b_, num_dynamic_loop_bounds_));
}
IrEmitter::~IrEmitter() {}
@@ -154,9 +154,9 @@ IrEmitter::~IrEmitter() {}
Status IrEmitter::HandleBitcast(HloInstruction* bitcast) {
VLOG(2) << "HandleBitcast: " << bitcast->ToString();
emitted_value_[bitcast] =
- ir_builder_.CreateBitCast(GetEmittedValueFor(bitcast->operand(0)),
- IrShapeType(bitcast->shape())->getPointerTo(),
- AsStringRef(IrName(bitcast)));
+ b_.CreateBitCast(GetEmittedValueFor(bitcast->operand(0)),
+ IrShapeType(bitcast->shape())->getPointerTo(),
+ AsStringRef(IrName(bitcast)));
return Status::OK();
}
@@ -273,7 +273,7 @@ Status IrEmitter::HandleGetTupleElement(HloInstruction* get_tuple_element) {
const Shape& shape = get_tuple_element->shape();
emitted_value_[get_tuple_element] = llvm_ir::EmitGetTupleElement(
shape, get_tuple_element->tuple_index(), MinimumAlignmentForShape(shape),
- GetEmittedValueFor(operand), &ir_builder_, module_);
+ GetEmittedValueFor(operand), &b_, module_);
return Status::OK();
}
@@ -293,7 +293,7 @@ Status IrEmitter::HandleTupleSelect(HloInstruction* tuple_select) {
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(tuple_select));
llvm_ir::EmitTupleSelect(GetIrArrayFor(tuple_select), GetIrArrayFor(pred),
GetEmittedValueFor(on_true),
- GetEmittedValueFor(on_false), &ir_builder_, module_);
+ GetEmittedValueFor(on_false), &b_, module_);
return Status::OK();
}
@@ -316,8 +316,8 @@ Status IrEmitter::HandleInfeed(HloInstruction* instruction) {
assignment_.GetUniqueSlice(infeed, {1}));
llvm::Value* token_address = EmitTempBufferPointer(
token_slice, ShapeUtil::GetTupleElementShape(infeed->shape(), 1));
- llvm_ir::EmitTuple(GetIrArrayFor(infeed), {data_address, token_address},
- &ir_builder_, module_);
+ llvm_ir::EmitTuple(GetIrArrayFor(infeed), {data_address, token_address}, &b_,
+ module_);
if (ShapeUtil::IsTuple(data_shape)) {
TF_RET_CHECK(!ShapeUtil::IsNestedTuple(data_shape));
@@ -348,7 +348,7 @@ Status IrEmitter::HandleInfeed(HloInstruction* instruction) {
}
llvm_ir::EmitTuple(llvm_ir::IrArray(data_address, data_shape),
- tuple_element_addresses, &ir_builder_, module_);
+ tuple_element_addresses, &b_, module_);
} else {
TF_RETURN_IF_ERROR(
EmitXfeedTransfer(XfeedKind::kInfeed, data_shape, data_address));
@@ -369,14 +369,14 @@ Status IrEmitter::EmitXfeedTransfer(XfeedKind kind, const Shape& shape,
int32 length_32 = static_cast<int32>(length);
int32 shape_length;
- TF_ASSIGN_OR_RETURN(llvm::Value * shape_ptr,
- llvm_ir::EncodeSelfDescribingShapeConstant(
- shape, &shape_length, &ir_builder_));
+ TF_ASSIGN_OR_RETURN(
+ llvm::Value * shape_ptr,
+ llvm_ir::EncodeSelfDescribingShapeConstant(shape, &shape_length, &b_));
// The signature of the acquire infeed buffer function is:
//
// (void*)(int32 length);
- llvm::Type* int32_type = ir_builder_.getInt32Ty();
+ llvm::Type* int32_type = b_.getInt32Ty();
llvm::Type* i8_ptr_type = llvm::Type::getInt8PtrTy(module_->getContext());
llvm::FunctionType* acquire_type = llvm::FunctionType::get(
i8_ptr_type, {int32_type, i8_ptr_type, int32_type},
@@ -396,8 +396,7 @@ Status IrEmitter::EmitXfeedTransfer(XfeedKind kind, const Shape& shape,
//
// (void)(int32 length, void* buffer);
llvm::FunctionType* release_type = llvm::FunctionType::get(
- ir_builder_.getVoidTy(),
- {int32_type, i8_ptr_type, i8_ptr_type, int32_type},
+ b_.getVoidTy(), {int32_type, i8_ptr_type, i8_ptr_type, int32_type},
/*isVarArg=*/false);
llvm::Function* release_func;
@@ -414,25 +413,22 @@ Status IrEmitter::EmitXfeedTransfer(XfeedKind kind, const Shape& shape,
// of size exactly 'length_32', and the runtime is responsible for
// check-failing the process if there is a mismatch, versus passing us back a
// buffer that we might overrun.
- llvm::Value* acquired_pointer = ir_builder_.CreateCall(
- acquire_func, {ir_builder_.getInt32(length_32), shape_ptr,
- ir_builder_.getInt32(shape_length)});
+ llvm::Value* acquired_pointer = b_.CreateCall(
+ acquire_func,
+ {b_.getInt32(length_32), shape_ptr, b_.getInt32(shape_length)});
if (kind == XfeedKind::kInfeed) {
// Copy to the program buffer address from the acquired buffer.
- ir_builder_.CreateMemCpy(program_buffer_address, /*DstAlign=*/1,
- acquired_pointer,
- /*SrcAlign=*/1, length_32);
+ b_.CreateMemCpy(program_buffer_address, /*DstAlign=*/1, acquired_pointer,
+ /*SrcAlign=*/1, length_32);
} else {
// Outfeed -- copy from the in-program address to the acquired buffer.
- ir_builder_.CreateMemCpy(acquired_pointer, /*DstAlign=*/1,
- program_buffer_address,
- /*SrcAlign=*/1, length_32);
+ b_.CreateMemCpy(acquired_pointer, /*DstAlign=*/1, program_buffer_address,
+ /*SrcAlign=*/1, length_32);
}
- ir_builder_.CreateCall(release_func,
- {ir_builder_.getInt32(length_32), acquired_pointer,
- shape_ptr, ir_builder_.getInt32(shape_length)});
+ b_.CreateCall(release_func, {b_.getInt32(length_32), acquired_pointer,
+ shape_ptr, b_.getInt32(shape_length)});
return Status::OK();
}
@@ -453,7 +449,7 @@ Status IrEmitter::HandleOutfeed(HloInstruction* outfeed) {
ShapeUtil::GetTupleElementShape(operand_shape, i);
llvm::Value* tuple_element = llvm_ir::EmitGetTupleElement(
tuple_element_shape, i, MinimumAlignmentForShape(tuple_element_shape),
- value, &ir_builder_, module_);
+ value, &b_, module_);
TF_RETURN_IF_ERROR(EmitXfeedTransfer(XfeedKind::kOutfeed,
tuple_element_shape, tuple_element));
}
@@ -472,7 +468,7 @@ Status IrEmitter::HandleTuple(HloInstruction* tuple) {
for (auto operand : tuple->operands()) {
base_ptrs.push_back(GetEmittedValueFor(operand));
}
- llvm_ir::EmitTuple(GetIrArrayFor(tuple), base_ptrs, &ir_builder_, module_);
+ llvm_ir::EmitTuple(GetIrArrayFor(tuple), base_ptrs, &b_, module_);
return Status::OK();
}
@@ -483,8 +479,7 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForMap(
std::vector<llvm::Value*> parameter_addresses;
for (const HloInstruction* operand : map->operands()) {
const llvm_ir::IrArray& array = GetIrArrayFor(operand);
- parameter_addresses.push_back(
- array.EmitArrayElementAddress(index, &ir_builder_));
+ parameter_addresses.push_back(array.EmitArrayElementAddress(index, &b_));
}
return EmitElementFunctionCall(mapped_ir_function, map->shape(),
parameter_addresses, "map_function");
@@ -510,13 +505,12 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForReduceWindow(
PrimitiveType operand_element_type = operand->shape().element_type();
llvm::Value* accumulator_address = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(operand_element_type, module_),
- "reduce_window_accumulator_address", &ir_builder_,
+ "reduce_window_accumulator_address", &b_,
MinimumAlignmentForPrimitiveType(operand_element_type));
- ir_builder_.CreateStore(
- ir_builder_.CreateLoad(GetEmittedValueFor(reduce_window->operand(1))),
- accumulator_address);
+ b_.CreateStore(b_.CreateLoad(GetEmittedValueFor(reduce_window->operand(1))),
+ accumulator_address);
- llvm_ir::ForLoopNest loops(IrName(reduce_window, "inner"), &ir_builder_);
+ llvm_ir::ForLoopNest loops(IrName(reduce_window, "inner"), &b_);
std::vector<int64> window_size;
for (const auto& dim : window.dimensions()) {
window_size.push_back(dim.size());
@@ -525,48 +519,47 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForReduceWindow(
ShapeUtil::MakeShape(operand_element_type, window_size), "window");
CHECK_EQ(window_index.size(), index.size());
- SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
+ SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &b_);
- llvm_ir::IrArray::Index input_index(ir_builder_.getInt64Ty(), index.size());
+ llvm_ir::IrArray::Index input_index(b_.getInt64Ty(), index.size());
llvm::Value* in_bounds_condition = nullptr;
for (size_t i = 0; i < index.size(); ++i) {
- llvm::Value* strided_index = ir_builder_.CreateNSWMul(
- index[i], ir_builder_.getInt64(window.dimensions(i).stride()));
- input_index[i] = ir_builder_.CreateNSWSub(
- ir_builder_.CreateNSWAdd(strided_index, window_index[i]),
- ir_builder_.getInt64(window.dimensions(i).padding_low()));
+ llvm::Value* strided_index =
+ b_.CreateNSWMul(index[i], b_.getInt64(window.dimensions(i).stride()));
+ input_index[i] =
+ b_.CreateNSWSub(b_.CreateNSWAdd(strided_index, window_index[i]),
+ b_.getInt64(window.dimensions(i).padding_low()));
// We need to check if 0 <= input_index[i] < bound, as otherwise we are in
// the padding so that we can skip the computation. That is equivalent to
// input_index[i] < bound as an *unsigned* comparison, since a negative
// value will wrap to a large positive value.
- llvm::Value* index_condition = ir_builder_.CreateICmpULT(
+ llvm::Value* index_condition = b_.CreateICmpULT(
input_index[i],
- ir_builder_.getInt64(ShapeUtil::GetDimension(operand->shape(), i)));
+ b_.getInt64(ShapeUtil::GetDimension(operand->shape(), i)));
if (in_bounds_condition == nullptr) {
in_bounds_condition = index_condition;
} else {
- in_bounds_condition =
- ir_builder_.CreateAnd(in_bounds_condition, index_condition);
+ in_bounds_condition = b_.CreateAnd(in_bounds_condition, index_condition);
}
}
CHECK(in_bounds_condition != nullptr);
llvm_ir::LlvmIfData if_data =
- llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", &ir_builder_);
- SetToFirstInsertPoint(if_data.true_block, &ir_builder_);
+ llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", &b_);
+ SetToFirstInsertPoint(if_data.true_block, &b_);
// We are not in the padding, so carry out the computation.
llvm_ir::IrArray input_array(GetIrArrayFor(operand));
llvm::Value* input_value_address =
- input_array.EmitArrayElementAddress(input_index, &ir_builder_);
+ input_array.EmitArrayElementAddress(input_index, &b_);
llvm::Value* result = EmitElementFunctionCall(
reducer_function, reduce_window->shape(),
{accumulator_address, input_value_address}, "reducer_function");
- ir_builder_.CreateStore(result, accumulator_address);
+ b_.CreateStore(result, accumulator_address);
- SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
- return ir_builder_.CreateLoad(accumulator_address);
+ SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &b_);
+ return b_.CreateLoad(accumulator_address);
}
Status IrEmitter::HandleReduceWindow(HloInstruction* reduce_window) {
@@ -649,141 +642,127 @@ Status IrEmitter::HandleSelectAndScatter(HloInstruction* select_and_scatter) {
select_and_scatter, /*desc=*/IrName(select_and_scatter, "init"),
[this, init_value](const llvm_ir::IrArray::Index& target_index) {
llvm::Value* init_value_addr = GetEmittedValueFor(init_value);
- return ir_builder_.CreateLoad(init_value_addr);
+ return b_.CreateLoad(init_value_addr);
}));
// Create a loop to iterate over the source array to scatter to the output.
- llvm_ir::ForLoopNest source_loops(IrName(select_and_scatter), &ir_builder_);
+ llvm_ir::ForLoopNest source_loops(IrName(select_and_scatter), &b_);
const llvm_ir::IrArray::Index source_index =
source_loops.AddLoopsForShape(source->shape(), "source");
- SetToFirstInsertPoint(source_loops.GetInnerLoopBodyBasicBlock(),
- &ir_builder_);
+ SetToFirstInsertPoint(source_loops.GetInnerLoopBodyBasicBlock(), &b_);
// Allocate space to keep the currently selected value, its index, and
// the boolean initialized_flag, which is initially set to false.
llvm::Value* selected_value_address = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(operand_element_type, module_),
- "selected_value_address", &ir_builder_,
+ "selected_value_address", &b_,
MinimumAlignmentForPrimitiveType(operand_element_type));
llvm::Value* selected_index_address =
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
- ir_builder_.getInt64Ty(), ir_builder_.getInt32(rank),
- "selected_index_address", &ir_builder_);
+ b_.getInt64Ty(), b_.getInt32(rank), "selected_index_address", &b_);
llvm::Value* initialized_flag_address = llvm_ir::EmitAllocaAtFunctionEntry(
- ir_builder_.getInt1Ty(), "initialized_flag_address", &ir_builder_);
- ir_builder_.CreateStore(ir_builder_.getInt1(false), initialized_flag_address);
+ b_.getInt1Ty(), "initialized_flag_address", &b_);
+ b_.CreateStore(b_.getInt1(false), initialized_flag_address);
// Create the inner loop to iterate over the window.
- llvm_ir::ForLoopNest window_loops(IrName(select_and_scatter, "window"),
- &ir_builder_);
+ llvm_ir::ForLoopNest window_loops(IrName(select_and_scatter, "window"), &b_);
std::vector<int64> window_size;
for (const auto& dim : window.dimensions()) {
window_size.push_back(dim.size());
}
const llvm_ir::IrArray::Index window_index = window_loops.AddLoopsForShape(
ShapeUtil::MakeShape(operand_element_type, window_size), "window");
- SetToFirstInsertPoint(window_loops.GetInnerLoopBodyBasicBlock(),
- &ir_builder_);
+ SetToFirstInsertPoint(window_loops.GetInnerLoopBodyBasicBlock(), &b_);
  // Compute the operand index to visit and evaluate whether it is within
  // bounds. The unsigned comparison also covers the check that the operand
  // index is >= 0.
- llvm_ir::IrArray::Index operand_index(ir_builder_.getInt64Ty(),
- source_index.size());
- llvm::Value* in_bounds_condition = ir_builder_.getTrue();
+ llvm_ir::IrArray::Index operand_index(b_.getInt64Ty(), source_index.size());
+ llvm::Value* in_bounds_condition = b_.getTrue();
for (int64 i = 0; i < rank; ++i) {
- llvm::Value* strided_index = ir_builder_.CreateNSWMul(
- source_index[i], ir_builder_.getInt64(window.dimensions(i).stride()));
- operand_index[i] = ir_builder_.CreateNSWSub(
- ir_builder_.CreateNSWAdd(strided_index, window_index[i]),
- ir_builder_.getInt64(window.dimensions(i).padding_low()));
- llvm::Value* index_condition = ir_builder_.CreateICmpULT(
+ llvm::Value* strided_index = b_.CreateNSWMul(
+ source_index[i], b_.getInt64(window.dimensions(i).stride()));
+ operand_index[i] =
+ b_.CreateNSWSub(b_.CreateNSWAdd(strided_index, window_index[i]),
+ b_.getInt64(window.dimensions(i).padding_low()));
+ llvm::Value* index_condition = b_.CreateICmpULT(
operand_index[i],
- ir_builder_.getInt64(ShapeUtil::GetDimension(operand->shape(), i)));
- in_bounds_condition =
- ir_builder_.CreateAnd(in_bounds_condition, index_condition);
+ b_.getInt64(ShapeUtil::GetDimension(operand->shape(), i)));
+ in_bounds_condition = b_.CreateAnd(in_bounds_condition, index_condition);
}
CHECK(in_bounds_condition != nullptr);
// Only need to do something if the operand index is within the bounds. First
// check if the initialized_flag is set.
llvm_ir::LlvmIfData if_in_bounds =
- llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", &ir_builder_);
- SetToFirstInsertPoint(if_in_bounds.true_block, &ir_builder_);
- llvm_ir::LlvmIfData if_initialized =
- llvm_ir::EmitIfThenElse(ir_builder_.CreateLoad(initialized_flag_address),
- "initialized", &ir_builder_);
+ llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", &b_);
+ SetToFirstInsertPoint(if_in_bounds.true_block, &b_);
+ llvm_ir::LlvmIfData if_initialized = llvm_ir::EmitIfThenElse(
+ b_.CreateLoad(initialized_flag_address), "initialized", &b_);
// If the initialized_flag is false, initialize the selected value and index
// with the currently visiting operand.
- SetToFirstInsertPoint(if_initialized.false_block, &ir_builder_);
+ SetToFirstInsertPoint(if_initialized.false_block, &b_);
const auto save_operand_index =
[&](const llvm_ir::IrArray::Index& operand_index) {
for (int64 i = 0; i < rank; ++i) {
llvm::Value* selected_index_address_slot =
- ir_builder_.CreateInBoundsGEP(selected_index_address,
- {ir_builder_.getInt32(i)});
- ir_builder_.CreateStore(operand_index[i],
- selected_index_address_slot);
+ b_.CreateInBoundsGEP(selected_index_address, {b_.getInt32(i)});
+ b_.CreateStore(operand_index[i], selected_index_address_slot);
}
};
llvm_ir::IrArray operand_array(GetIrArrayFor(operand));
llvm::Value* operand_data =
- operand_array.EmitReadArrayElement(operand_index, &ir_builder_);
- ir_builder_.CreateStore(operand_data, selected_value_address);
+ operand_array.EmitReadArrayElement(operand_index, &b_);
+ b_.CreateStore(operand_data, selected_value_address);
save_operand_index(operand_index);
- ir_builder_.CreateStore(ir_builder_.getInt1(true), initialized_flag_address);
+ b_.CreateStore(b_.getInt1(true), initialized_flag_address);
// If the initialized_flag is true, call the `select` function to potentially
// update the selected value and index with the currently visiting operand.
- SetToFirstInsertPoint(if_initialized.true_block, &ir_builder_);
+ SetToFirstInsertPoint(if_initialized.true_block, &b_);
const Shape output_shape = ShapeUtil::MakeShape(PRED, {});
llvm::Value* operand_address =
- operand_array.EmitArrayElementAddress(operand_index, &ir_builder_);
+ operand_array.EmitArrayElementAddress(operand_index, &b_);
llvm::Value* result = EmitElementFunctionCall(
select_function, output_shape, {selected_value_address, operand_address},
"select_function");
// If the 'select' function returns false, update the selected value and the
// index to the currently visiting operand.
- llvm::Value* cond = ir_builder_.CreateICmpNE(
+ llvm::Value* cond = b_.CreateICmpNE(
result,
llvm::ConstantInt::get(llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0),
"boolean_predicate");
llvm_ir::LlvmIfData if_select_lhs =
- llvm_ir::EmitIfThenElse(cond, "if-select-lhs", &ir_builder_);
- SetToFirstInsertPoint(if_select_lhs.false_block, &ir_builder_);
- ir_builder_.CreateStore(ir_builder_.CreateLoad(operand_address),
- selected_value_address);
+ llvm_ir::EmitIfThenElse(cond, "if-select-lhs", &b_);
+ SetToFirstInsertPoint(if_select_lhs.false_block, &b_);
+ b_.CreateStore(b_.CreateLoad(operand_address), selected_value_address);
save_operand_index(operand_index);
// After iterating over the window elements, scatter the source element to
// the selected index of the output. The value we store at the output
// location is computed by calling the `scatter` function with the source
// value and the current output value.
- SetToFirstInsertPoint(window_loops.GetOuterLoopExitBasicBlock(),
- &ir_builder_);
+ SetToFirstInsertPoint(window_loops.GetOuterLoopExitBasicBlock(), &b_);
llvm_ir::IrArray::Index selected_index(source_index.GetType());
for (int64 i = 0; i < rank; ++i) {
- llvm::Value* selected_index_address_slot = ir_builder_.CreateInBoundsGEP(
- selected_index_address, {ir_builder_.getInt32(i)});
- selected_index.push_back(
- ir_builder_.CreateLoad(selected_index_address_slot));
+ llvm::Value* selected_index_address_slot =
+ b_.CreateInBoundsGEP(selected_index_address, {b_.getInt32(i)});
+ selected_index.push_back(b_.CreateLoad(selected_index_address_slot));
}
llvm_ir::IrArray source_array(GetIrArrayFor(source));
llvm::Value* source_value_address =
- source_array.EmitArrayElementAddress(source_index, &ir_builder_);
+ source_array.EmitArrayElementAddress(source_index, &b_);
llvm_ir::IrArray output_array(GetIrArrayFor(select_and_scatter));
llvm::Value* output_value_address =
- output_array.EmitArrayElementAddress(selected_index, &ir_builder_);
+ output_array.EmitArrayElementAddress(selected_index, &b_);
llvm::Value* scatter_value = EmitElementFunctionCall(
scatter_function, source->shape(),
{output_value_address, source_value_address}, "scatter_function");
- output_array.EmitWriteArrayElement(selected_index, scatter_value,
- &ir_builder_);
+ output_array.EmitWriteArrayElement(selected_index, scatter_value, &b_);
- SetToFirstInsertPoint(source_loops.GetOuterLoopExitBasicBlock(),
- &ir_builder_);
+ SetToFirstInsertPoint(source_loops.GetOuterLoopExitBasicBlock(), &b_);
return Status::OK();
}
@@ -822,7 +801,7 @@ Status IrEmitter::HandleDot(HloInstruction* dot) {
// Dot operation is complicated so we delegate to a helper class.
return DotOpEmitter::EmitDotOperation(
*dot, target_array, lhs_array, rhs_array, /*addend_array=*/nullptr,
- GetExecutableRunOptionsArgument(), &ir_builder_, hlo_module_config_,
+ GetExecutableRunOptionsArgument(), &b_, hlo_module_config_,
target_machine_features_);
}
@@ -849,12 +828,12 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForConvolution(
llvm::Type* lhs_llvm_type =
llvm_ir::PrimitiveTypeToIrType(lhs_element_type, module_);
llvm::Value* sum_address = llvm_ir::EmitAllocaAtFunctionEntry(
- lhs_llvm_type, "convolution_sum_address", &ir_builder_,
+ lhs_llvm_type, "convolution_sum_address", &b_,
MinimumAlignmentForPrimitiveType(lhs_element_type));
llvm::Value* constant_zero = llvm::Constant::getNullValue(lhs_llvm_type);
- ir_builder_.CreateStore(constant_zero, sum_address);
+ b_.CreateStore(constant_zero, sum_address);
- llvm_ir::ForLoopNest loops(IrName(convolution, "inner"), &ir_builder_);
+ llvm_ir::ForLoopNest loops(IrName(convolution, "inner"), &b_);
std::vector<llvm::Value*> kernel_spatial(num_spatial_dims);
for (int i = 0; i < num_spatial_dims; ++i) {
kernel_spatial[i] =
@@ -870,7 +849,7 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForConvolution(
"iz")
->GetIndVarValue();
- SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
+ SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &b_);
// Calculate the spatial index in the input array, taking striding, dilation
// and padding into account. An index in the padding will be out of the bounds
@@ -878,13 +857,12 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForConvolution(
const auto calculate_input_index = [this](llvm::Value* output_index,
llvm::Value* kernel_index,
const WindowDimension& window_dim) {
- llvm::Value* strided_index = ir_builder_.CreateNSWMul(
- output_index, ir_builder_.getInt64(window_dim.stride()));
- llvm::Value* dilated_kernel_index = ir_builder_.CreateNSWMul(
- kernel_index, ir_builder_.getInt64(window_dim.window_dilation()));
- return ir_builder_.CreateNSWSub(
- ir_builder_.CreateNSWAdd(strided_index, dilated_kernel_index),
- ir_builder_.getInt64(window_dim.padding_low()));
+ llvm::Value* strided_index =
+ b_.CreateNSWMul(output_index, b_.getInt64(window_dim.stride()));
+ llvm::Value* dilated_kernel_index = b_.CreateNSWMul(
+ kernel_index, b_.getInt64(window_dim.window_dilation()));
+ return b_.CreateNSWSub(b_.CreateNSWAdd(strided_index, dilated_kernel_index),
+ b_.getInt64(window_dim.padding_low()));
};
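// [Editor's note] Plain-C++ analogue of the calculate_input_index lambda
// above (illustrative; the function and parameter names are assumptions, not
// from the patch). Each output spatial coordinate maps into the padded,
// dilated input as: input = output * stride + kernel * window_dilation -
// padding_low.
#include <cstdint>
int64_t CalculateInputIndex(int64_t output_index, int64_t kernel_index,
                            int64_t stride, int64_t window_dilation,
                            int64_t padding_low) {
  return output_index * stride + kernel_index * window_dilation - padding_low;
}
// Example: stride=2, window_dilation=1, padding_low=1 -> output 0 with kernel
// tap 0 maps to input -1, i.e. into the low padding, which the in-bounds
// check below then skips.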
std::vector<llvm::Value*> input_spatial(num_spatial_dims);
for (int i = 0; i < num_spatial_dims; ++i) {
@@ -901,30 +879,27 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForConvolution(
// Also need to check that the input coordinates are not in one of the
// holes created by base dilation.
const auto not_in_hole = [&](llvm::Value* input_index, int64 base_dilation) {
- llvm::Value* remainder = ir_builder_.CreateSRem(
- input_index, ir_builder_.getInt64(base_dilation));
- return ir_builder_.CreateICmpEQ(remainder, ir_builder_.getInt64(0));
+ llvm::Value* remainder =
+ b_.CreateSRem(input_index, b_.getInt64(base_dilation));
+ return b_.CreateICmpEQ(remainder, b_.getInt64(0));
};
- llvm::Value* in_bounds_condition = ir_builder_.getInt1(true);
+ llvm::Value* in_bounds_condition = b_.getInt1(true);
for (int i = 0; i < num_spatial_dims; ++i) {
- llvm::ConstantInt* input_bound =
- ir_builder_.getInt64(window_util::DilatedBound(
- lhs->shape().dimensions(dnums.input_spatial_dimensions(i)),
- window.dimensions(i).base_dilation()));
- llvm::Value* dim_in_bound =
- ir_builder_.CreateICmpULT(input_spatial[i], input_bound);
+ llvm::ConstantInt* input_bound = b_.getInt64(window_util::DilatedBound(
+ lhs->shape().dimensions(dnums.input_spatial_dimensions(i)),
+ window.dimensions(i).base_dilation()));
+ llvm::Value* dim_in_bound = b_.CreateICmpULT(input_spatial[i], input_bound);
llvm::Value* dim_not_in_hole =
not_in_hole(input_spatial[i], window.dimensions(i).base_dilation());
- llvm::Value* dim_ok = ir_builder_.CreateAnd(dim_in_bound, dim_not_in_hole);
- in_bounds_condition = ir_builder_.CreateAnd(in_bounds_condition, dim_ok);
+ llvm::Value* dim_ok = b_.CreateAnd(dim_in_bound, dim_not_in_hole);
+ in_bounds_condition = b_.CreateAnd(in_bounds_condition, dim_ok);
}
// Now we need to map the dilated base coordinates back to the actual
// data indices on the lhs.
const auto undilate = [&](llvm::Value* input_index, int64 base_dilation) {
- return ir_builder_.CreateSDiv(input_index,
- ir_builder_.getInt64(base_dilation));
+ return b_.CreateSDiv(input_index, b_.getInt64(base_dilation));
};
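// [Editor's note] Standalone sketch of the base-dilation handling above
// (illustrative, not part of the patch). With base_dilation d, real input
// element i sits at dilated coordinate i * d; coordinates that are not
// multiples of d are the zero-filled "holes" and are skipped, and valid
// coordinates map back to data indices by integer division.
#include <cstdint>
bool NotInHole(int64_t dilated_index, int64_t base_dilation) {
  return dilated_index % base_dilation == 0;
}
int64_t Undilate(int64_t dilated_index, int64_t base_dilation) {
  return dilated_index / base_dilation;
}
// Example: base_dilation=2 -> dilated coordinates 0, 2, 4 come from inputs
// 0, 1, 2, while 1, 3, 5 are holes and contribute nothing to the sum.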
for (int i = 0; i < num_spatial_dims; ++i) {
input_spatial[i] =
@@ -932,12 +907,12 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForConvolution(
}
llvm_ir::LlvmIfData if_data =
- llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", &ir_builder_);
- SetToFirstInsertPoint(if_data.true_block, &ir_builder_);
+ llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", &b_);
+ SetToFirstInsertPoint(if_data.true_block, &b_);
// We are not in the padding, so carry out the computation.
int num_dims = num_spatial_dims + 2;
- llvm_ir::IrArray::Index input_index(ir_builder_.getInt64Ty(), num_dims);
+ llvm_ir::IrArray::Index input_index(b_.getInt64Ty(), num_dims);
for (int i = 0; i < num_spatial_dims; ++i) {
input_index[dnums.input_spatial_dimensions(i)] = input_spatial[i];
}
@@ -945,13 +920,12 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForConvolution(
input_index[dnums.input_batch_dimension()] = batch;
llvm_ir::IrArray kernel_array(GetIrArrayFor(rhs));
- llvm_ir::IrArray::Index kernel_index(ir_builder_.getInt64Ty(), num_dims);
+ llvm_ir::IrArray::Index kernel_index(b_.getInt64Ty(), num_dims);
for (int i = 0; i < num_spatial_dims; ++i) {
kernel_index[dnums.kernel_spatial_dimensions(i)] =
window.dimensions(i).window_reversal()
- ? ir_builder_.CreateNSWSub(
- ir_builder_.getInt64(window.dimensions(i).size() - 1),
- kernel_spatial[i])
+ ? b_.CreateNSWSub(b_.getInt64(window.dimensions(i).size() - 1),
+ kernel_spatial[i])
: kernel_spatial[i];
}
@@ -959,15 +933,14 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForConvolution(
kernel_index[dnums.kernel_output_feature_dimension()] = output_feature;
llvm_ir::IrArray input_array(GetIrArrayFor(lhs));
- llvm::Value* product = ir_builder_.CreateFMul(
- input_array.EmitReadArrayElement(input_index, &ir_builder_),
- kernel_array.EmitReadArrayElement(kernel_index, &ir_builder_));
- llvm::Value* sum =
- ir_builder_.CreateFAdd(ir_builder_.CreateLoad(sum_address), product);
- ir_builder_.CreateStore(sum, sum_address);
+ llvm::Value* product =
+ b_.CreateFMul(input_array.EmitReadArrayElement(input_index, &b_),
+ kernel_array.EmitReadArrayElement(kernel_index, &b_));
+ llvm::Value* sum = b_.CreateFAdd(b_.CreateLoad(sum_address), product);
+ b_.CreateStore(sum, sum_address);
- SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
- return ir_builder_.CreateLoad(sum_address);
+ SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &b_);
+ return b_.CreateLoad(sum_address);
}
Status IrEmitter::HandleConvolution(HloInstruction* convolution) {
@@ -1056,12 +1029,12 @@ Status IrEmitter::HandleConvolution(HloInstruction* convolution) {
PrimitiveType primitive_type = lhs->shape().element_type();
llvm::Type* ir_ptr_type = primitive_type == F16
- ? ir_builder_.getHalfTy()->getPointerTo()
- : ir_builder_.getFloatTy()->getPointerTo();
- llvm::Type* int64_type = ir_builder_.getInt64Ty();
- llvm::Type* int8_ptr_type = ir_builder_.getInt8Ty()->getPointerTo();
+ ? b_.getHalfTy()->getPointerTo()
+ : b_.getFloatTy()->getPointerTo();
+ llvm::Type* int64_type = b_.getInt64Ty();
+ llvm::Type* int8_ptr_type = b_.getInt8Ty()->getPointerTo();
llvm::FunctionType* conv_type = llvm::FunctionType::get(
- ir_builder_.getVoidTy(),
+ b_.getVoidTy(),
{int8_ptr_type, ir_ptr_type, ir_ptr_type, ir_ptr_type, int64_type,
int64_type, int64_type, int64_type, int64_type, int64_type,
int64_type, int64_type, int64_type, int64_type, int64_type,
@@ -1093,34 +1066,34 @@ Status IrEmitter::HandleConvolution(HloInstruction* convolution) {
conv_func->setCallingConv(llvm::CallingConv::C);
conv_func->setDoesNotThrow();
conv_func->setOnlyAccessesArgMemory();
- ir_builder_.CreateCall(
- conv_func, {
- GetExecutableRunOptionsArgument(),
- ir_builder_.CreateBitCast(
- GetEmittedValueFor(convolution), ir_ptr_type),
- ir_builder_.CreateBitCast(lhs_address, ir_ptr_type),
- ir_builder_.CreateBitCast(rhs_address, ir_ptr_type),
- ir_builder_.getInt64(input_batch),
- ir_builder_.getInt64(input_rows),
- ir_builder_.getInt64(input_cols),
- ir_builder_.getInt64(input_channels),
- ir_builder_.getInt64(kernel_rows),
- ir_builder_.getInt64(kernel_cols),
- ir_builder_.getInt64(kernel_channels),
- ir_builder_.getInt64(kernel_filters),
- ir_builder_.getInt64(output_rows),
- ir_builder_.getInt64(output_cols),
- ir_builder_.getInt64(row_stride),
- ir_builder_.getInt64(col_stride),
- ir_builder_.getInt64(padding_top),
- ir_builder_.getInt64(padding_bottom),
- ir_builder_.getInt64(padding_left),
- ir_builder_.getInt64(padding_right),
- ir_builder_.getInt64(lhs_row_dilation),
- ir_builder_.getInt64(lhs_col_dilation),
- ir_builder_.getInt64(rhs_row_dilation),
- ir_builder_.getInt64(rhs_col_dilation),
- });
+ b_.CreateCall(
+ conv_func,
+ {
+ GetExecutableRunOptionsArgument(),
+ b_.CreateBitCast(GetEmittedValueFor(convolution), ir_ptr_type),
+ b_.CreateBitCast(lhs_address, ir_ptr_type),
+ b_.CreateBitCast(rhs_address, ir_ptr_type),
+ b_.getInt64(input_batch),
+ b_.getInt64(input_rows),
+ b_.getInt64(input_cols),
+ b_.getInt64(input_channels),
+ b_.getInt64(kernel_rows),
+ b_.getInt64(kernel_cols),
+ b_.getInt64(kernel_channels),
+ b_.getInt64(kernel_filters),
+ b_.getInt64(output_rows),
+ b_.getInt64(output_cols),
+ b_.getInt64(row_stride),
+ b_.getInt64(col_stride),
+ b_.getInt64(padding_top),
+ b_.getInt64(padding_bottom),
+ b_.getInt64(padding_left),
+ b_.getInt64(padding_right),
+ b_.getInt64(lhs_row_dilation),
+ b_.getInt64(lhs_col_dilation),
+ b_.getInt64(rhs_row_dilation),
+ b_.getInt64(rhs_col_dilation),
+ });
return Status::OK();
}
@@ -1159,11 +1132,11 @@ Status IrEmitter::HandleFft(HloInstruction* fft) {
}
// Args have been computed, make the call.
- llvm::Type* int8_ptr_type = ir_builder_.getInt8Ty()->getPointerTo();
- llvm::Type* int32_type = ir_builder_.getInt32Ty();
- llvm::Type* int64_type = ir_builder_.getInt64Ty();
+ llvm::Type* int8_ptr_type = b_.getInt8Ty()->getPointerTo();
+ llvm::Type* int32_type = b_.getInt32Ty();
+ llvm::Type* int64_type = b_.getInt64Ty();
llvm::FunctionType* fft_type = llvm::FunctionType::get(
- ir_builder_.getVoidTy(),
+ b_.getVoidTy(),
{int8_ptr_type, int8_ptr_type, int8_ptr_type, int32_type, int32_type,
int64_type, int64_type, int64_type, int64_type},
/*isVarArg=*/false);
@@ -1180,16 +1153,15 @@ Status IrEmitter::HandleFft(HloInstruction* fft) {
fft_func->setDoesNotThrow();
fft_func->setOnlyAccessesInaccessibleMemOrArgMem();
const int fft_rank = fft_length.size();
- ir_builder_.CreateCall(
+ b_.CreateCall(
fft_func,
{GetExecutableRunOptionsArgument(),
- ir_builder_.CreateBitCast(GetEmittedValueFor(fft), int8_ptr_type),
- ir_builder_.CreateBitCast(operand_address, int8_ptr_type),
- ir_builder_.getInt32(fft->fft_type()), ir_builder_.getInt32(fft_rank),
- ir_builder_.getInt64(input_batch),
- ir_builder_.getInt64(fft_rank > 0 ? fft_length[0] : 0),
- ir_builder_.getInt64(fft_rank > 1 ? fft_length[1] : 0),
- ir_builder_.getInt64(fft_rank > 2 ? fft_length[2] : 0)});
+ b_.CreateBitCast(GetEmittedValueFor(fft), int8_ptr_type),
+ b_.CreateBitCast(operand_address, int8_ptr_type),
+ b_.getInt32(fft->fft_type()), b_.getInt32(fft_rank),
+ b_.getInt64(input_batch), b_.getInt64(fft_rank > 0 ? fft_length[0] : 0),
+ b_.getInt64(fft_rank > 1 ? fft_length[1] : 0),
+ b_.getInt64(fft_rank > 2 ? fft_length[2] : 0)});
return Status::OK();
}
@@ -1228,11 +1200,10 @@ Status IrEmitter::HandleCrossReplicaSum(HloInstruction* crs) {
operand_ptrs.push_back(EmitTempBufferPointer(out_slice, operand_shape));
// TODO(b/63762267): Be more aggressive about specifying alignment.
- ir_builder_.CreateMemCpy(operand_ptrs.back(), /*DstAlign=*/1, in_ptr,
- /*SrcAlign=*/1,
- ShapeUtil::ByteSizeOf(operand_shape));
+ b_.CreateMemCpy(operand_ptrs.back(), /*DstAlign=*/1, in_ptr,
+ /*SrcAlign=*/1, ShapeUtil::ByteSizeOf(operand_shape));
}
- llvm_ir::EmitTuple(GetIrArrayFor(crs), operand_ptrs, &ir_builder_, module_);
+ llvm_ir::EmitTuple(GetIrArrayFor(crs), operand_ptrs, &b_, module_);
return Status::OK();
}
@@ -1278,9 +1249,8 @@ Status IrEmitter::HandleParameter(HloInstruction* parameter) {
// example, float for an XLA F32 element type).
llvm::Value* params = compute_function_->parameters_arg();
llvm::Value* param_address_offset =
- llvm_ir::EmitBufferIndexingGEP(params, param_number, &ir_builder_);
- llvm::LoadInst* param_address_untyped =
- ir_builder_.CreateLoad(param_address_offset);
+ llvm_ir::EmitBufferIndexingGEP(params, param_number, &b_);
+ llvm::LoadInst* param_address_untyped = b_.CreateLoad(param_address_offset);
param_address_untyped->setName(AsStringRef(IrName(parameter, "untyped")));
if (is_top_level_computation_ &&
hlo_module_config_.debug_options()
@@ -1295,7 +1265,7 @@ Status IrEmitter::HandleParameter(HloInstruction* parameter) {
llvm::MDNode::get(param_address_untyped->getContext(), /*MDs=*/{}));
}
- llvm::Value* param_address_typed = ir_builder_.CreateBitCast(
+ llvm::Value* param_address_typed = b_.CreateBitCast(
param_address_untyped, IrShapeType(param_shape)->getPointerTo());
emitted_value_[parameter] = param_address_typed;
@@ -1403,62 +1373,61 @@ IrEmitter::ReductionGenerator IrEmitter::MatchReductionGenerator(
return nullptr;
case HloOpcode::kAdd:
- return [root_is_integral](llvm::IRBuilder<>* ir_builder, llvm::Value* lhs,
+ return [root_is_integral](llvm::IRBuilder<>* b, llvm::Value* lhs,
llvm::Value* rhs) {
- return root_is_integral ? ir_builder->CreateAdd(lhs, rhs)
- : ir_builder->CreateFAdd(lhs, rhs);
+ return root_is_integral ? b->CreateAdd(lhs, rhs)
+ : b->CreateFAdd(lhs, rhs);
};
case HloOpcode::kMultiply:
- return [root_is_integral](llvm::IRBuilder<>* ir_builder, llvm::Value* lhs,
+ return [root_is_integral](llvm::IRBuilder<>* b, llvm::Value* lhs,
llvm::Value* rhs) {
- return root_is_integral ? ir_builder->CreateMul(lhs, rhs)
- : ir_builder->CreateFMul(lhs, rhs);
+ return root_is_integral ? b->CreateMul(lhs, rhs)
+ : b->CreateFMul(lhs, rhs);
};
case HloOpcode::kAnd:
- return [](llvm::IRBuilder<>* ir_builder, llvm::Value* lhs,
- llvm::Value* rhs) { return ir_builder->CreateAnd(lhs, rhs); };
+ return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
+ return b->CreateAnd(lhs, rhs);
+ };
case HloOpcode::kOr:
- return [](llvm::IRBuilder<>* ir_builder, llvm::Value* lhs,
- llvm::Value* rhs) { return ir_builder->CreateOr(lhs, rhs); };
+ return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
+ return b->CreateOr(lhs, rhs);
+ };
case HloOpcode::kXor:
- return [](llvm::IRBuilder<>* ir_builder, llvm::Value* lhs,
- llvm::Value* rhs) { return ir_builder->CreateXor(lhs, rhs); };
+ return [](llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
+ return b->CreateXor(lhs, rhs);
+ };
case HloOpcode::kMaximum:
return [root_is_floating_point, root_is_signed](
- llvm::IRBuilder<>* ir_builder, llvm::Value* lhs,
- llvm::Value* rhs) {
+ llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
if (root_is_floating_point) {
return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::maxnum,
- {lhs, rhs}, {lhs->getType()},
- ir_builder);
+ {lhs, rhs}, {lhs->getType()}, b);
}
- return ir_builder->CreateSelect(
- ir_builder->CreateICmp(root_is_signed ? llvm::ICmpInst::ICMP_SGE
- : llvm::ICmpInst::ICMP_UGE,
- lhs, rhs),
+ return b->CreateSelect(
+ b->CreateICmp(root_is_signed ? llvm::ICmpInst::ICMP_SGE
+ : llvm::ICmpInst::ICMP_UGE,
+ lhs, rhs),
lhs, rhs);
};
case HloOpcode::kMinimum:
return [root_is_floating_point, root_is_signed](
- llvm::IRBuilder<>* ir_builder, llvm::Value* lhs,
- llvm::Value* rhs) {
+ llvm::IRBuilder<>* b, llvm::Value* lhs, llvm::Value* rhs) {
if (root_is_floating_point) {
return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::minnum,
- {lhs, rhs}, {lhs->getType()},
- ir_builder);
+ {lhs, rhs}, {lhs->getType()}, b);
}
- return ir_builder->CreateSelect(
- ir_builder->CreateICmp(root_is_signed ? llvm::ICmpInst::ICMP_SLE
- : llvm::ICmpInst::ICMP_ULE,
- lhs, rhs),
+ return b->CreateSelect(
+ b->CreateICmp(root_is_signed ? llvm::ICmpInst::ICMP_SLE
+ : llvm::ICmpInst::ICMP_ULE,
+ lhs, rhs),
lhs, rhs);
};
}
@@ -1527,34 +1496,31 @@ IrEmitter::EmitInnerLoopForVectorizedReduction(
accumulator.reserve(accumulator_type.size());
for (auto accumulator_shard_type : accumulator_type) {
accumulator.push_back(llvm_ir::EmitAllocaAtFunctionEntry(
- accumulator_shard_type, "accumulator", &ir_builder_, 0));
+ accumulator_shard_type, "accumulator", &b_, 0));
}
- llvm::Value* init_value_ssa =
- ir_builder_.CreateLoad(GetEmittedValueFor(init_value));
+ llvm::Value* init_value_ssa = b_.CreateLoad(GetEmittedValueFor(init_value));
for (llvm::Value* accumulator_shard : accumulator) {
llvm::Value* initial_value;
auto shard_type = accumulator_shard->getType()->getPointerElementType();
if (auto vector_type = llvm::dyn_cast<llvm::VectorType>(shard_type)) {
- initial_value = ir_builder_.CreateVectorSplat(
- vector_type->getNumElements(), init_value_ssa);
+ initial_value =
+ b_.CreateVectorSplat(vector_type->getNumElements(), init_value_ssa);
} else {
initial_value = init_value_ssa;
}
- ir_builder_.CreateAlignedStore(initial_value, accumulator_shard,
- element_alignment);
+ b_.CreateAlignedStore(initial_value, accumulator_shard, element_alignment);
}
llvm_ir::ForLoopNest reduction_loop_nest(IrName(arg, "vectorized_inner"),
- &ir_builder_);
+ &b_);
llvm_ir::IrArray::Index reduced_dims_index =
reduction_loop_nest.AddLoopsForShapeOnDimensions(arg->shape(), dimensions,
"reduction_dim");
- SetToFirstInsertPoint(reduction_loop_nest.GetInnerLoopBodyBasicBlock(),
- &ir_builder_);
+ SetToFirstInsertPoint(reduction_loop_nest.GetInnerLoopBodyBasicBlock(), &b_);
llvm_ir::IrArray arg_array(GetIrArrayFor(arg));
llvm_ir::IrArray::Index input_index = reduced_dims_index;
@@ -1567,38 +1533,34 @@ IrEmitter::EmitInnerLoopForVectorizedReduction(
}
CHECK(output_index.end() == it);
- llvm::Value* input_address = ir_builder_.CreateBitCast(
- arg_array.EmitArrayElementAddress(input_index, &ir_builder_),
- ir_builder_.getInt8PtrTy());
+ llvm::Value* input_address = b_.CreateBitCast(
+ arg_array.EmitArrayElementAddress(input_index, &b_), b_.getInt8PtrTy());
for (int i = 0; i < accumulator.size(); i++) {
auto input_address_typed =
- ir_builder_.CreateBitCast(input_address, accumulator[i]->getType());
+ b_.CreateBitCast(input_address, accumulator[i]->getType());
auto current_accumulator_value =
- ir_builder_.CreateAlignedLoad(accumulator[i], element_alignment);
- auto addend =
- ir_builder_.CreateAlignedLoad(input_address_typed, element_alignment);
+ b_.CreateAlignedLoad(accumulator[i], element_alignment);
+ auto addend = b_.CreateAlignedLoad(input_address_typed, element_alignment);
arg_array.AnnotateLoadStoreInstructionWithMetadata(addend);
auto reduced_result =
- reduction_generator(&ir_builder_, current_accumulator_value, addend);
- ir_builder_.CreateAlignedStore(reduced_result, accumulator[i],
- element_alignment);
+ reduction_generator(&b_, current_accumulator_value, addend);
+ b_.CreateAlignedStore(reduced_result, accumulator[i], element_alignment);
if (i != (accumulator.size() - 1)) {
- input_address = ir_builder_.CreateConstInBoundsGEP1_32(
- reduced_result->getType(), input_address_typed, 1);
+ input_address = b_.CreateConstInBoundsGEP1_32(reduced_result->getType(),
+ input_address_typed, 1);
}
}
- SetToFirstInsertPoint(reduction_loop_nest.GetOuterLoopExitBasicBlock(),
- &ir_builder_);
+ SetToFirstInsertPoint(reduction_loop_nest.GetOuterLoopExitBasicBlock(), &b_);
ShardedVector result_ssa;
result_ssa.reserve(accumulator.size());
for (auto accumulator_shard : accumulator) {
result_ssa.push_back(
- ir_builder_.CreateAlignedLoad(accumulator_shard, element_alignment));
+ b_.CreateAlignedLoad(accumulator_shard, element_alignment));
}
return result_ssa;
}
@@ -1607,17 +1569,17 @@ void IrEmitter::EmitShardedVectorStore(
llvm::Value* store_address, const std::vector<llvm::Value*>& value_to_store,
const int alignment, const llvm_ir::IrArray& containing_array) {
for (int i = 0; i < value_to_store.size(); i++) {
- auto store_address_typed = ir_builder_.CreateBitCast(
+ auto store_address_typed = b_.CreateBitCast(
store_address,
llvm::PointerType::getUnqual(value_to_store[i]->getType()));
- auto store_instruction = ir_builder_.CreateAlignedStore(
+ auto store_instruction = b_.CreateAlignedStore(
value_to_store[i], store_address_typed, alignment);
containing_array.AnnotateLoadStoreInstructionWithMetadata(
store_instruction);
if (i != (value_to_store.size() - 1)) {
- store_address = ir_builder_.CreateConstInBoundsGEP1_32(
+ store_address = b_.CreateConstInBoundsGEP1_32(
value_to_store[i]->getType(), store_address_typed, 1);
}
}
@@ -1683,8 +1645,8 @@ StatusOr<bool> IrEmitter::EmitVectorizedReduce(
// }
// }
- llvm_ir::ForLoopNest loop_nest(IrName(reduce), &ir_builder_);
- llvm_ir::IrArray::Index array_index(ir_builder_.getInt64Ty(),
+ llvm_ir::ForLoopNest loop_nest(IrName(reduce), &b_);
+ llvm_ir::IrArray::Index array_index(b_.getInt64Ty(),
reduce->shape().dimensions_size());
for (int i = LayoutUtil::MinorToMajor(reduce->shape()).size() - 1; i > 0;
--i) {
@@ -1703,7 +1665,7 @@ StatusOr<bool> IrEmitter::EmitVectorizedReduce(
if (llvm::BasicBlock* innermost_body_bb =
loop_nest.GetInnerLoopBodyBasicBlock()) {
- SetToFirstInsertPoint(innermost_body_bb, &ir_builder_);
+ SetToFirstInsertPoint(innermost_body_bb, &b_);
}
auto outermost_loop_exit_block = loop_nest.GetOuterLoopExitBasicBlock();
@@ -1717,7 +1679,7 @@ StatusOr<bool> IrEmitter::EmitVectorizedReduce(
tensorflow::strings::Printf("dim.%lld", innermost_dimension));
array_index[innermost_dimension] = loop->GetIndVarValue();
- SetToFirstInsertPoint(loop->GetBodyBasicBlock(), &ir_builder_);
+ SetToFirstInsertPoint(loop->GetBodyBasicBlock(), &b_);
ShardedVectorType vector_type = CreateShardedVectorType(
reduce->shape().element_type(), vectorization_factor);
@@ -1728,16 +1690,16 @@ StatusOr<bool> IrEmitter::EmitVectorizedReduce(
llvm_ir::IrArray target_array = GetIrArrayFor(reduce);
llvm::Value* output_address =
- target_array.EmitArrayElementAddress(array_index, &ir_builder_);
+ target_array.EmitArrayElementAddress(array_index, &b_);
EmitShardedVectorStore(output_address, accumulator, element_alignment,
target_array);
if (auto exit_terminator = loop->GetExitBasicBlock()->getTerminator()) {
CHECK_GT(LayoutUtil::MinorToMajor(reduce->shape()).size(), 1);
- ir_builder_.SetInsertPoint(exit_terminator);
+ b_.SetInsertPoint(exit_terminator);
} else {
CHECK_EQ(LayoutUtil::MinorToMajor(reduce->shape()).size(), 1);
- ir_builder_.SetInsertPoint(loop->GetExitBasicBlock());
+ b_.SetInsertPoint(loop->GetExitBasicBlock());
}
}
@@ -1747,8 +1709,8 @@ StatusOr<bool> IrEmitter::EmitVectorizedReduce(
if (innermost_dimension_size % vectorization_factor) {
// TODO(b/63775531): Consider using a scalar loop here to save on code size.
array_index[innermost_dimension] =
- ir_builder_.getInt64(innermost_dimension_size -
- (innermost_dimension_size % vectorization_factor));
+ b_.getInt64(innermost_dimension_size -
+ (innermost_dimension_size % vectorization_factor));
ShardedVectorType vector_type = CreateShardedVectorType(
reduce->shape().element_type(),
@@ -1760,13 +1722,13 @@ StatusOr<bool> IrEmitter::EmitVectorizedReduce(
llvm_ir::IrArray target_array = GetIrArrayFor(reduce);
llvm::Value* output_address =
- target_array.EmitArrayElementAddress(array_index, &ir_builder_);
+ target_array.EmitArrayElementAddress(array_index, &b_);
EmitShardedVectorStore(output_address, accumulator, element_alignment,
target_array);
}
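// [Editor's note] Worked arithmetic for the scalar epilogue above, with
// assumed sizes (not from the patch): if innermost_dimension_size = 10 and
// vectorization_factor = 8, the vectorized loop covers indices [0, 8) and the
// epilogue starts at 10 - (10 % 8) = 8, reducing the trailing two elements
// with a narrower sharded vector type.
#include <cstdint>
int64_t EpilogueStart(int64_t innermost_dimension_size,
                      int64_t vectorization_factor) {
  return innermost_dimension_size -
         innermost_dimension_size % vectorization_factor;
}
// EpilogueStart(10, 8) == 8; EpilogueStart(16, 8) == 16 (no epilogue needed).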
if (outermost_loop_exit_block) {
- ir_builder_.SetInsertPoint(outermost_loop_exit_block);
+ b_.SetInsertPoint(outermost_loop_exit_block);
}
return true;
@@ -1785,22 +1747,22 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForReduce(
PrimitiveType accumulator_type = reduce->shape().element_type();
llvm::AllocaInst* accumulator_addr = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(accumulator_type, module_), "accumulator",
- &ir_builder_, MinimumAlignmentForPrimitiveType(accumulator_type));
+ &b_, MinimumAlignmentForPrimitiveType(accumulator_type));
llvm::Value* init_value_addr = GetEmittedValueFor(init_value);
- llvm::Value* load_init_value = ir_builder_.CreateLoad(init_value_addr);
- ir_builder_.CreateStore(load_init_value, accumulator_addr);
+ llvm::Value* load_init_value = b_.CreateLoad(init_value_addr);
+ b_.CreateStore(load_init_value, accumulator_addr);
// The enclosing loops go over all the target elements. Now we have to compute
// the actual target element. For this, we build a new loop nest to iterate
// over all the reduction dimensions in the argument.
// AddLoopsForShapeOnDimensions will return an Index where induction Value*s
// are placed for each dimension in dimensions, and all the rest are nullptrs.
- llvm_ir::ForLoopNest loops(IrName(reduce, "inner"), &ir_builder_);
+ llvm_ir::ForLoopNest loops(IrName(reduce, "inner"), &b_);
const llvm_ir::IrArray::Index reduced_dims_index =
loops.AddLoopsForShapeOnDimensions(arg->shape(), dimensions,
"reduction_dim");
- SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
+ SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &b_);
// Build a full index for the input argument, using reduced_dims_index as the
// base. In reduced_dims_index only the reduction dimensions are filled in. We
@@ -1820,14 +1782,14 @@ StatusOr<llvm::Value*> IrEmitter::EmitTargetElementLoopBodyForReduce(
// Apply the reduction function to the loaded value.
llvm::Value* input_address =
- arg_array.EmitArrayElementAddress(input_index, &ir_builder_);
+ arg_array.EmitArrayElementAddress(input_index, &b_);
llvm::Value* result = EmitElementFunctionCall(
reducer_function, reduce->shape(), {accumulator_addr, input_address},
"reduce_function");
- ir_builder_.CreateStore(result, accumulator_addr);
+ b_.CreateStore(result, accumulator_addr);
- SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
- return ir_builder_.CreateLoad(accumulator_addr);
+ SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &b_);
+ return b_.CreateLoad(accumulator_addr);
}
Status IrEmitter::HandleReduce(HloInstruction* reduce) {
@@ -1957,7 +1919,7 @@ Status IrEmitter::HandleSlice(HloInstruction* slice) {
llvm_ir::IrArray target_array = GetIrArrayFor(slice);
const int64 num_outer_loops = outer_dims.size();
- llvm_ir::ForLoopNest loops(IrName(slice), &ir_builder_);
+ llvm_ir::ForLoopNest loops(IrName(slice), &b_);
llvm_ir::IrArray::Index target_index =
loops.AddLoopsForShapeOnDimensions(slice->shape(), outer_dims, "slice");
@@ -1966,21 +1928,21 @@ Status IrEmitter::HandleSlice(HloInstruction* slice) {
// for the rest of the dimensions the copy writes to the full dimension.
std::replace(target_index.begin(), target_index.end(),
static_cast<llvm::Value*>(nullptr),
- static_cast<llvm::Value*>(ir_builder_.getInt64(0)));
+ static_cast<llvm::Value*>(b_.getInt64(0)));
if (num_outer_loops > 0) {
- SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
+ SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &b_);
}
llvm_ir::IrArray source_array = GetIrArrayFor(operand);
const llvm_ir::IrArray::Index source_index = target_index.SourceIndexOfSlice(
/*shape=*/slice->shape(), /*starts=*/slice->slice_starts(),
- /*strides=*/slice->slice_strides(), /*builder=*/&ir_builder_);
+ /*strides=*/slice->slice_strides(), /*builder=*/&b_);
- llvm::Value* memcpy_dest = target_array.EmitArrayElementAddress(
- target_index, &ir_builder_, "slice.dest");
- llvm::Value* memcpy_source = source_array.EmitArrayElementAddress(
- source_index, &ir_builder_, "slice.source");
+ llvm::Value* memcpy_dest =
+ target_array.EmitArrayElementAddress(target_index, &b_, "slice.dest");
+ llvm::Value* memcpy_source =
+ source_array.EmitArrayElementAddress(source_index, &b_, "slice.source");
const int64 memcpy_elements =
primitive_elements_per_logical_element * memcpy_logical_elements;
@@ -1997,7 +1959,7 @@ Status IrEmitter::HandleSlice(HloInstruction* slice) {
}
if (num_outer_loops > 0) {
- SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
+ SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &b_);
}
return Status::OK();
@@ -2023,7 +1985,7 @@ Status IrEmitter::HandleDynamicUpdateSlice(
auto operands = GetIrArraysForOperandsOf(dynamic_update_slice);
return llvm_ir::EmitDynamicUpdateSliceInPlace(
operands, GetIrArrayFor(dynamic_update_slice),
- IrName(dynamic_update_slice, "in_place"), &ir_builder_);
+ IrName(dynamic_update_slice, "in_place"), &b_);
}
return DefaultAction(dynamic_update_slice);
}
@@ -2057,43 +2019,41 @@ Status IrEmitter::HandlePad(HloInstruction* pad) {
[this, pad](const llvm_ir::IrArray::Index& target_index) {
const HloInstruction* padding_value = pad->operand(1);
llvm::Value* padding_value_addr = GetEmittedValueFor(padding_value);
- return ir_builder_.CreateLoad(padding_value_addr);
+ return b_.CreateLoad(padding_value_addr);
}));
// Create a loop to iterate over the operand elements and update the output
// locations where the operand elements should be stored.
- llvm_ir::ForLoopNest loops(IrName(pad, "assign"), &ir_builder_);
+ llvm_ir::ForLoopNest loops(IrName(pad, "assign"), &b_);
const HloInstruction* operand = pad->operand(0);
const llvm_ir::IrArray::Index operand_index =
loops.AddLoopsForShape(operand->shape(), "operand");
- SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
+ SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &b_);
// Load an element from the operand.
llvm_ir::IrArray operand_array(GetIrArrayFor(operand));
llvm::Value* operand_data =
- operand_array.EmitReadArrayElement(operand_index, &ir_builder_);
+ operand_array.EmitReadArrayElement(operand_index, &b_);
// Compute the output index the operand element should be assigned to.
// output_index := edge_padding_low + operand_index * (interior_padding + 1)
const PaddingConfig& padding_config = pad->padding_config();
llvm_ir::IrArray::Index output_index(operand_index.GetType());
for (size_t i = 0; i < operand_index.size(); ++i) {
- llvm::Value* offset = ir_builder_.CreateMul(
+ llvm::Value* offset = b_.CreateMul(
operand_index[i],
- ir_builder_.getInt64(padding_config.dimensions(i).interior_padding() +
- 1));
- llvm::Value* index = ir_builder_.CreateAdd(
- offset,
- ir_builder_.getInt64(padding_config.dimensions(i).edge_padding_low()));
+ b_.getInt64(padding_config.dimensions(i).interior_padding() + 1));
+ llvm::Value* index = b_.CreateAdd(
+ offset, b_.getInt64(padding_config.dimensions(i).edge_padding_low()));
output_index.push_back(index);
}
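// [Editor's note] Worked example of the padding index formula above, with
// assumed values (not from the patch): edge_padding_low = 2 and
// interior_padding = 1 send operand index i to output position 2 + 2 * i, so
// operand elements 0, 1, 2 land at outputs 2, 4, 6 and the remaining slots
// keep the padding value written by the initialization step.
#include <cstdint>
int64_t PadOutputIndex(int64_t operand_index, int64_t edge_padding_low,
                       int64_t interior_padding) {
  return edge_padding_low + operand_index * (interior_padding + 1);
}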
// Store the operand element to the computed output location.
llvm_ir::IrArray output_array(GetIrArrayFor(pad));
- output_array.EmitWriteArrayElement(output_index, operand_data, &ir_builder_);
+ output_array.EmitWriteArrayElement(output_index, operand_data, &b_);
- SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
+ SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &b_);
return Status::OK();
}
@@ -2115,8 +2075,7 @@ Status IrEmitter::HandleFusion(HloInstruction* fusion) {
// Delegate to common implementation of fused in-place dynamic-update-slice.
auto operands = GetIrArraysForOperandsOf(fusion);
return llvm_ir::EmitFusedDynamicUpdateSliceInPlace(
- fusion, operands, GetIrArrayFor(fusion), &elemental_emitter,
- &ir_builder_);
+ fusion, operands, GetIrArrayFor(fusion), &elemental_emitter, &b_);
} else if (fusion->fusion_kind() == HloInstruction::FusionKind::kLoop) {
VLOG(3) << "HandleFusion kLoop";
CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);
@@ -2151,7 +2110,7 @@ Status IrEmitter::HandleFusion(HloInstruction* fusion) {
TF_RETURN_IF_ERROR(DotOpEmitter::EmitDotOperation(
*dot, target_array, lhs_array, rhs_array, &addend_array,
- GetExecutableRunOptionsArgument(), &ir_builder_, hlo_module_config_,
+ GetExecutableRunOptionsArgument(), &b_, hlo_module_config_,
target_machine_features_));
return Status::OK();
} else {
@@ -2174,7 +2133,7 @@ Status IrEmitter::HandleCall(HloInstruction* call) {
// ParallelTaskAssignment assigned partitions, emit call to
// ParallelForkJoin.
std::vector<llvm::Value*> call_args = GetArrayFunctionCallArguments(
- parameter_addresses, &ir_builder_, computation->name(),
+ parameter_addresses, &b_, computation->name(),
/*return_value_buffer=*/emitted_value_[call],
/*exec_run_options_arg=*/GetExecutableRunOptionsArgument(),
/*temp_buffers_arg=*/GetTempBuffersArgument(),
@@ -2182,8 +2141,8 @@ Status IrEmitter::HandleCall(HloInstruction* call) {
HloInstruction* root = computation->root_instruction();
TF_RETURN_IF_ERROR(EmitCallToParallelForkJoin(
- call_args, root->shape(), root->outer_dimension_partitions(),
- &ir_builder_, call_ir_function, computation->name()));
+ call_args, root->shape(), root->outer_dimension_partitions(), &b_,
+ call_ir_function, computation->name()));
} else {
EmitArrayFunctionCallInto(call_ir_function, parameter_addresses,
emitted_value_[call], computation->name());
@@ -2195,33 +2154,31 @@ Status IrEmitter::HandleCall(HloInstruction* call) {
Status IrEmitter::HandleCustomCall(HloInstruction* custom_call) {
gtl::ArraySlice<HloInstruction*> operands(custom_call->operands());
tensorflow::StringPiece custom_call_target(custom_call->custom_call_target());
- llvm::Type* i8_ptr_type = ir_builder_.getInt8PtrTy();
+ llvm::Type* i8_ptr_type = b_.getInt8PtrTy();
llvm::AllocaInst* operands_alloca =
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
- i8_ptr_type, ir_builder_.getInt32(operands.size()),
- "cc_operands_alloca", &ir_builder_);
+ i8_ptr_type, b_.getInt32(operands.size()), "cc_operands_alloca", &b_);
for (size_t i = 0; i < operands.size(); ++i) {
const HloInstruction* operand = operands[i];
llvm::Value* operand_as_i8ptr =
- ir_builder_.CreatePointerCast(GetEmittedValueFor(operand), i8_ptr_type);
- llvm::Value* slot_in_operands_alloca = ir_builder_.CreateInBoundsGEP(
- operands_alloca, {ir_builder_.getInt64(i)});
- ir_builder_.CreateStore(operand_as_i8ptr, slot_in_operands_alloca);
+ b_.CreatePointerCast(GetEmittedValueFor(operand), i8_ptr_type);
+ llvm::Value* slot_in_operands_alloca =
+ b_.CreateInBoundsGEP(operands_alloca, {b_.getInt64(i)});
+ b_.CreateStore(operand_as_i8ptr, slot_in_operands_alloca);
}
auto* custom_call_ir_function =
llvm::cast<llvm::Function>(module_->getOrInsertFunction(
AsStringRef(custom_call_target),
llvm::FunctionType::get(
- /*Result=*/ir_builder_.getVoidTy(),
+ /*Result=*/b_.getVoidTy(),
/*Params=*/{i8_ptr_type, operands_alloca->getType()},
/*isVarArg=*/false)));
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(custom_call));
- auto* output_address_arg = ir_builder_.CreatePointerCast(
- GetEmittedValueFor(custom_call), i8_ptr_type);
+ auto* output_address_arg =
+ b_.CreatePointerCast(GetEmittedValueFor(custom_call), i8_ptr_type);
- ir_builder_.CreateCall(custom_call_ir_function,
- {output_address_arg, operands_alloca});
+ b_.CreateCall(custom_call_ir_function, {output_address_arg, operands_alloca});
return Status::OK();
}
@@ -2286,8 +2243,8 @@ Status IrEmitter::HandleWhile(HloInstruction* xla_while) {
llvm::BasicBlock* header_bb = llvm::BasicBlock::Create(
module_->getContext(), AsStringRef(IrName(xla_while, "header")),
compute_function_->function());
- ir_builder_.CreateBr(header_bb);
- ir_builder_.SetInsertPoint(header_bb);
+ b_.CreateBr(header_bb);
+ b_.SetInsertPoint(header_bb);
// Calls the condition function to determine whether to proceed with the
// body. It must return a bool, so use the scalar call form.
@@ -2295,7 +2252,7 @@ Status IrEmitter::HandleWhile(HloInstruction* xla_while) {
llvm::Value* while_condition = EmitElementFunctionCall(
condition_ir_function, condition->root_instruction()->shape(),
{while_result}, IrName(xla_while, "cond"));
- llvm::Value* while_predicate = ir_builder_.CreateICmpNE(
+ llvm::Value* while_predicate = b_.CreateICmpNE(
while_condition,
llvm::ConstantInt::get(llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0));
@@ -2305,20 +2262,20 @@ Status IrEmitter::HandleWhile(HloInstruction* xla_while) {
compute_function_->function());
llvm::BasicBlock* exit_bb = llvm::BasicBlock::Create(
module_->getContext(), AsStringRef(IrName(xla_while, "exit")));
- ir_builder_.CreateCondBr(while_predicate, body_bb, exit_bb);
+ b_.CreateCondBr(while_predicate, body_bb, exit_bb);
// Calls the body function from the body block.
- ir_builder_.SetInsertPoint(body_bb);
+ b_.SetInsertPoint(body_bb);
// Calls the body function.
EmitArrayFunctionCallInto(body_ir_function, {while_result}, while_result,
IrName(xla_while, "body"));
// Finishes with a branch back to the header.
- ir_builder_.CreateBr(header_bb);
+ b_.CreateBr(header_bb);
// Adds the exit block to the function and sets the insert point there.
compute_function_->function()->getBasicBlockList().push_back(exit_bb);
- ir_builder_.SetInsertPoint(exit_bb);
+ b_.SetInsertPoint(exit_bb);
return Status::OK();
}
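// [Editor's note] The basic blocks emitted by HandleWhile correspond to an
// ordinary while loop; a minimal C++ analogue, with illustrative names that
// are not part of the patch:
void WhileAnalogue(bool (*condition)(void*), void (*body)(void*),
                   void* while_buffer) {
  while (condition(while_buffer)) {  // "header": scalar condition call + CondBr
    body(while_buffer);              // "body": updates the while buffer in place
  }                                  // back-edge Br to header; "exit" follows
}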
@@ -2360,21 +2317,21 @@ StatusOr<bool> IrEmitter::EmitFastConcatenate(
std::vector<int64> outer_dims(std::next(concat_dim_layout_itr),
output_min2maj.end());
- llvm::Type* i8_ptr_type = ir_builder_.getInt8PtrTy();
- llvm::Type* i8_type = ir_builder_.getInt8Ty();
+ llvm::Type* i8_ptr_type = b_.getInt8PtrTy();
+ llvm::Type* i8_type = b_.getInt8Ty();
TF_RETURN_IF_ERROR(EmitTargetAddressForOp(concatenate));
llvm_ir::IrArray target_array = GetIrArrayFor(concatenate);
- llvm_ir::ForLoopNest loops(IrName(concatenate), &ir_builder_);
+ llvm_ir::ForLoopNest loops(IrName(concatenate), &b_);
llvm_ir::IrArray::Index outer_dims_index =
loops.AddLoopsForShapeOnDimensions(output_shape, outer_dims, "concat");
std::replace(outer_dims_index.begin(), outer_dims_index.end(),
static_cast<llvm::Value*>(nullptr),
- static_cast<llvm::Value*>(ir_builder_.getInt64(0)));
+ static_cast<llvm::Value*>(b_.getInt64(0)));
if (!outer_dims.empty()) {
- SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
+ SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &b_);
}
PrimitiveType primitive_type = output_shape.element_type();
@@ -2383,10 +2340,10 @@ StatusOr<bool> IrEmitter::EmitFastConcatenate(
// Contiguous subregions from each operand to the concatenate contribute to a
// contiguous subregion in the target buffer starting at target_region_begin.
- llvm::Value* target_region_begin = ir_builder_.CreateBitCast(
- target_array.EmitArrayElementAddress(outer_dims_index, &ir_builder_,
- "target_region"),
- i8_ptr_type);
+ llvm::Value* target_region_begin =
+ b_.CreateBitCast(target_array.EmitArrayElementAddress(
+ outer_dims_index, &b_, "target_region"),
+ i8_ptr_type);
int64 byte_offset_into_target_region = 0;
int64 inner_dims_product =
@@ -2400,14 +2357,13 @@ StatusOr<bool> IrEmitter::EmitFastConcatenate(
for (HloInstruction* operand : operands) {
const Shape& input_shape = operand->shape();
llvm_ir::IrArray source_array = GetIrArrayFor(operand);
- llvm::Value* copy_source_address = ir_builder_.CreateBitCast(
- source_array.EmitArrayElementAddress(outer_dims_index, &ir_builder_,
- "src_addr"),
+ llvm::Value* copy_source_address = b_.CreateBitCast(
+ source_array.EmitArrayElementAddress(outer_dims_index, &b_, "src_addr"),
i8_ptr_type);
- llvm::Value* copy_target_address = ir_builder_.CreateGEP(
- i8_type, target_region_begin,
- ir_builder_.getInt64(byte_offset_into_target_region));
+ llvm::Value* copy_target_address =
+ b_.CreateGEP(i8_type, target_region_begin,
+ b_.getInt64(byte_offset_into_target_region));
EmitTransferElements(
copy_target_address, copy_source_address,
@@ -2420,7 +2376,7 @@ StatusOr<bool> IrEmitter::EmitFastConcatenate(
}
if (!outer_dims.empty()) {
- SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
+ SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &b_);
}
return true;
@@ -2439,16 +2395,15 @@ void IrEmitter::EmitTransferElements(llvm::Value* target, llvm::Value* source,
llvm_ir::PrimitiveTypeToIrType(primitive_type, module_));
if (element_count == 1) {
- auto* load_instruction = ir_builder_.CreateAlignedLoad(
- ir_builder_.CreateBitCast(source, primitive_ptr_type),
- element_alignment);
+ auto* load_instruction = b_.CreateAlignedLoad(
+ b_.CreateBitCast(source, primitive_ptr_type), element_alignment);
source_array.AnnotateLoadStoreInstructionWithMetadata(load_instruction);
- auto* store_instruction = ir_builder_.CreateAlignedStore(
- load_instruction, ir_builder_.CreateBitCast(target, primitive_ptr_type),
+ auto* store_instruction = b_.CreateAlignedStore(
+ load_instruction, b_.CreateBitCast(target, primitive_ptr_type),
element_alignment);
target_array.AnnotateLoadStoreInstructionWithMetadata(store_instruction);
} else {
- auto* memcpy_instruction = ir_builder_.CreateMemCpy(
+ auto* memcpy_instruction = b_.CreateMemCpy(
target, /*DstAlign=*/element_alignment, source,
/*SrcAlign=*/element_alignment, element_count * primitive_type_size);
@@ -2518,24 +2473,24 @@ Status IrEmitter::HandleConditional(HloInstruction* conditional) {
// cond_result = true_computation(true_operand)
// else
// cond_result = false_computation(false_operand)
- llvm::LoadInst* pred_value = ir_builder_.CreateLoad(
+ llvm::LoadInst* pred_value = b_.CreateLoad(
GetIrArrayFor(pred).GetBasePointer(), "load_predicate_value");
- llvm::Value* pred_cond = ir_builder_.CreateICmpNE(
+ llvm::Value* pred_cond = b_.CreateICmpNE(
pred_value,
llvm::ConstantInt::get(llvm_ir::PrimitiveTypeToIrType(PRED, module_), 0),
"boolean_predicate");
llvm_ir::LlvmIfData if_data =
- llvm_ir::EmitIfThenElse(pred_cond, "conditional", &ir_builder_);
+ llvm_ir::EmitIfThenElse(pred_cond, "conditional", &b_);
- SetToFirstInsertPoint(if_data.true_block, &ir_builder_);
+ SetToFirstInsertPoint(if_data.true_block, &b_);
EmitArrayFunctionCallInto(true_function, {GetEmittedValueFor(true_arg)},
conditional_result, IrName(conditional, "_true"));
- SetToFirstInsertPoint(if_data.false_block, &ir_builder_);
+ SetToFirstInsertPoint(if_data.false_block, &b_);
EmitArrayFunctionCallInto(false_function, {GetEmittedValueFor(false_arg)},
conditional_result, IrName(conditional, "_false"));
- SetToFirstInsertPoint(if_data.after_block, &ir_builder_);
+ SetToFirstInsertPoint(if_data.after_block, &b_);
return Status::OK();
}
@@ -2546,6 +2501,11 @@ Status IrEmitter::HandleAfterAll(HloInstruction* gen_token) {
return Status::OK();
}
+Status IrEmitter::HandleIota(HloInstruction* iota) {
+ // TODO(b/64798317): implement iota on CPU.
+ return Unimplemented("Iota is not implemented on CPU.");
+}
+
Status IrEmitter::FinishVisit(HloInstruction* root) {
// When this method is called, we should have already emitted an IR value for
// the root (return) op. The IR value holds the address of the buffer holding
@@ -2563,7 +2523,7 @@ Status IrEmitter::FinishVisit(HloInstruction* root) {
auto record_complete_computation = [&](llvm::Value* prof_counter) {
if (prof_counter) {
- profiling_state_.RecordCompleteComputation(&ir_builder_, prof_counter);
+ profiling_state_.RecordCompleteComputation(&b_, prof_counter);
}
};
@@ -2585,54 +2545,51 @@ llvm::Value* IrEmitter::GetProfileCounterCommon(
int64 prof_counter_idx = it->second;
string counter_name = IrName("prof_counter", hlo.name());
- return ir_builder_.CreateGEP(GetProfileCountersArgument(),
- ir_builder_.getInt64(prof_counter_idx),
- AsStringRef(counter_name));
+ return b_.CreateGEP(GetProfileCountersArgument(),
+ b_.getInt64(prof_counter_idx), AsStringRef(counter_name));
}
-void IrEmitter::ProfilingState::UpdateProfileCounter(
- llvm::IRBuilder<>* ir_builder, llvm::Value* prof_counter,
- llvm::Value* cycle_end, llvm::Value* cycle_start) {
- auto* cycle_diff = ir_builder->CreateSub(cycle_end, cycle_start);
+void IrEmitter::ProfilingState::UpdateProfileCounter(llvm::IRBuilder<>* b,
+ llvm::Value* prof_counter,
+ llvm::Value* cycle_end,
+ llvm::Value* cycle_start) {
+ auto* cycle_diff = b->CreateSub(cycle_end, cycle_start);
llvm::LoadInst* old_cycle_count =
- ir_builder->CreateLoad(prof_counter, "old_cycle_count");
+ b->CreateLoad(prof_counter, "old_cycle_count");
auto* new_cycle_count =
- ir_builder->CreateAdd(cycle_diff, old_cycle_count, "new_cycle_count");
- ir_builder->CreateStore(new_cycle_count, prof_counter);
+ b->CreateAdd(cycle_diff, old_cycle_count, "new_cycle_count");
+ b->CreateStore(new_cycle_count, prof_counter);
}
-llvm::Value* IrEmitter::ProfilingState::ReadCycleCounter(
- llvm::IRBuilder<>* ir_builder) {
- llvm::Module* module = ir_builder->GetInsertBlock()->getModule();
+llvm::Value* IrEmitter::ProfilingState::ReadCycleCounter(llvm::IRBuilder<>* b) {
+ llvm::Module* module = b->GetInsertBlock()->getModule();
  if (!use_rdtscp_) {
llvm::Function* func_llvm_readcyclecounter =
llvm::Intrinsic::getDeclaration(module,
llvm::Intrinsic::readcyclecounter);
- return ir_builder->CreateCall(func_llvm_readcyclecounter);
+ return b->CreateCall(func_llvm_readcyclecounter);
}
llvm::Function* func_llvm_x86_rdtscp =
llvm::Intrinsic::getDeclaration(module, llvm::Intrinsic::x86_rdtscp);
if (!aux_i8ptr_) {
- llvm::AllocaInst* rdtscp_aux = llvm_ir::EmitAllocaAtFunctionEntry(
- ir_builder->getInt32Ty(), "rdtscp_aux", ir_builder);
- aux_i8ptr_ =
- ir_builder->CreateBitCast(rdtscp_aux, ir_builder->getInt8PtrTy());
+ llvm::AllocaInst* rdtscp_aux =
+ llvm_ir::EmitAllocaAtFunctionEntry(b->getInt32Ty(), "rdtscp_aux", b);
+ aux_i8ptr_ = b->CreateBitCast(rdtscp_aux, b->getInt8PtrTy());
}
- llvm::ConstantInt* alloca_size = ir_builder->getInt64(4);
+ llvm::ConstantInt* alloca_size = b->getInt64(4);
llvm::Function* func_llvm_lifetime_start =
llvm::Intrinsic::getDeclaration(module, llvm::Intrinsic::lifetime_start);
- ir_builder->CreateCall(func_llvm_lifetime_start, {alloca_size, aux_i8ptr_});
- llvm::Value* rdtscp_call =
- ir_builder->CreateCall(func_llvm_x86_rdtscp, aux_i8ptr_);
+ b->CreateCall(func_llvm_lifetime_start, {alloca_size, aux_i8ptr_});
+ llvm::Value* rdtscp_call = b->CreateCall(func_llvm_x86_rdtscp, aux_i8ptr_);
llvm::Function* func_llvm_lifetime_end =
llvm::Intrinsic::getDeclaration(module, llvm::Intrinsic::lifetime_end);
- ir_builder->CreateCall(func_llvm_lifetime_end, {alloca_size, aux_i8ptr_});
+ b->CreateCall(func_llvm_lifetime_end, {alloca_size, aux_i8ptr_});
return rdtscp_call;
}
-void IrEmitter::ProfilingState::RecordCycleStart(llvm::IRBuilder<>* ir_builder,
+void IrEmitter::ProfilingState::RecordCycleStart(llvm::IRBuilder<>* b,
HloInstruction* hlo) {
- auto* cycle_start = ReadCycleCounter(ir_builder);
+ auto* cycle_start = ReadCycleCounter(b);
cycle_start->setName(AsStringRef(IrName(hlo, "cycle_start")));
cycle_starts_[hlo] = cycle_start;
if (first_read_cycle_start_ == nullptr) {
@@ -2640,20 +2597,20 @@ void IrEmitter::ProfilingState::RecordCycleStart(llvm::IRBuilder<>* ir_builder,
}
}
-void IrEmitter::ProfilingState::RecordCycleDelta(llvm::IRBuilder<>* ir_builder,
+void IrEmitter::ProfilingState::RecordCycleDelta(llvm::IRBuilder<>* b,
HloInstruction* hlo,
llvm::Value* prof_counter) {
- auto* cycle_end = ReadCycleCounter(ir_builder);
+ auto* cycle_end = ReadCycleCounter(b);
cycle_end->setName(AsStringRef(IrName(hlo, "cycle_end")));
auto* cycle_start = cycle_starts_[hlo];
- UpdateProfileCounter(ir_builder, prof_counter, cycle_end, cycle_start);
+ UpdateProfileCounter(b, prof_counter, cycle_end, cycle_start);
last_read_cycle_end_ = cycle_end;
}
void IrEmitter::ProfilingState::RecordCompleteComputation(
- llvm::IRBuilder<>* ir_builder, llvm::Value* prof_counter) {
+ llvm::IRBuilder<>* b, llvm::Value* prof_counter) {
if (last_read_cycle_end_ && first_read_cycle_start_) {
- UpdateProfileCounter(ir_builder, prof_counter, last_read_cycle_end_,
+ UpdateProfileCounter(b, prof_counter, last_read_cycle_end_,
first_read_cycle_start_);
}
}
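
For reference, a minimal standalone sketch (not part of this patch) of what the profiling IR emitted above computes per instruction: read the cycle counter before and after the HLO body and accumulate the delta into that instruction's profile counter. This mirrors the use_rdtscp_ path; the helper names are hypothetical and x86-specific.

#include <cstdint>
#include <x86intrin.h>

inline uint64_t ReadCycles() {
  unsigned int aux;              // rdtscp also reports the processor ID here
  return __rdtscp(&aux);         // time-stamp counter, as in ReadCycleCounter
}

void RecordHloCycles(uint64_t* prof_counter, void (*run_hlo)()) {
  const uint64_t start = ReadCycles();   // RecordCycleStart
  run_hlo();                             // the emitted HLO body
  const uint64_t end = ReadCycles();     // RecordCycleDelta
  *prof_counter += end - start;          // the load/add/store in UpdateProfileCounter
}
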
@@ -2661,14 +2618,14 @@ void IrEmitter::ProfilingState::RecordCompleteComputation(
Status IrEmitter::Preprocess(HloInstruction* hlo) {
VLOG(3) << "Visiting: " << hlo->ToString();
if (instruction_to_profile_idx_.count(hlo)) {
- profiling_state_.RecordCycleStart(&ir_builder_, hlo);
+ profiling_state_.RecordCycleStart(&b_, hlo);
}
return Status::OK();
}
Status IrEmitter::Postprocess(HloInstruction* hlo) {
if (auto* prof_counter = GetProfileCounterFor(*hlo)) {
- profiling_state_.RecordCycleDelta(&ir_builder_, hlo, prof_counter);
+ profiling_state_.RecordCycleDelta(&b_, hlo, prof_counter);
}
return Status::OK();
}
@@ -2727,22 +2684,20 @@ llvm::Value* IrEmitter::EmitTempBufferPointer(
CHECK_EQ(1, assigned_buffers.size());
const Shape& shape = assigned_buffers.begin()->first->shape();
- llvm::AllocaInst*& tempbuf_address = thread_local_buffers_[{
- ir_builder_.GetInsertBlock()->getParent(), slice}];
+ llvm::AllocaInst*& tempbuf_address =
+ thread_local_buffers_[{b_.GetInsertBlock()->getParent(), slice}];
if (tempbuf_address == nullptr) {
tempbuf_address = llvm_ir::EmitAllocaAtFunctionEntry(
IrShapeType(shape),
- tensorflow::strings::StrCat("thread_local", slice.ToString()),
- &ir_builder_, MinimumAlignmentForShape(target_shape));
+ tensorflow::strings::StrCat("thread_local", slice.ToString()), &b_,
+ MinimumAlignmentForShape(target_shape));
}
- return ir_builder_.CreateBitCast(tempbuf_address,
- element_type->getPointerTo());
+ return b_.CreateBitCast(tempbuf_address, element_type->getPointerTo());
}
llvm::Value* tempbuf_address_ptr = llvm_ir::EmitBufferIndexingGEP(
- GetTempBuffersArgument(), slice.index(), &ir_builder_);
- llvm::LoadInst* tempbuf_address_base =
- ir_builder_.CreateLoad(tempbuf_address_ptr);
+ GetTempBuffersArgument(), slice.index(), &b_);
+ llvm::LoadInst* tempbuf_address_base = b_.CreateLoad(tempbuf_address_ptr);
if (is_top_level_computation_ &&
hlo_module_config_.debug_options()
.xla_llvm_enable_invariant_load_metadata()) {
@@ -2761,11 +2716,11 @@ llvm::Value* IrEmitter::EmitTempBufferPointer(
llvm::Value* tempbuf_address_untyped = tempbuf_address_base;
if (slice.offset() > 0) {
// Adjust the address to account for the slice offset.
- tempbuf_address_untyped = ir_builder_.CreateInBoundsGEP(
- tempbuf_address_base, ir_builder_.getInt64(slice.offset()));
+ tempbuf_address_untyped =
+ b_.CreateInBoundsGEP(tempbuf_address_base, b_.getInt64(slice.offset()));
}
- return ir_builder_.CreateBitCast(tempbuf_address_untyped,
- element_type->getPointerTo());
+ return b_.CreateBitCast(tempbuf_address_untyped,
+ element_type->getPointerTo());
}
// Emits a function call returning a single array element. Allocates space
@@ -2776,7 +2731,7 @@ llvm::Value* IrEmitter::EmitElementFunctionCall(
tensorflow::StringPiece name) {
llvm::Value* return_value_buffer = EmitArrayFunctionCall(
function, return_shape, 1, parameter_addresses, name);
- return ir_builder_.CreateLoad(
+ return b_.CreateLoad(
return_value_buffer,
AsStringRef(tensorflow::strings::StrCat(name, "_return_value")));
}
@@ -2794,9 +2749,9 @@ llvm::Value* IrEmitter::EmitElementFunctionCall(
void IrEmitter::EmitArrayFunctionCallInto(
llvm::Function* function, gtl::ArraySlice<llvm::Value*> parameter_addresses,
llvm::Value* return_value_buffer, tensorflow::StringPiece name) {
- ir_builder_.CreateCall(
- function, GetArrayFunctionCallArguments(
- parameter_addresses, &ir_builder_, name,
+ b_.CreateCall(function,
+ GetArrayFunctionCallArguments(
+ parameter_addresses, &b_, name,
/*return_value_buffer=*/return_value_buffer,
/*exec_run_options_arg=*/GetExecutableRunOptionsArgument(),
/*temp_buffers_arg=*/GetTempBuffersArgument(),
@@ -2808,13 +2763,13 @@ llvm::Value* IrEmitter::EmitArrayFunctionCall(
gtl::ArraySlice<llvm::Value*> parameter_addresses,
tensorflow::StringPiece name) {
llvm::Value* elements =
- llvm::ConstantInt::get(ir_builder_.getInt64Ty(), element_count);
+ llvm::ConstantInt::get(b_.getInt64Ty(), element_count);
PrimitiveType return_type = return_shape.element_type();
llvm::Value* return_value_buffer =
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
llvm_ir::PrimitiveTypeToIrType(return_type, module_), elements,
- tensorflow::strings::StrCat(name, "_return_value_address"),
- &ir_builder_, MinimumAlignmentForPrimitiveType(return_type));
+ tensorflow::strings::StrCat(name, "_return_value_address"), &b_,
+ MinimumAlignmentForPrimitiveType(return_type));
EmitArrayFunctionCallInto(function, parameter_addresses, return_value_buffer,
name);
return return_value_buffer;
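
As a rough illustration (not part of this patch), the call emitted above follows a buffer-based convention: the caller allocates the return buffer, packs the parameter addresses into an array, and reads scalar results back with a load. A plain-C++ analogue with hypothetical names and a simplified signature:

#include <cstdint>

using ComputeFn = void (*)(void* retval, const void* run_options, void** params,
                           void** temps, int64_t* prof_counters);

void AddOneComputation(void* retval, const void*, void** params, void**, int64_t*) {
  *static_cast<float*>(retval) = *static_cast<float*>(params[0]) + 1.0f;
}

float CallScalarComputation(ComputeFn fn, float arg) {
  float ret = 0.0f;                       // caller-allocated return buffer
  void* params[] = {&arg};                // the parameter-address array
  fn(&ret, /*run_options=*/nullptr, params, /*temps=*/nullptr,
     /*prof_counters=*/nullptr);
  return ret;                             // corresponds to the CreateLoad above
}
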
@@ -2836,8 +2791,7 @@ Status IrEmitter::EmitTargetAddressForOp(const HloInstruction* op) {
attr_builder.addDereferenceableAttr(ByteSizeOf(target_shape));
retval->addAttrs(attr_builder);
}
- addr = ir_builder_.CreateBitCast(retval,
- IrShapeType(target_shape)->getPointerTo());
+ addr = b_.CreateBitCast(retval, IrShapeType(target_shape)->getPointerTo());
} else {
// For other nodes, we need the temporary buffer allocated for this node to
// write the result into.
@@ -2879,14 +2833,14 @@ Status IrEmitter::EmitTargetElementLoop(
llvm_ir::IrArray(op_target_address, element_shape));
}
TF_RETURN_IF_ERROR(
- llvm_ir::LoopEmitter(element_generator, output_arrays, &ir_builder_)
+ llvm_ir::LoopEmitter(element_generator, output_arrays, &b_)
.EmitLoop(IrName(target_op)));
std::vector<llvm::Value*> tuple_operand_ptrs;
for (int64 i = 0; i < output_arrays.size(); ++i) {
tuple_operand_ptrs.push_back(output_arrays[i].GetBasePointer());
}
- llvm_ir::EmitTuple(target_array, tuple_operand_ptrs, &ir_builder_, module_);
+ llvm_ir::EmitTuple(target_array, tuple_operand_ptrs, &b_, module_);
} else {
if (ShouldEmitParallelLoopFor(*target_op)) {
@@ -2895,11 +2849,11 @@ Status IrEmitter::EmitTargetElementLoop(
compute_function_->GetDynamicLoopBounds();
// Emit parallel loop with dynamic loop bounds for most-major dimensions.
TF_RETURN_IF_ERROR(ParallelLoopEmitter(element_generator, target_array,
- &dynamic_loop_bounds, &ir_builder_)
+ &dynamic_loop_bounds, &b_)
.EmitLoop(IrName(target_op)));
} else {
TF_RETURN_IF_ERROR(
- llvm_ir::LoopEmitter(element_generator, target_array, &ir_builder_)
+ llvm_ir::LoopEmitter(element_generator, target_array, &b_)
.EmitLoop(IrName(target_op)));
}
}
@@ -2912,8 +2866,8 @@ Status IrEmitter::EmitMemcpy(const HloInstruction& source,
llvm::Value* destination_value = GetEmittedValueFor(&destination);
int64 source_size = ByteSizeOf(source.shape());
// TODO(b/63762267): Be more aggressive about specifying alignment.
- ir_builder_.CreateMemCpy(destination_value, /*DstAlign=*/1, source_value,
- /*SrcAlign=*/1, source_size);
+ b_.CreateMemCpy(destination_value, /*DstAlign=*/1, source_value,
+ /*SrcAlign=*/1, source_size);
return Status::OK();
}
@@ -2941,7 +2895,7 @@ Status IrEmitter::DefaultAction(HloInstruction* hlo) {
ElementalIrEmitter::HloToElementGeneratorMap operand_to_generator;
for (const HloInstruction* operand : hlo->operands()) {
operand_to_generator[operand] = [=](const llvm_ir::IrArray::Index& index) {
- return GetIrArrayFor(operand).EmitReadArrayElement(index, &ir_builder_);
+ return GetIrArrayFor(operand).EmitReadArrayElement(index, &b_);
};
}
CpuElementalIrEmitter elemental_emitter(hlo_module_config_, this, module_);
@@ -2956,8 +2910,8 @@ StatusOr<llvm::Value*> IrEmitter::EmitScalarCall(
std::vector<llvm::Value*> argument_addrs;
for (auto argument : arguments) {
llvm::Value* argument_addr = llvm_ir::EmitAllocaAtFunctionEntry(
- argument->getType(), "arg_addr", &ir_builder_);
- ir_builder_.CreateStore(argument, argument_addr);
+ argument->getType(), "arg_addr", &b_);
+ b_.CreateStore(argument, argument_addr);
argument_addrs.push_back(argument_addr);
}
return EmitElementFunctionCall(llvm_function,
diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.h b/tensorflow/compiler/xla/service/cpu/ir_emitter.h
index 419f19c24d..4e928ffadc 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_emitter.h
+++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.h
@@ -98,7 +98,7 @@ class IrEmitter : public DfsHloVisitorWithDefault {
bool is_top_level_computation,
std::vector<const HloInstruction*>* instruction_order);
- llvm::IRBuilder<>* ir_builder() { return &ir_builder_; }
+ llvm::IRBuilder<>* b() { return &b_; }
// Emits a call to `computation` with scalar arguments `arguments`.
StatusOr<llvm::Value*> EmitScalarCall(
@@ -148,6 +148,7 @@ class IrEmitter : public DfsHloVisitorWithDefault {
Status HandleConcatenate(HloInstruction* concatenate) override;
Status HandleConditional(HloInstruction* conditional) override;
Status HandleAfterAll(HloInstruction* gen_token) override;
+ Status HandleIota(HloInstruction* iota) override;
Status FinishVisit(HloInstruction* root) override;
Status Preprocess(HloInstruction* hlo) override;
@@ -415,7 +416,7 @@ class IrEmitter : public DfsHloVisitorWithDefault {
// creates the encapsulated llvm::Function s.t. it is added to the llvm
// module's function list).
std::unique_ptr<IrFunction> compute_function_;
- llvm::IRBuilder<> ir_builder_;
+ llvm::IRBuilder<> b_;
// Maps HLO instructions to their index into the profile counter array.
const std::unordered_map<const HloInstruction*, int64>
@@ -451,23 +452,22 @@ class IrEmitter : public DfsHloVisitorWithDefault {
: use_rdtscp_(use_rdtscp), prof_counters_(prof_counters) {}
// Record the cycle counter before an HLO executes.
- void RecordCycleStart(llvm::IRBuilder<>* ir_builder, HloInstruction* hlo);
+ void RecordCycleStart(llvm::IRBuilder<>* b, HloInstruction* hlo);
// Record the number of cycles it took for an HLO to execute.
- void RecordCycleDelta(llvm::IRBuilder<>* ir_builder, HloInstruction* hlo,
+ void RecordCycleDelta(llvm::IRBuilder<>* b, HloInstruction* hlo,
llvm::Value* prof_counter);
// Record the number of cycles it took for the entire computation to
// execute.
- void RecordCompleteComputation(llvm::IRBuilder<>* ir_builder,
+ void RecordCompleteComputation(llvm::IRBuilder<>* b,
llvm::Value* prof_counter);
// Convenience function to generate a call to an intrinsic which reads the
// CPU cycle counter.
- llvm::Value* ReadCycleCounter(llvm::IRBuilder<>* ir_builder);
+ llvm::Value* ReadCycleCounter(llvm::IRBuilder<>* b);
// Store the cycle counter delta to the per-HLO profile counter.
- void UpdateProfileCounter(llvm::IRBuilder<>* ir_builder,
- llvm::Value* prof_counter, llvm::Value* cycle_end,
- llvm::Value* cycle_start);
+ void UpdateProfileCounter(llvm::IRBuilder<>* b, llvm::Value* prof_counter,
+ llvm::Value* cycle_end, llvm::Value* cycle_start);
private:
// Should we use the x86-specific rdtscp or the generic readcyclecounter
diff --git a/tensorflow/compiler/xla/service/cpu/ir_function.cc b/tensorflow/compiler/xla/service/cpu/ir_function.cc
index 2d6f2f3818..6aff838462 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_function.cc
+++ b/tensorflow/compiler/xla/service/cpu/ir_function.cc
@@ -49,11 +49,10 @@ IrFunction::IrFunction(const string& function_name,
llvm::Function::LinkageTypes linkage,
const bool optimize_for_size_requested,
const bool enable_fast_math, llvm::Module* llvm_module,
- llvm::IRBuilder<>* ir_builder,
- int64 num_dynamic_loop_bounds)
- : ir_builder_(ir_builder),
+ llvm::IRBuilder<>* b, int64 num_dynamic_loop_bounds)
+ : b_(b),
llvm_module_(llvm_module),
- caller_insert_point_guard_(*ir_builder),
+ caller_insert_point_guard_(*b),
num_dynamic_loop_bounds_(num_dynamic_loop_bounds) {
Initialize(function_name, linkage, optimize_for_size_requested,
enable_fast_math);
@@ -61,7 +60,7 @@ IrFunction::IrFunction(const string& function_name,
IrFunction::~IrFunction() {
// Emit function return value.
- ir_builder_->CreateRetVoid();
+ b_->CreateRetVoid();
}
DynamicLoopBounds IrFunction::GetDynamicLoopBounds() {
@@ -174,7 +173,7 @@ void IrFunction::Initialize(const string& function_name,
function_->addAttribute(argument.getArgNo() + 1, llvm::Attribute::NoAlias);
}
- ir_builder_->SetInsertPoint(llvm::BasicBlock::Create(
+ b_->SetInsertPoint(llvm::BasicBlock::Create(
/*Context=*/llvm_module_->getContext(),
/*Name=*/"entry",
/*Parent=*/function_));
@@ -184,9 +183,8 @@ llvm::Value* IrFunction::GetDynamicLoopBound(const int64 offset) {
CHECK_GT(num_dynamic_loop_bounds_, 0);
CHECK_LT(offset, num_dynamic_loop_bounds_ * 2);
string name = tensorflow::strings::StrCat("dynamic_loop_bound_", offset);
- return ir_builder_->CreateLoad(
- ir_builder_->CreateGEP(CHECK_NOTNULL(dynamic_loop_bounds_arg_),
- ir_builder_->getInt64(offset), AsStringRef(name)));
+ return b_->CreateLoad(b_->CreateGEP(CHECK_NOTNULL(dynamic_loop_bounds_arg_),
+ b_->getInt64(offset), AsStringRef(name)));
}
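
A small sketch (not part of this patch) of the layout GetDynamicLoopBound indexes into, under the assumption implied by the CHECKs above: a flat i64 array holding a (start, limit) pair per partitioned dimension.

#include <cstdint>
#include <utility>

// Hypothetical helper: dimension d's bounds live at offsets 2*d and 2*d + 1.
std::pair<int64_t, int64_t> GetDimBounds(const int64_t* dynamic_loop_bounds,
                                         int64_t dim) {
  return {dynamic_loop_bounds[2 * dim], dynamic_loop_bounds[2 * dim + 1]};
}
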
// Emits code to allocate an array of parameter address pointers, and store
@@ -195,27 +193,25 @@ llvm::Value* IrFunction::GetDynamicLoopBound(const int64 offset) {
// address buffer).
std::vector<llvm::Value*> GetArrayFunctionCallArguments(
tensorflow::gtl::ArraySlice<llvm::Value*> parameter_addresses,
- llvm::IRBuilder<>* ir_builder, tensorflow::StringPiece name,
+ llvm::IRBuilder<>* b, tensorflow::StringPiece name,
llvm::Value* return_value_buffer, llvm::Value* exec_run_options_arg,
llvm::Value* temp_buffers_arg, llvm::Value* profile_counters_arg) {
llvm::Value* parameter_addresses_buffer =
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
- ir_builder->getInt8PtrTy(),
- ir_builder->getInt32(parameter_addresses.size()),
- tensorflow::strings::StrCat(name, "_parameter_addresses"),
- ir_builder);
+ b->getInt8PtrTy(), b->getInt32(parameter_addresses.size()),
+ tensorflow::strings::StrCat(name, "_parameter_addresses"), b);
for (size_t i = 0; i < parameter_addresses.size(); ++i) {
- llvm::Value* parameter_as_i8ptr = ir_builder->CreateBitCast(
- parameter_addresses[i], ir_builder->getInt8PtrTy(),
- AsStringRef(tensorflow::strings::StrCat(name, "_parameter_", i,
- "_address_as_i8ptr")));
- llvm::Value* slot_in_param_addresses = ir_builder->CreateInBoundsGEP(
- parameter_addresses_buffer, {ir_builder->getInt64(i)});
- ir_builder->CreateStore(parameter_as_i8ptr, slot_in_param_addresses);
+ llvm::Value* parameter_as_i8ptr =
+ b->CreateBitCast(parameter_addresses[i], b->getInt8PtrTy(),
+ AsStringRef(tensorflow::strings::StrCat(
+ name, "_parameter_", i, "_address_as_i8ptr")));
+ llvm::Value* slot_in_param_addresses =
+ b->CreateInBoundsGEP(parameter_addresses_buffer, {b->getInt64(i)});
+ b->CreateStore(parameter_as_i8ptr, slot_in_param_addresses);
}
const auto to_int8_ptr = [=](llvm::Value* ptr) {
- return ir_builder->CreatePointerCast(ptr, ir_builder->getInt8PtrTy());
+ return b->CreatePointerCast(ptr, b->getInt8PtrTy());
};
std::vector<llvm::Value*> arguments{
to_int8_ptr(return_value_buffer), to_int8_ptr(exec_run_options_arg),
@@ -230,22 +226,21 @@ std::vector<llvm::Value*> GetArrayFunctionCallArguments(
// calls to 'parallel_function' (and joins threads before returning).
Status EmitCallToParallelForkJoin(
const std::vector<llvm::Value*>& arguments, const Shape& shape,
- const std::vector<int64>& dimension_partition_counts,
- llvm::IRBuilder<>* ir_builder, llvm::Function* parallel_function,
- const string& name) {
- llvm::Module* module = ir_builder->GetInsertBlock()->getModule();
+ const std::vector<int64>& dimension_partition_counts, llvm::IRBuilder<>* b,
+ llvm::Function* parallel_function, const string& name) {
+ llvm::Module* module = b->GetInsertBlock()->getModule();
// Build ParallelForkJoin function type.
std::vector<llvm::Type*> compute_function_params =
GetComputeFunctionParams(module, /*num_dynamic_loop_bounds=*/0);
// Number of parallel compute functions.
- compute_function_params.push_back(ir_builder->getInt32Ty());
+ compute_function_params.push_back(b->getInt32Ty());
// Array of partitions. There is an array element for each
// partition x partition_dim x 2 (for dimension start and limit).
compute_function_params.push_back(
llvm::Type::getInt64PtrTy(module->getContext()));
// Number of partitioned most-major dimensions in 'shape'.
- compute_function_params.push_back(ir_builder->getInt32Ty());
+ compute_function_params.push_back(b->getInt32Ty());
// Function pointer for compute function to be dispatched in parallel.
compute_function_params.push_back(
llvm::Type::getInt8PtrTy(module->getContext()));
@@ -268,7 +263,7 @@ Status EmitCallToParallelForkJoin(
ShapePartitionIterator partition_iterator(shape, dimension_partition_counts);
const int64 num_partitions = partition_iterator.GetTotalPartitionCount();
// Add argument specifying the number of parallel partitions.
- fork_join_arguments.push_back(ir_builder->getInt32(num_partitions));
+ fork_join_arguments.push_back(b->getInt32(num_partitions));
// The number of partitioned most-major dimensions in 'shape'.
const int32 num_partitioned_dims = dimension_partition_counts.size();
@@ -293,15 +288,15 @@ Status EmitCallToParallelForkJoin(
const std::pair<int64, int64>& dim_partition = dim_partitions[j];
const int32 index = partition_index + j * dim_partition_size;
// Store partition [dim_start, dim_limit) intervals for each dimension.
- partitions[index] = ir_builder->getInt64(dim_partition.first);
+ partitions[index] = b->getInt64(dim_partition.first);
partitions[index + 1] =
- ir_builder->getInt64(dim_partition.first + dim_partition.second);
+ b->getInt64(dim_partition.first + dim_partition.second);
}
}
// Create global variable out of dimension partitions in 'partitions'.
llvm::ArrayType* partitions_array_type =
- llvm::ArrayType::get(ir_builder->getInt64Ty(), partition_array_size);
+ llvm::ArrayType::get(b->getInt64Ty(), partition_array_size);
llvm::Constant* partitions_array =
llvm::ConstantArray::get(partitions_array_type, partitions);
llvm::GlobalVariable* global_partitions_array = new llvm::GlobalVariable(
@@ -315,16 +310,16 @@ Status EmitCallToParallelForkJoin(
tensorflow::strings::StrCat(name, "_parallel_dimension_partitions")));
// Add argument specifying parallel dimension partitions.
- fork_join_arguments.push_back(ir_builder->CreateBitCast(
- global_partitions_array,
- llvm::Type::getInt64PtrTy(module->getContext())));
+ fork_join_arguments.push_back(
+ b->CreateBitCast(global_partitions_array,
+ llvm::Type::getInt64PtrTy(module->getContext())));
// Add argument specifying the number of partitioned most-major dimensions.
- fork_join_arguments.push_back(ir_builder->getInt32(num_partitioned_dims));
+ fork_join_arguments.push_back(b->getInt32(num_partitioned_dims));
// Add argument for parallel compute function pointer.
fork_join_arguments.push_back(
- ir_builder->CreateBitCast(parallel_function, ir_builder->getInt8PtrTy()));
+ b->CreateBitCast(parallel_function, b->getInt8PtrTy()));
// Emit call to parallel fork/join.
- ir_builder->CreateCall(fork_join_func, fork_join_arguments);
+ b->CreateCall(fork_join_func, fork_join_arguments);
return Status::OK();
}
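
For illustration only (not part of this patch): the flattened partitions array built above stores, for every partition and every partitioned most-major dimension, a contiguous [start, limit) pair, i.e. num_partitions * num_dims * 2 entries. A single-dimension sketch with an assumed even-split policy (the real intervals come from ShapePartitionIterator):

#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<int64_t> BuildPartitionArray(int64_t dim_size, int64_t num_partitions) {
  const int64_t num_dims = 1;                     // one partitioned dimension
  std::vector<int64_t> partitions(num_partitions * num_dims * 2);
  const int64_t chunk = (dim_size + num_partitions - 1) / num_partitions;
  for (int64_t p = 0; p < num_partitions; ++p) {
    const int64_t start = p * chunk;
    const int64_t limit = std::min(start + chunk, dim_size);
    partitions[p * num_dims * 2 + 0] = start;     // dim_start
    partitions[p * num_dims * 2 + 1] = limit;     // dim_limit
  }
  return partitions;
}
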
diff --git a/tensorflow/compiler/xla/service/cpu/ir_function.h b/tensorflow/compiler/xla/service/cpu/ir_function.h
index 2e55181eed..a41cbb64cd 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_function.h
+++ b/tensorflow/compiler/xla/service/cpu/ir_function.h
@@ -54,7 +54,7 @@ class IrFunction {
IrFunction(const string& function_name, llvm::Function::LinkageTypes linkage,
const bool optimize_for_size_requested,
const bool enable_fast_math, llvm::Module* llvm_module,
- llvm::IRBuilder<>* ir_builder, int64 num_dynamic_loop_bounds);
+ llvm::IRBuilder<>* b, int64 num_dynamic_loop_bounds);
~IrFunction();
// Emit ir to read and return the set of ir values representing the dynamic
@@ -97,7 +97,7 @@ class IrFunction {
// 'offset' from the "dynamic_loop_bounds" argument of this function.
llvm::Value* GetDynamicLoopBound(int64 offset);
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
llvm::Module* llvm_module_;
llvm::IRBuilder<>::InsertPointGuard caller_insert_point_guard_;
@@ -116,7 +116,7 @@ class IrFunction {
// Returns an array of compute function call argument ir values.
std::vector<llvm::Value*> GetArrayFunctionCallArguments(
tensorflow::gtl::ArraySlice<llvm::Value*> parameter_addresses,
- llvm::IRBuilder<>* ir_builder, tensorflow::StringPiece name,
+ llvm::IRBuilder<>* b, tensorflow::StringPiece name,
llvm::Value* return_value_buffer, llvm::Value* exec_run_options_arg,
llvm::Value* temp_buffers_arg, llvm::Value* profile_counters_arg);
@@ -124,9 +124,8 @@ std::vector<llvm::Value*> GetArrayFunctionCallArguments(
// calls to 'parallel_function' (and joins threads before returning).
Status EmitCallToParallelForkJoin(
const std::vector<llvm::Value*>& arguments, const Shape& shape,
- const std::vector<int64>& dimension_partition_counts,
- llvm::IRBuilder<>* ir_builder, llvm::Function* parallel_function,
- const string& name);
+ const std::vector<int64>& dimension_partition_counts, llvm::IRBuilder<>* b,
+ llvm::Function* parallel_function, const string& name);
} // namespace cpu
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/cpu/llvm_ir_runtime.cc b/tensorflow/compiler/xla/service/cpu/llvm_ir_runtime.cc
index 2e5cc96098..ec0498e04e 100644
--- a/tensorflow/compiler/xla/service/cpu/llvm_ir_runtime.cc
+++ b/tensorflow/compiler/xla/service/cpu/llvm_ir_runtime.cc
@@ -52,12 +52,12 @@ llvm::Function* EmitVectorF32TanhIfNeeded(llvm::Module* module,
llvm::BasicBlock* vector_tanh_body =
llvm::BasicBlock::Create(*context, "body", vector_tanh_function);
- llvm::IRBuilder<> ir_builder(vector_tanh_body);
+ llvm::IRBuilder<> b(vector_tanh_body);
llvm::FastMathFlags fast_math_flags;
fast_math_flags.setFast();
- ir_builder.setFastMathFlags(fast_math_flags);
+ b.setFastMathFlags(fast_math_flags);
- VectorSupportLibrary vsl(F32, vector_width, &ir_builder, "tanh_f32");
+ VectorSupportLibrary vsl(F32, vector_width, &b, "tanh_f32");
llvm::Value* input = &*vector_tanh_function->arg_begin();
CHECK_EQ(input->getType(), vsl.vector_type());
@@ -91,7 +91,7 @@ llvm::Function* EmitVectorF32TanhIfNeeded(llvm::Module* module,
}
llvm::Value* result = vsl.Div(numerator, denominator);
- ir_builder.CreateRet(result);
+ b.CreateRet(result);
DCHECK(!llvm::verifyFunction(*vector_tanh_function));
return vector_tanh_function;
@@ -113,12 +113,12 @@ llvm::Function* EmitVectorF32ExpIfNeeded(llvm::Module* module,
llvm::BasicBlock* vector_exp_body =
llvm::BasicBlock::Create(*context, "body", vector_exp_function);
- llvm::IRBuilder<> ir_builder(vector_exp_body);
+ llvm::IRBuilder<> b(vector_exp_body);
llvm::FastMathFlags fast_math_flags;
fast_math_flags.setFast();
- ir_builder.setFastMathFlags(fast_math_flags);
+ b.setFastMathFlags(fast_math_flags);
- VectorSupportLibrary vsl(F32, vector_width, &ir_builder, "exp_f32");
+ VectorSupportLibrary vsl(F32, vector_width, &b, "exp_f32");
// This implements the same polynomial approximation as implemented in Eigen3.
@@ -160,21 +160,21 @@ llvm::Function* EmitVectorF32ExpIfNeeded(llvm::Module* module,
// VectorSupportLibrary (intentionally) can't juggle more than one type at a
// time so drop down to IRBuilder for this bit.
llvm::Value* vector_constant_0x7f =
- ir_builder.CreateVectorSplat(vector_width, ir_builder.getInt32(0x7f));
+ b.CreateVectorSplat(vector_width, b.getInt32(0x7f));
llvm::Value* vector_constant_23 =
- ir_builder.CreateVectorSplat(vector_width, ir_builder.getInt32(23));
+ b.CreateVectorSplat(vector_width, b.getInt32(23));
llvm::Type* i32_vector_type =
- llvm::VectorType::get(ir_builder.getInt32Ty(), vector_width);
+ llvm::VectorType::get(b.getInt32Ty(), vector_width);
// fx is clamped so we don't have to worry about it being out of range for
// i32.
- llvm::Value* emm0 = ir_builder.CreateFPToSI(fx, i32_vector_type);
- emm0 = ir_builder.CreateAdd(emm0, vector_constant_0x7f);
- emm0 = ir_builder.CreateShl(emm0, vector_constant_23);
- llvm::Value* emm0_f32 = ir_builder.CreateBitCast(emm0, vsl.vector_type());
+ llvm::Value* emm0 = b.CreateFPToSI(fx, i32_vector_type);
+ emm0 = b.CreateAdd(emm0, vector_constant_0x7f);
+ emm0 = b.CreateShl(emm0, vector_constant_23);
+ llvm::Value* emm0_f32 = b.CreateBitCast(emm0, vsl.vector_type());
llvm::Value* result = vsl.Max(vsl.Mul(y, emm0_f32), input);
- ir_builder.CreateRet(result);
+ b.CreateRet(result);
DCHECK(!llvm::verifyFunction(*vector_exp_function));
return vector_exp_function;
@@ -196,13 +196,13 @@ llvm::Function* EmitVectorF32LogIfNeeded(llvm::Module* module,
llvm::BasicBlock* vector_log_body =
llvm::BasicBlock::Create(*context, "body", vector_log_function);
- llvm::IRBuilder<> ir_builder(vector_log_body);
+ llvm::IRBuilder<> b(vector_log_body);
llvm::FastMathFlags fast_math_flags;
fast_math_flags.setFast();
- ir_builder.setFastMathFlags(fast_math_flags);
+ b.setFastMathFlags(fast_math_flags);
llvm::Value* input = &*vector_log_function->arg_begin();
- VectorSupportLibrary vsl(F32, vector_width, &ir_builder, "log_f32");
+ VectorSupportLibrary vsl(F32, vector_width, &b, "log_f32");
const llvm::APFloat half = GetIeeeF32(0.5);
const llvm::APFloat one = GetIeeeF32(1.0);
@@ -238,22 +238,21 @@ llvm::Function* EmitVectorF32LogIfNeeded(llvm::Module* module,
// VectorSupportLibrary (intentionally) can't juggle more than one type at a
// time so drop down to IRBuilder for this bit.
llvm::Value* vector_constant_0x7f =
- ir_builder.CreateVectorSplat(vector_width, ir_builder.getInt32(0x7f));
+ b.CreateVectorSplat(vector_width, b.getInt32(0x7f));
llvm::Value* vector_constant_23 =
- ir_builder.CreateVectorSplat(vector_width, ir_builder.getInt32(23));
+ b.CreateVectorSplat(vector_width, b.getInt32(23));
llvm::Type* i32_vector_type =
- llvm::VectorType::get(ir_builder.getInt32Ty(), vector_width);
+ llvm::VectorType::get(b.getInt32Ty(), vector_width);
- llvm::Value* emm0 = ir_builder.CreateLShr(
- ir_builder.CreateBitCast(input, i32_vector_type), vector_constant_23);
+ llvm::Value* emm0 =
+ b.CreateLShr(b.CreateBitCast(input, i32_vector_type), vector_constant_23);
// Keep only the fractional part.
input = vsl.FloatAnd(input, inv_mant_mask);
input = vsl.FloatOr(input, half);
- emm0 = ir_builder.CreateSub(emm0, vector_constant_0x7f);
- llvm::Value* e =
- vsl.Add(one, ir_builder.CreateSIToFP(emm0, vsl.vector_type()));
+ emm0 = b.CreateSub(emm0, vector_constant_0x7f);
+ llvm::Value* e = vsl.Add(one, b.CreateSIToFP(emm0, vsl.vector_type()));
// part2:
// if( x < SQRTHF ) {
@@ -294,7 +293,7 @@ llvm::Function* EmitVectorF32LogIfNeeded(llvm::Module* module,
llvm::Value* or_rhs = vsl.FloatAnd(iszero_mask, minus_inf);
llvm::Value* result = vsl.FloatOr(or_lhs, or_rhs);
- ir_builder.CreateRet(result);
+ b.CreateRet(result);
DCHECK(!llvm::verifyFunction(*vector_log_function));
return vector_log_function;
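
The integer manipulations above (add 0x7f, shift by 23, bitcast) are the standard IEEE-754 exponent tricks used by the vectorized exp and log. A scalar sketch (not part of this patch), valid for normal positive floats:

#include <cstdint>
#include <cstring>

float TwoToTheN(int32_t n) {               // exp path: emm0 = (n + 0x7f) << 23
  const uint32_t bits = static_cast<uint32_t>(n + 0x7f) << 23;
  float result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;                           // equals 2^n for n in [-126, 127]
}

int32_t ExtractExponent(float x) {         // log path: emm0 = (bits >> 23) - 0x7f
  uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  return static_cast<int32_t>(bits >> 23) - 0x7f;
}
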
diff --git a/tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.cc b/tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.cc
index 59ae5acd8b..8560e4296a 100644
--- a/tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.cc
@@ -25,8 +25,8 @@ namespace cpu {
ParallelLoopEmitter::ParallelLoopEmitter(
const llvm_ir::ElementGenerator& target_element_generator,
const llvm_ir::IrArray& target_array,
- const DynamicLoopBounds* dynamic_loop_bounds, llvm::IRBuilder<>* ir_builder)
- : LoopEmitter(target_element_generator, target_array, ir_builder),
+ const DynamicLoopBounds* dynamic_loop_bounds, llvm::IRBuilder<>* b)
+ : LoopEmitter(target_element_generator, target_array, b),
dynamic_loop_bounds_(dynamic_loop_bounds) {}
std::vector<llvm_ir::IrArray::Index>
@@ -37,7 +37,7 @@ ParallelLoopEmitter::EmitIndexAndSetExitBasicBlock(
CHECK(!ShapeUtil::IsTuple(shape_));
CHECK(!ShapeUtil::IsScalar(shape_));
- llvm_ir::ForLoopNest loop_nest(loop_name, ir_builder_);
+ llvm_ir::ForLoopNest loop_nest(loop_name, b_);
const int64 num_dims = shape_.dimensions_size();
llvm_ir::IrArray::Index array_index(index_type, num_dims);
@@ -65,8 +65,7 @@ ParallelLoopEmitter::EmitIndexAndSetExitBasicBlock(
}
}
// Point IR builder at inner loop BB.
- llvm_ir::SetToFirstInsertPoint(loop_nest.GetInnerLoopBodyBasicBlock(),
- ir_builder_);
+ llvm_ir::SetToFirstInsertPoint(loop_nest.GetInnerLoopBodyBasicBlock(), b_);
// Set exit_bb_ to the exit block of the loop nest.
exit_bb_ = loop_nest.GetOuterLoopExitBasicBlock();
diff --git a/tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.h b/tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.h
index 25e182a26d..076c683ca5 100644
--- a/tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.h
+++ b/tensorflow/compiler/xla/service/cpu/parallel_loop_emitter.h
@@ -54,7 +54,7 @@ class ParallelLoopEmitter : public llvm_ir::LoopEmitter {
ParallelLoopEmitter(const llvm_ir::ElementGenerator& target_element_generator,
const llvm_ir::IrArray& target_array,
const DynamicLoopBounds* dynamic_loop_bounds,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
ParallelLoopEmitter(const ParallelLoopEmitter&) = delete;
ParallelLoopEmitter& operator=(const ParallelLoopEmitter&) = delete;
diff --git a/tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc b/tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc
index ccb61740f6..01daed4bcd 100644
--- a/tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc
@@ -78,7 +78,7 @@ TEST_F(CpuNoAliasTest, Concat) {
llvm::Function* func = llvm::cast<llvm::Function>(
ir_module.getOrInsertFunction("test_fn", llvm::Type::getVoidTy(context)));
llvm::BasicBlock* bb = llvm::BasicBlock::Create(context, "body", func);
- llvm::IRBuilder<> ir_builder(bb);
+ llvm::IRBuilder<> b(bb);
auto* zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(context), 0);
llvm_ir::IrArray::Index zero2D({zero, zero});
@@ -90,7 +90,7 @@ TEST_F(CpuNoAliasTest, Concat) {
ir_module.getOrInsertGlobal("param_x", array2d_type);
llvm_ir::IrArray param_x_array(param_x_val, param_shape);
aa.AddAliasingInformationToIrArray(*param_x, &param_x_array);
- param_x_array.EmitReadArrayElement(zero2D, &ir_builder)
+ param_x_array.EmitReadArrayElement(zero2D, &b)
->setName("read_param_x_array");
}
@@ -100,7 +100,7 @@ TEST_F(CpuNoAliasTest, Concat) {
auto shape = ShapeUtil::MakeShape(F32, {2, 4});
llvm_ir::IrArray concat1_array(concat1_val, shape);
aa.AddAliasingInformationToIrArray(*concat1, &concat1_array);
- concat1_array.EmitReadArrayElement(zero2D, &ir_builder)
+ concat1_array.EmitReadArrayElement(zero2D, &b)
->setName("read_concat1_array");
}
@@ -110,7 +110,7 @@ TEST_F(CpuNoAliasTest, Concat) {
auto shape = ShapeUtil::MakeShape(F32, {2, 6});
llvm_ir::IrArray concat2_array(concat2_val, shape);
aa.AddAliasingInformationToIrArray(*concat2, &concat2_array);
- concat2_array.EmitReadArrayElement(zero2D, &ir_builder)
+ concat2_array.EmitReadArrayElement(zero2D, &b)
->setName("read_concat2_array");
}
diff --git a/tensorflow/compiler/xla/service/cpu/vector_support_library.cc b/tensorflow/compiler/xla/service/cpu/vector_support_library.cc
index c444d15185..3274be8d9d 100644
--- a/tensorflow/compiler/xla/service/cpu/vector_support_library.cc
+++ b/tensorflow/compiler/xla/service/cpu/vector_support_library.cc
@@ -23,14 +23,14 @@ namespace xla {
namespace cpu {
VectorSupportLibrary::VectorSupportLibrary(PrimitiveType primitive_type,
int64 vector_size,
- llvm::IRBuilder<>* ir_builder,
+ llvm::IRBuilder<>* b,
std::string name)
: vector_size_(vector_size),
primitive_type_(primitive_type),
- ir_builder_(ir_builder),
+ b_(b),
name_(std::move(name)) {
scalar_type_ = llvm_ir::PrimitiveTypeToIrType(
- primitive_type, ir_builder_->GetInsertBlock()->getModule());
+ primitive_type, b_->GetInsertBlock()->getModule());
scalar_pointer_type_ = llvm::PointerType::getUnqual(scalar_type_);
vector_type_ = llvm::VectorType::get(scalar_type_, vector_size);
vector_pointer_type_ = llvm::PointerType::getUnqual(vector_type_);
@@ -63,9 +63,9 @@ llvm::Value* VectorSupportLibrary::Mul(llvm::Value* lhs, llvm::Value* rhs) {
llvm::Value* VectorSupportLibrary::MulInternal(llvm::Value* lhs,
llvm::Value* rhs) {
if (scalar_type_->isFloatingPointTy()) {
- return ir_builder()->CreateFMul(lhs, rhs, name());
+ return b()->CreateFMul(lhs, rhs, name());
} else {
- return ir_builder()->CreateMul(lhs, rhs, name());
+ return b()->CreateMul(lhs, rhs, name());
}
}
@@ -76,13 +76,13 @@ llvm::Value* VectorSupportLibrary::Add(llvm::Value* lhs, llvm::Value* rhs) {
llvm::Value* VectorSupportLibrary::Sub(llvm::Value* lhs, llvm::Value* rhs) {
AssertCorrectTypes({lhs, rhs});
- return ir_builder()->CreateFSub(lhs, rhs);
+ return b()->CreateFSub(lhs, rhs);
}
llvm::Value* VectorSupportLibrary::Max(llvm::Value* lhs, llvm::Value* rhs) {
AssertCorrectTypes({lhs, rhs});
if (scalar_type_->isFloatingPointTy()) {
- return llvm_ir::EmitFloatMax(lhs, rhs, ir_builder_);
+ return llvm_ir::EmitFloatMax(lhs, rhs, b_);
} else {
LOG(FATAL) << "Max for integers is unimplemented";
}
@@ -91,13 +91,13 @@ llvm::Value* VectorSupportLibrary::Max(llvm::Value* lhs, llvm::Value* rhs) {
llvm::Value* VectorSupportLibrary::Floor(llvm::Value* a) {
AssertCorrectTypes({a});
return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::floor, {a},
- {a->getType()}, ir_builder());
+ {a->getType()}, b());
}
llvm::Value* VectorSupportLibrary::Div(llvm::Value* lhs, llvm::Value* rhs) {
AssertCorrectTypes({lhs, rhs});
if (scalar_type_->isFloatingPointTy()) {
- return ir_builder()->CreateFDiv(lhs, rhs, name());
+ return b()->CreateFDiv(lhs, rhs, name());
} else {
LOG(FATAL) << "Division for integers is unimplemented";
}
@@ -111,42 +111,41 @@ llvm::Value* VectorSupportLibrary::Clamp(llvm::Value* a,
CHECK(low.compare(high) == llvm::APFloat::cmpLessThan);
CHECK(scalar_type_->isFloatingPointTy());
return llvm_ir::EmitFloatMin(
- llvm_ir::EmitFloatMax(a, GetConstantFloat(type, low), ir_builder_),
- GetConstantFloat(type, high), ir_builder_);
+ llvm_ir::EmitFloatMax(a, GetConstantFloat(type, low), b_),
+ GetConstantFloat(type, high), b_);
}
llvm::Value* VectorSupportLibrary::FCmpEQMask(llvm::Value* lhs,
llvm::Value* rhs) {
AssertCorrectTypes({lhs, rhs});
- return I1ToFloat(ir_builder()->CreateFCmpOEQ(lhs, rhs, name()));
+ return I1ToFloat(b()->CreateFCmpOEQ(lhs, rhs, name()));
}
llvm::Value* VectorSupportLibrary::FCmpOLTMask(llvm::Value* lhs,
llvm::Value* rhs) {
AssertCorrectTypes({lhs, rhs});
- return I1ToFloat(ir_builder()->CreateFCmpOLT(lhs, rhs, name()));
+ return I1ToFloat(b()->CreateFCmpOLT(lhs, rhs, name()));
}
llvm::Value* VectorSupportLibrary::FCmpULEMask(llvm::Value* lhs,
llvm::Value* rhs) {
AssertCorrectTypes({lhs, rhs});
- return I1ToFloat(ir_builder()->CreateFCmpULE(lhs, rhs, name()));
+ return I1ToFloat(b()->CreateFCmpULE(lhs, rhs, name()));
}
llvm::Value* VectorSupportLibrary::I1ToFloat(llvm::Value* i1) {
bool is_vector = llvm::isa<llvm::VectorType>(i1->getType());
llvm::Type* integer_type = IntegerTypeForFloatSize(is_vector);
- return ir_builder()->CreateBitCast(
- ir_builder()->CreateSExt(i1, integer_type, name()),
- is_vector ? vector_type() : scalar_type(), name());
+ return b()->CreateBitCast(b()->CreateSExt(i1, integer_type, name()),
+ is_vector ? vector_type() : scalar_type(), name());
}
llvm::Type* VectorSupportLibrary::IntegerTypeForFloatSize(bool vector) {
CHECK(scalar_type()->isFloatingPointTy());
const llvm::DataLayout& data_layout =
- ir_builder()->GetInsertBlock()->getModule()->getDataLayout();
+ b()->GetInsertBlock()->getModule()->getDataLayout();
int64 float_size_bits = data_layout.getTypeSizeInBits(scalar_type());
- llvm::Type* scalar_int_type = ir_builder()->getIntNTy(float_size_bits);
+ llvm::Type* scalar_int_type = b()->getIntNTy(float_size_bits);
if (vector) {
return llvm::VectorType::get(scalar_int_type, vector_size());
} else {
@@ -156,7 +155,7 @@ llvm::Type* VectorSupportLibrary::IntegerTypeForFloatSize(bool vector) {
llvm::Value* VectorSupportLibrary::BroadcastScalar(llvm::Value* x) {
CHECK_EQ(x->getType(), scalar_type());
- return ir_builder()->CreateVectorSplat(vector_size(), x, name());
+ return b()->CreateVectorSplat(vector_size(), x, name());
}
llvm::Value* VectorSupportLibrary::FloatAnd(llvm::Value* lhs,
@@ -164,10 +163,9 @@ llvm::Value* VectorSupportLibrary::FloatAnd(llvm::Value* lhs,
AssertCorrectTypes({lhs, rhs});
llvm::Type* int_type =
IntegerTypeForFloatSize(lhs->getType() == vector_type());
- return ir_builder()->CreateBitCast(
- ir_builder()->CreateAnd(
- ir_builder()->CreateBitCast(lhs, int_type, name()),
- ir_builder()->CreateBitCast(rhs, int_type, name()), name()),
+ return b()->CreateBitCast(
+ b()->CreateAnd(b()->CreateBitCast(lhs, int_type, name()),
+ b()->CreateBitCast(rhs, int_type, name()), name()),
vector_type());
}
@@ -175,9 +173,8 @@ llvm::Value* VectorSupportLibrary::FloatNot(llvm::Value* lhs) {
AssertCorrectTypes({lhs});
llvm::Type* int_type =
IntegerTypeForFloatSize(lhs->getType() == vector_type());
- return ir_builder()->CreateBitCast(
- ir_builder()->CreateNot(
- ir_builder()->CreateBitCast(lhs, int_type, name()), name()),
+ return b()->CreateBitCast(
+ b()->CreateNot(b()->CreateBitCast(lhs, int_type, name()), name()),
vector_type());
}
@@ -185,47 +182,43 @@ llvm::Value* VectorSupportLibrary::FloatOr(llvm::Value* lhs, llvm::Value* rhs) {
AssertCorrectTypes({lhs, rhs});
llvm::Type* int_type =
IntegerTypeForFloatSize(lhs->getType() == vector_type());
- return ir_builder()->CreateBitCast(
- ir_builder()->CreateOr(ir_builder()->CreateBitCast(lhs, int_type, name()),
- ir_builder()->CreateBitCast(rhs, int_type, name()),
- name()),
+ return b()->CreateBitCast(
+ b()->CreateOr(b()->CreateBitCast(lhs, int_type, name()),
+ b()->CreateBitCast(rhs, int_type, name()), name()),
vector_type(), name());
}
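
The FCmp*Mask / FloatAnd / FloatOr / FloatNot helpers above exist so predicates can be applied without branches: a comparison is sign-extended into an all-ones or all-zeros bit pattern and then combined bitwise. A scalar sketch (not part of this patch), assuming 32-bit floats:

#include <cstdint>
#include <cstring>

float BitSelect(bool pred, float if_true, float if_false) {
  const uint32_t mask = pred ? 0xFFFFFFFFu : 0u;  // what I1ToFloat produces
  uint32_t t, f;
  std::memcpy(&t, &if_true, sizeof(t));
  std::memcpy(&f, &if_false, sizeof(f));
  const uint32_t r = (mask & t) | (~mask & f);    // FloatAnd / FloatNot / FloatOr
  float result;
  std::memcpy(&result, &r, sizeof(result));
  return result;
}
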
llvm::Value* VectorSupportLibrary::AddInternal(llvm::Value* lhs,
llvm::Value* rhs) {
if (scalar_type_->isFloatingPointTy()) {
- return ir_builder()->CreateFAdd(lhs, rhs, name());
+ return b()->CreateFAdd(lhs, rhs, name());
} else {
- return ir_builder()->CreateAdd(lhs, rhs, name());
+ return b()->CreateAdd(lhs, rhs, name());
}
}
llvm::Value* VectorSupportLibrary::ComputeOffsetPointer(
llvm::Value* base_pointer, llvm::Value* offset_elements) {
if (base_pointer->getType() != scalar_pointer_type()) {
- base_pointer = ir_builder()->CreateBitCast(base_pointer,
- scalar_pointer_type(), name());
+ base_pointer =
+ b()->CreateBitCast(base_pointer, scalar_pointer_type(), name());
}
- return ir_builder()->CreateInBoundsGEP(base_pointer, {offset_elements},
- name());
+ return b()->CreateInBoundsGEP(base_pointer, {offset_elements}, name());
}
llvm::Value* VectorSupportLibrary::LoadVector(llvm::Value* pointer) {
if (pointer->getType() != vector_pointer_type()) {
- pointer =
- ir_builder()->CreateBitCast(pointer, vector_pointer_type(), name());
+ pointer = b()->CreateBitCast(pointer, vector_pointer_type(), name());
}
- return ir_builder()->CreateAlignedLoad(
+ return b()->CreateAlignedLoad(
pointer, ShapeUtil::ByteSizeOfPrimitiveType(primitive_type_), name());
}
llvm::Value* VectorSupportLibrary::LoadScalar(llvm::Value* pointer) {
if (pointer->getType() != scalar_pointer_type()) {
- pointer =
- ir_builder()->CreateBitCast(pointer, scalar_pointer_type(), name());
+ pointer = b()->CreateBitCast(pointer, scalar_pointer_type(), name());
}
- return ir_builder()->CreateAlignedLoad(
+ return b()->CreateAlignedLoad(
pointer, ShapeUtil::ByteSizeOfPrimitiveType(primitive_type_), name());
}
@@ -233,30 +226,28 @@ void VectorSupportLibrary::StoreVector(llvm::Value* value,
llvm::Value* pointer) {
AssertCorrectTypes({value});
if (pointer->getType() != vector_pointer_type()) {
- pointer = ir_builder()->CreateBitCast(pointer, vector_pointer_type());
+ pointer = b()->CreateBitCast(pointer, vector_pointer_type());
}
- ir_builder()->CreateAlignedStore(
- value, pointer, ShapeUtil::ByteSizeOfPrimitiveType(primitive_type_));
+ b()->CreateAlignedStore(value, pointer,
+ ShapeUtil::ByteSizeOfPrimitiveType(primitive_type_));
}
void VectorSupportLibrary::StoreScalar(llvm::Value* value,
llvm::Value* pointer) {
AssertCorrectTypes({value});
if (pointer->getType() != scalar_pointer_type()) {
- pointer =
- ir_builder()->CreateBitCast(pointer, scalar_pointer_type(), name());
+ pointer = b()->CreateBitCast(pointer, scalar_pointer_type(), name());
}
- ir_builder()->CreateAlignedStore(
- value, pointer, ShapeUtil::ByteSizeOfPrimitiveType(primitive_type_));
+ b()->CreateAlignedStore(value, pointer,
+ ShapeUtil::ByteSizeOfPrimitiveType(primitive_type_));
}
llvm::Value* VectorSupportLibrary::LoadBroadcast(llvm::Value* pointer) {
if (pointer->getType() != scalar_pointer_type()) {
- pointer =
- ir_builder()->CreateBitCast(pointer, scalar_pointer_type(), name());
+ pointer = b()->CreateBitCast(pointer, scalar_pointer_type(), name());
}
- return ir_builder()->CreateVectorSplat(
- vector_size(), ir_builder()->CreateLoad(pointer), name());
+ return b()->CreateVectorSplat(vector_size(), b()->CreateLoad(pointer),
+ name());
}
llvm::Value* VectorSupportLibrary::AddReduce(llvm::Value* vector) {
@@ -267,20 +258,19 @@ llvm::Value* VectorSupportLibrary::AddReduce(llvm::Value* vector) {
for (unsigned j = 0; j < vector_size(); ++j) {
if (j < (i / 2)) {
- mask[j] = ir_builder()->getInt32(i / 2 + j);
+ mask[j] = b()->getInt32(i / 2 + j);
} else {
- mask[j] = llvm::UndefValue::get(ir_builder()->getInt32Ty());
+ mask[j] = llvm::UndefValue::get(b()->getInt32Ty());
}
}
- llvm::Value* half_remaining_lanes = ir_builder()->CreateShuffleVector(
- vector, llvm::UndefValue::get(vector_type()),
- llvm::ConstantVector::get(mask), "");
+ llvm::Value* half_remaining_lanes =
+ b()->CreateShuffleVector(vector, llvm::UndefValue::get(vector_type()),
+ llvm::ConstantVector::get(mask), "");
vector = Add(vector, half_remaining_lanes);
}
- return ir_builder()->CreateExtractElement(vector, ir_builder()->getInt32(0),
- name());
+ return b()->CreateExtractElement(vector, b()->getInt32(0), name());
}
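
AddReduce above performs a log2(width) shuffle reduction: each round folds the upper half of the remaining lanes onto the lower half. An equivalent plain-array sketch (not part of this patch), assuming a power-of-two width:

#include <cstddef>

float HorizontalSum(float* lanes, size_t width) {
  for (size_t live = width; live > 1; live /= 2) {
    for (size_t j = 0; j < live / 2; ++j) {
      lanes[j] += lanes[live / 2 + j];   // the shuffled upper half, then Add
    }
  }
  return lanes[0];                       // the final CreateExtractElement(0)
}
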
llvm::Value* VectorSupportLibrary::AvxStyleHorizontalAdd(llvm::Value* lhs,
@@ -307,19 +297,19 @@ llvm::Value* VectorSupportLibrary::AvxStyleHorizontalAdd(llvm::Value* lhs,
// vector, which are the lanes 2 and 3 in the rhs vector.
for (int i = 0; i < vector_size(); i += 2) {
int increment = i < vector_size() / 2 ? 0 : (vector_size() / 2);
- mask_a.push_back(ir_builder()->getInt32(increment + i));
- mask_b.push_back(ir_builder()->getInt32(increment + i + 1));
+ mask_a.push_back(b()->getInt32(increment + i));
+ mask_b.push_back(b()->getInt32(increment + i + 1));
}
for (int i = 0; i < vector_size(); i += 2) {
int increment = i < vector_size() / 2 ? (vector_size() / 2) : vector_size();
- mask_a.push_back(ir_builder()->getInt32(increment + i));
- mask_b.push_back(ir_builder()->getInt32(increment + i + 1));
+ mask_a.push_back(b()->getInt32(increment + i));
+ mask_b.push_back(b()->getInt32(increment + i + 1));
}
- llvm::Value* shuffle_0 = ir_builder()->CreateShuffleVector(
- lhs, rhs, llvm::ConstantVector::get(mask_a));
- llvm::Value* shuffle_1 = ir_builder()->CreateShuffleVector(
- lhs, rhs, llvm::ConstantVector::get(mask_b));
+ llvm::Value* shuffle_0 =
+ b()->CreateShuffleVector(lhs, rhs, llvm::ConstantVector::get(mask_a));
+ llvm::Value* shuffle_1 =
+ b()->CreateShuffleVector(lhs, rhs, llvm::ConstantVector::get(mask_b));
return Add(shuffle_0, shuffle_1);
}
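
The two shuffle masks built above reproduce the vhaddps lane order: adjacent pairs are summed, with lhs pairs and rhs pairs interleaved within each 128-bit half. For width 8 the result looks like the following plain-array sketch (not part of this patch):

#include <array>

std::array<float, 8> AvxStyleHAdd(const std::array<float, 8>& l,
                                  const std::array<float, 8>& r) {
  return {l[0] + l[1], l[2] + l[3], r[0] + r[1], r[2] + r[3],
          l[4] + l[5], l[6] + l[7], r[4] + r[5], r[6] + r[7]};
}
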
@@ -327,23 +317,21 @@ llvm::Value* VectorSupportLibrary::AvxStyleHorizontalAdd(llvm::Value* lhs,
llvm::Value* VectorSupportLibrary::ExtractLowHalf(llvm::Value* vector) {
llvm::SmallVector<llvm::Constant*, 32> mask;
for (int i = 0; i < vector_size() / 2; i++) {
- mask.push_back(ir_builder()->getInt32(i));
+ mask.push_back(b()->getInt32(i));
}
- return ir_builder()->CreateShuffleVector(vector,
- llvm::UndefValue::get(vector_type()),
- llvm::ConstantVector::get(mask));
+ return b()->CreateShuffleVector(vector, llvm::UndefValue::get(vector_type()),
+ llvm::ConstantVector::get(mask));
}
llvm::Value* VectorSupportLibrary::ExtractHighHalf(llvm::Value* vector) {
llvm::SmallVector<llvm::Constant*, 32> mask;
for (int i = 0; i < vector_size() / 2; i++) {
- mask.push_back(ir_builder()->getInt32(i + vector_size() / 2));
+ mask.push_back(b()->getInt32(i + vector_size() / 2));
}
- return ir_builder()->CreateShuffleVector(vector,
- llvm::UndefValue::get(vector_type()),
- llvm::ConstantVector::get(mask));
+ return b()->CreateShuffleVector(vector, llvm::UndefValue::get(vector_type()),
+ llvm::ConstantVector::get(mask));
}
std::vector<llvm::Value*> VectorSupportLibrary::ComputeHorizontalSums(
@@ -360,8 +348,8 @@ std::vector<llvm::Value*> VectorSupportLibrary::ComputeHorizontalSums(
[this](llvm::Value* vector) { return AddReduce(vector); });
if (init_values) {
for (int64 i = 0, e = result.size(); i < e; i++) {
- result[i] = Add(result[i], ir_builder()->CreateExtractElement(
- init_values, ir_builder()->getInt32(i)));
+ result[i] = Add(result[i],
+ b()->CreateExtractElement(init_values, b()->getInt32(i)));
}
}
return result;
@@ -398,9 +386,9 @@ VectorSupportLibrary::ComputeAvxOptimizedHorizontalSums(
std::vector<llvm::Value*> results;
for (int i = 0; i < lane_width; i++) {
- llvm::Value* scalar_result = ir_builder()->CreateExtractElement(
- i < (lane_width / 2) ? low : high,
- ir_builder()->getInt32(i % (lane_width / 2)), name());
+ llvm::Value* scalar_result =
+ b()->CreateExtractElement(i < (lane_width / 2) ? low : high,
+ b()->getInt32(i % (lane_width / 2)), name());
results.push_back(scalar_result);
}
@@ -415,17 +403,14 @@ llvm::Value* VectorSupportLibrary::GetZeroScalar() {
return llvm::Constant::getNullValue(scalar_type());
}
-LlvmVariable::LlvmVariable(llvm::Type* type, llvm::IRBuilder<>* ir_builder)
- : ir_builder_(ir_builder) {
- alloca_ = llvm_ir::EmitAllocaAtFunctionEntry(type, "", ir_builder_);
+LlvmVariable::LlvmVariable(llvm::Type* type, llvm::IRBuilder<>* b) : b_(b) {
+ alloca_ = llvm_ir::EmitAllocaAtFunctionEntry(type, "", b_);
}
-llvm::Value* LlvmVariable::Get() const {
- return ir_builder_->CreateLoad(alloca_);
-}
+llvm::Value* LlvmVariable::Get() const { return b_->CreateLoad(alloca_); }
void LlvmVariable::Set(llvm::Value* new_value) {
- ir_builder_->CreateStore(new_value, alloca_);
+ b_->CreateStore(new_value, alloca_);
}
TileVariable::TileVariable(VectorSupportLibrary* vector_support,
diff --git a/tensorflow/compiler/xla/service/cpu/vector_support_library.h b/tensorflow/compiler/xla/service/cpu/vector_support_library.h
index 49c2a4e2f4..c728f6df0a 100644
--- a/tensorflow/compiler/xla/service/cpu/vector_support_library.h
+++ b/tensorflow/compiler/xla/service/cpu/vector_support_library.h
@@ -46,11 +46,11 @@ class VectorSupportLibrary {
// instance (i.e. LoadVector will load a vector of type <`vector_size` x
// `primitive_type`>).
VectorSupportLibrary(PrimitiveType primitive_type, int64 vector_size,
- llvm::IRBuilder<>* ir_builder, std::string name);
+ llvm::IRBuilder<>* b, std::string name);
llvm::Value* Mul(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* Mul(int64 lhs, llvm::Value* rhs) {
- return Mul(ir_builder()->getInt64(lhs), rhs);
+ return Mul(b()->getInt64(lhs), rhs);
}
llvm::Value* Mul(const llvm::APFloat& lhs, llvm::Value* rhs) {
return Mul(GetConstantFloat(rhs->getType(), lhs), rhs);
@@ -63,7 +63,7 @@ class VectorSupportLibrary {
llvm::Value* Add(llvm::Value* lhs, llvm::Value* rhs);
llvm::Value* Add(int64 lhs, llvm::Value* rhs) {
- return Add(ir_builder()->getInt64(lhs), rhs);
+ return Add(b()->getInt64(lhs), rhs);
}
llvm::Value* Add(const llvm::APFloat& lhs, llvm::Value* rhs) {
return Add(GetConstantFloat(rhs->getType(), lhs), rhs);
@@ -147,13 +147,11 @@ class VectorSupportLibrary {
llvm::Value* ComputeOffsetPointer(llvm::Value* base_pointer,
llvm::Value* offset_elements, int64 scale) {
return ComputeOffsetPointer(
- base_pointer,
- ir_builder_->CreateMul(ir_builder_->getInt64(scale), offset_elements));
+ base_pointer, b_->CreateMul(b_->getInt64(scale), offset_elements));
}
llvm::Value* ComputeOffsetPointer(llvm::Value* base_pointer,
int64 offset_elements) {
- return ComputeOffsetPointer(base_pointer,
- ir_builder()->getInt64(offset_elements));
+ return ComputeOffsetPointer(base_pointer, b()->getInt64(offset_elements));
}
llvm::Value* LoadVector(llvm::Value* pointer);
@@ -164,7 +162,7 @@ class VectorSupportLibrary {
}
llvm::Value* LoadVector(llvm::Value* base_pointer, int64 offset_elements) {
- return LoadVector(base_pointer, ir_builder()->getInt64(offset_elements));
+ return LoadVector(base_pointer, b()->getInt64(offset_elements));
}
llvm::Value* LoadScalar(llvm::Value* pointer);
@@ -175,7 +173,7 @@ class VectorSupportLibrary {
}
llvm::Value* LoadScalar(llvm::Value* base_pointer, int64 offset_elements) {
- return LoadScalar(base_pointer, ir_builder()->getInt64(offset_elements));
+ return LoadScalar(base_pointer, b()->getInt64(offset_elements));
}
void StoreVector(llvm::Value* value, llvm::Value* pointer);
@@ -187,7 +185,7 @@ class VectorSupportLibrary {
void StoreVector(llvm::Value* value, llvm::Value* base_pointer,
int64 offset_elements) {
- StoreVector(value, base_pointer, ir_builder()->getInt64(offset_elements));
+ StoreVector(value, base_pointer, b()->getInt64(offset_elements));
}
void StoreScalar(llvm::Value* value, llvm::Value* pointer);
@@ -198,7 +196,7 @@ class VectorSupportLibrary {
void StoreScalar(llvm::Value* value, llvm::Value* base_pointer,
int64 offset_elements) {
- StoreScalar(base_pointer, ir_builder()->getInt64(offset_elements));
+    StoreScalar(value, base_pointer, b()->getInt64(offset_elements));
}
llvm::Value* LoadBroadcast(llvm::Value* pointer);
@@ -207,7 +205,7 @@ class VectorSupportLibrary {
return LoadBroadcast(ComputeOffsetPointer(base_pointer, offset_elements));
}
llvm::Value* LoadBroadcast(llvm::Value* base_pointer, int64 offset_elements) {
- return LoadBroadcast(base_pointer, ir_builder()->getInt64(offset_elements));
+ return LoadBroadcast(base_pointer, b()->getInt64(offset_elements));
}
// Compute the horizontal sum of each vector in `vectors`. The i'th element
@@ -220,7 +218,7 @@ class VectorSupportLibrary {
llvm::Value* GetZeroVector();
llvm::Value* GetZeroScalar();
- llvm::IRBuilder<>* ir_builder() const { return ir_builder_; }
+ llvm::IRBuilder<>* b() const { return b_; }
int64 vector_size() const { return vector_size_; }
llvm::Type* vector_type() const { return vector_type_; }
llvm::Type* vector_pointer_type() const { return vector_pointer_type_; }
@@ -277,7 +275,7 @@ class VectorSupportLibrary {
int64 vector_size_;
PrimitiveType primitive_type_;
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
llvm::Type* vector_type_;
llvm::Type* vector_pointer_type_;
llvm::Type* scalar_type_;
@@ -289,22 +287,21 @@ class VectorSupportLibrary {
// can later convert to a SSA value.
class LlvmVariable {
public:
- LlvmVariable(llvm::Type*, llvm::IRBuilder<>* ir_builder);
+ LlvmVariable(llvm::Type*, llvm::IRBuilder<>* b);
llvm::Value* Get() const;
void Set(llvm::Value* new_value);
private:
llvm::AllocaInst* alloca_;
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
};
class VectorVariable : public LlvmVariable {
public:
VectorVariable(VectorSupportLibrary* vector_support,
llvm::Value* initial_value)
- : LlvmVariable(vector_support->vector_type(),
- vector_support->ir_builder()) {
+ : LlvmVariable(vector_support->vector_type(), vector_support->b()) {
Set(initial_value);
}
};
@@ -313,8 +310,7 @@ class ScalarVariable : public LlvmVariable {
public:
ScalarVariable(VectorSupportLibrary* vector_support,
llvm::Value* initial_value)
- : LlvmVariable(vector_support->scalar_type(),
- vector_support->ir_builder()) {
+ : LlvmVariable(vector_support->scalar_type(), vector_support->b()) {
Set(initial_value);
}
};
diff --git a/tensorflow/compiler/xla/service/dfs_hlo_visitor.h b/tensorflow/compiler/xla/service/dfs_hlo_visitor.h
index 51f16bdc94..097fa23027 100644
--- a/tensorflow/compiler/xla/service/dfs_hlo_visitor.h
+++ b/tensorflow/compiler/xla/service/dfs_hlo_visitor.h
@@ -212,6 +212,7 @@ class DfsHloVisitorBase {
virtual Status HandleReverse(HloInstructionPtr hlo) = 0;
virtual Status HandleSort(HloInstructionPtr hlo) = 0;
virtual Status HandleConstant(HloInstructionPtr hlo) = 0;
+ virtual Status HandleIota(HloInstructionPtr hlo) = 0;
virtual Status HandleGetTupleElement(HloInstructionPtr hlo) = 0;
virtual Status HandleReduce(HloInstructionPtr hlo) = 0;
virtual Status HandleBitcast(HloInstructionPtr hlo) = 0;
diff --git a/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h b/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h
index 0686ca74af..f4316e0fb7 100644
--- a/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h
+++ b/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h
@@ -115,6 +115,9 @@ class DfsHloVisitorWithDefaultBase
Status HandleConstant(HloInstructionPtr constant) override {
return DefaultAction(constant);
}
+ Status HandleIota(HloInstructionPtr iota) override {
+ return DefaultAction(iota);
+ }
Status HandleGetTupleElement(HloInstructionPtr get_tuple_element) override {
return DefaultAction(get_tuple_element);
}
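
For context (not part of this patch), the Iota HLO these new visitor hooks route simply produces consecutive indices along its iota dimension; a one-dimensional sketch:

#include <cstdint>
#include <vector>

std::vector<int64_t> Iota(int64_t n) {
  std::vector<int64_t> out(n);
  for (int64_t i = 0; i < n; ++i) out[i] = i;   // element i is simply i
  return out;
}
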
diff --git a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
index bd68685153..b58b87a978 100644
--- a/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/elemental_ir_emitter.cc
@@ -61,13 +61,13 @@ int64 GlobalRandomValue() {
llvm::Value* EmitReducePrecisionFloat(llvm::Value* x, int64 exponent_bits,
int64 mantissa_bits,
- llvm::IRBuilder<>* ir_builder) {
+ llvm::IRBuilder<>* b) {
// Integer and float types for casting and constant generation.
llvm::Type* float_type = x->getType();
- llvm::IntegerType* int_type = ir_builder->getInt32Ty();
+ llvm::IntegerType* int_type = b->getInt32Ty();
// Cast the input value to an integer for bitwise manipulation.
- llvm::Value* x_as_int = ir_builder->CreateBitCast(x, int_type);
+ llvm::Value* x_as_int = b->CreateBitCast(x, int_type);
if (mantissa_bits < 23) {
// Last remaining mantissa bit.
@@ -77,22 +77,22 @@ llvm::Value* EmitReducePrecisionFloat(llvm::Value* x, int64 exponent_bits,
// equal to a base value of 0111... plus one bit if the last remaining
// mantissa bit is 1.
const uint32_t base_rounding_bias = (last_mantissa_bit_mask >> 1) - 1;
- llvm::Value* x_last_mantissa_bit = ir_builder->CreateLShr(
- ir_builder->CreateAnd(
- x_as_int, llvm::ConstantInt::get(int_type, last_mantissa_bit_mask)),
+ llvm::Value* x_last_mantissa_bit = b->CreateLShr(
+ b->CreateAnd(x_as_int,
+ llvm::ConstantInt::get(int_type, last_mantissa_bit_mask)),
(23 - mantissa_bits));
- llvm::Value* x_rounding_bias = ir_builder->CreateAdd(
- x_last_mantissa_bit,
- llvm::ConstantInt::get(int_type, base_rounding_bias));
+ llvm::Value* x_rounding_bias =
+ b->CreateAdd(x_last_mantissa_bit,
+ llvm::ConstantInt::get(int_type, base_rounding_bias));
// Add rounding bias, and mask out truncated bits. Note that the case
// where adding the rounding bias overflows into the exponent bits is
// correct; the non-masked mantissa bits will all be zero, and the
// exponent will be incremented by one.
const uint32_t truncation_mask = ~(last_mantissa_bit_mask - 1);
- x_as_int = ir_builder->CreateAdd(x_as_int, x_rounding_bias);
- x_as_int = ir_builder->CreateAnd(
- x_as_int, llvm::ConstantInt::get(int_type, truncation_mask));
+ x_as_int = b->CreateAdd(x_as_int, x_rounding_bias);
+ x_as_int = b->CreateAnd(x_as_int,
+ llvm::ConstantInt::get(int_type, truncation_mask));
}
if (exponent_bits < 8) {
@@ -120,29 +120,29 @@ llvm::Value* EmitReducePrecisionFloat(llvm::Value* x, int64 exponent_bits,
f32_exponent_bias - reduced_exponent_bias;
// Do we overflow or underflow?
- llvm::Value* x_exponent = ir_builder->CreateAnd(
+ llvm::Value* x_exponent = b->CreateAnd(
x_as_int, llvm::ConstantInt::get(int_type, f32_exp_bits_mask));
- llvm::Value* x_overflows = ir_builder->CreateICmpUGT(
+ llvm::Value* x_overflows = b->CreateICmpUGT(
x_exponent,
llvm::ConstantInt::get(int_type, reduced_max_exponent << 23));
- llvm::Value* x_underflows = ir_builder->CreateICmpULE(
+ llvm::Value* x_underflows = b->CreateICmpULE(
x_exponent,
llvm::ConstantInt::get(int_type, reduced_min_exponent << 23));
// Compute appropriately-signed values of zero and infinity.
- llvm::Value* x_signed_zero = ir_builder->CreateAnd(
+ llvm::Value* x_signed_zero = b->CreateAnd(
x_as_int, llvm::ConstantInt::get(int_type, f32_sign_bit_mask));
- llvm::Value* x_signed_inf = ir_builder->CreateOr(
+ llvm::Value* x_signed_inf = b->CreateOr(
x_signed_zero, llvm::ConstantInt::get(int_type, f32_exp_bits_mask));
// Force to zero or infinity if overflow or underflow. (Note that this
// truncates all denormal values to zero, rather than rounding them.)
- x_as_int = ir_builder->CreateSelect(x_overflows, x_signed_inf, x_as_int);
- x_as_int = ir_builder->CreateSelect(x_underflows, x_signed_zero, x_as_int);
+ x_as_int = b->CreateSelect(x_overflows, x_signed_inf, x_as_int);
+ x_as_int = b->CreateSelect(x_underflows, x_signed_zero, x_as_int);
}
// Cast the result back to a floating-point type.
- llvm::Value* result = ir_builder->CreateBitCast(x_as_int, float_type);
+ llvm::Value* result = b->CreateBitCast(x_as_int, float_type);
// Correct result for NaN inputs.
//
@@ -154,53 +154,49 @@ llvm::Value* EmitReducePrecisionFloat(llvm::Value* x, int64 exponent_bits,
//
// If the fast-math flags are set to assume no NaNs, the comparison is likely
// to be optimized away, so there's no point in even emitting it.
- if (!ir_builder->getFastMathFlags().noNaNs()) {
- llvm::Value* x_is_nan = ir_builder->CreateFCmpUNO(x, x);
+ if (!b->getFastMathFlags().noNaNs()) {
+ llvm::Value* x_is_nan = b->CreateFCmpUNO(x, x);
if (mantissa_bits > 0) {
- result = ir_builder->CreateSelect(x_is_nan, x, result);
+ result = b->CreateSelect(x_is_nan, x, result);
} else {
- result = ir_builder->CreateSelect(
+ result = b->CreateSelect(
x_is_nan, llvm::ConstantFP::getInfinity(float_type), result);
}
}
return result;
}
-llvm::Value* EmitF32ToBF16(llvm::Value* f32_value,
- llvm::IRBuilder<>* ir_builder) {
+llvm::Value* EmitF32ToBF16(llvm::Value* f32_value, llvm::IRBuilder<>* b) {
auto reduced_precision = EmitReducePrecisionFloat(
f32_value,
/*exponent_bits=*/primitive_util::kBFloat16ExponentBits,
- /*mantissa_bits=*/primitive_util::kBFloat16MantissaBits, ir_builder);
- auto as_int32 =
- ir_builder->CreateBitCast(reduced_precision, ir_builder->getInt32Ty());
- auto shifted = ir_builder->CreateLShr(as_int32, 16);
- auto truncated = ir_builder->CreateTrunc(shifted, ir_builder->getInt16Ty());
- return ir_builder->CreateBitCast(truncated, ir_builder->getInt16Ty());
+ /*mantissa_bits=*/primitive_util::kBFloat16MantissaBits, b);
+ auto as_int32 = b->CreateBitCast(reduced_precision, b->getInt32Ty());
+ auto shifted = b->CreateLShr(as_int32, 16);
+ auto truncated = b->CreateTrunc(shifted, b->getInt16Ty());
+ return b->CreateBitCast(truncated, b->getInt16Ty());
}
-llvm::Value* EmitBF16ToF32(llvm::Value* bf16_value,
- llvm::IRBuilder<>* ir_builder) {
- auto as_int16 =
- ir_builder->CreateBitCast(bf16_value, ir_builder->getInt16Ty());
- auto as_int32 = ir_builder->CreateZExt(as_int16, ir_builder->getInt32Ty());
- auto shifted = ir_builder->CreateShl(as_int32, 16);
- return ir_builder->CreateBitCast(shifted, ir_builder->getFloatTy());
+llvm::Value* EmitBF16ToF32(llvm::Value* bf16_value, llvm::IRBuilder<>* b) {
+ auto as_int16 = b->CreateBitCast(bf16_value, b->getInt16Ty());
+ auto as_int32 = b->CreateZExt(as_int16, b->getInt32Ty());
+ auto shifted = b->CreateShl(as_int32, 16);
+ return b->CreateBitCast(shifted, b->getFloatTy());
}
llvm::Value* EmitIntegralToFloating(llvm::Value* integer_value,
PrimitiveType from_type,
PrimitiveType to_type, llvm::Module* module,
- llvm::IRBuilder<>* ir_builder) {
+ llvm::IRBuilder<>* b) {
if (primitive_util::IsSignedIntegralType(from_type)) {
- return ir_builder->CreateSIToFP(
- integer_value, llvm_ir::PrimitiveTypeToIrType(to_type, module));
+ return b->CreateSIToFP(integer_value,
+ llvm_ir::PrimitiveTypeToIrType(to_type, module));
} else {
CHECK(primitive_util::IsUnsignedIntegralType(from_type) ||
from_type == PRED);
- return ir_builder->CreateUIToFP(
- integer_value, llvm_ir::PrimitiveTypeToIrType(to_type, module));
+ return b->CreateUIToFP(integer_value,
+ llvm_ir::PrimitiveTypeToIrType(to_type, module));
}
}
@@ -226,39 +222,43 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerUnaryOp(
case HloOpcode::kConvert: {
PrimitiveType from_type = op->operand(0)->shape().element_type();
PrimitiveType to_type = op->shape().element_type();
- CHECK(primitive_util::IsIntegralType(from_type) || from_type == PRED);
+ CHECK(primitive_util::IsIntegralType(from_type) || from_type == PRED)
+ << from_type;
if (from_type == to_type) {
return operand_value;
}
+ if (to_type == PRED) {
+ return b_->CreateZExt(
+ b_->CreateICmpNE(operand_value, llvm::ConstantInt::get(
+ operand_value->getType(), 0)),
+ llvm_ir::PrimitiveTypeToIrType(PRED, module_));
+ }
if (primitive_util::IsIntegralType(to_type)) {
- return ir_builder_->CreateIntCast(
+ return b_->CreateIntCast(
operand_value, llvm_ir::PrimitiveTypeToIrType(to_type, module_),
primitive_util::IsSignedIntegralType(from_type));
}
if (primitive_util::IsFloatingPointType(to_type)) {
if (to_type == BF16) {
- return EmitF32ToBF16(
- EmitIntegralToFloating(operand_value, from_type, F32, module_,
- ir_builder_),
- ir_builder_);
+ return EmitF32ToBF16(EmitIntegralToFloating(operand_value, from_type,
+ F32, module_, b_),
+ b_);
}
return EmitIntegralToFloating(operand_value, from_type, to_type,
- module_, ir_builder_);
+ module_, b_);
}
if (primitive_util::IsComplexType(to_type)) {
auto to_ir_component_type = llvm_ir::PrimitiveTypeToIrType(
primitive_util::ComplexComponentType(to_type), module_);
if (primitive_util::IsSignedIntegralType(from_type)) {
return EmitComposeComplex(
- op,
- ir_builder_->CreateSIToFP(operand_value, to_ir_component_type),
+ op, b_->CreateSIToFP(operand_value, to_ir_component_type),
nullptr);
}
if (primitive_util::IsUnsignedIntegralType(from_type) ||
from_type == PRED) {
return EmitComposeComplex(
- op,
- ir_builder_->CreateUIToFP(operand_value, to_ir_component_type),
+ op, b_->CreateUIToFP(operand_value, to_ir_component_type),
nullptr);
}
}
@@ -275,7 +275,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerUnaryOp(
}
if (primitive_util::BitWidth(from_type) ==
primitive_util::BitWidth(to_type)) {
- return ir_builder_->CreateBitCast(
+ return b_->CreateBitCast(
operand_value, llvm_ir::PrimitiveTypeToIrType(to_type, module_));
}
return InvalidArgument(
@@ -293,18 +293,18 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerUnaryOp(
auto type =
llvm_ir::PrimitiveTypeToIrType(op->shape().element_type(), module_);
auto zero = llvm::ConstantInt::get(type, 0);
- auto cmp = ir_builder_->CreateICmpSGE(operand_value, zero);
- return ir_builder_->CreateSelect(cmp, operand_value,
- ir_builder_->CreateNeg(operand_value));
+ auto cmp = b_->CreateICmpSGE(operand_value, zero);
+ return b_->CreateSelect(cmp, operand_value,
+ b_->CreateNeg(operand_value));
} else {
return operand_value;
}
}
case HloOpcode::kClz: {
- auto is_zero_undef = ir_builder_->getFalse();
- return llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::ctlz, {operand_value, is_zero_undef},
- {operand_value->getType()}, ir_builder_);
+ auto is_zero_undef = b_->getFalse();
+ return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::ctlz,
+ {operand_value, is_zero_undef},
+ {operand_value->getType()}, b_);
}
case HloOpcode::kSign: {
bool is_signed =
@@ -312,31 +312,28 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerUnaryOp(
auto type =
llvm_ir::PrimitiveTypeToIrType(op->shape().element_type(), module_);
auto zero = llvm::ConstantInt::get(type, 0);
- auto cmp = ir_builder_->CreateICmpEQ(operand_value, zero);
+ auto cmp = b_->CreateICmpEQ(operand_value, zero);
if (is_signed) {
- auto ashr = ir_builder_->CreateAShr(operand_value,
- type->getIntegerBitWidth() - 1);
- return ir_builder_->CreateSelect(cmp, zero,
- ir_builder_->CreateOr(ashr, 1));
+ auto ashr =
+ b_->CreateAShr(operand_value, type->getIntegerBitWidth() - 1);
+ return b_->CreateSelect(cmp, zero, b_->CreateOr(ashr, 1));
} else {
- return ir_builder_->CreateSelect(cmp, zero,
- llvm::ConstantInt::get(type, 1));
+ return b_->CreateSelect(cmp, zero, llvm::ConstantInt::get(type, 1));
}
}
case HloOpcode::kNegate:
- return ir_builder_->CreateNeg(operand_value);
+ return b_->CreateNeg(operand_value);
case HloOpcode::kNot: {
auto type = op->shape().element_type();
if (type == PRED) {
// It is not sufficient to just call CreateNot() here because a PRED
// is represented as an i8 and the truth value is stored only in the
// bottom bit.
- return ir_builder_->CreateZExt(
- ir_builder_->CreateNot(ir_builder_->CreateTrunc(
- operand_value, ir_builder_->getInt1Ty())),
+ return b_->CreateZExt(
+ b_->CreateNot(b_->CreateTrunc(operand_value, b_->getInt1Ty())),
llvm_ir::PrimitiveTypeToIrType(PRED, module_));
} else if (primitive_util::IsIntegralType(type)) {
- return ir_builder_->CreateNot(operand_value);
+ return b_->CreateNot(operand_value);
}
return Unimplemented("unary op Not is not defined for type '%d'", type);
}
@@ -352,7 +349,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatUnaryOp(
case HloOpcode::kConvert: {
PrimitiveType from_type = op->operand(0)->shape().element_type();
PrimitiveType to_type = op->shape().element_type();
- CHECK(primitive_util::IsFloatingPointType(from_type));
+ CHECK(primitive_util::IsFloatingPointType(from_type)) << from_type;
if (from_type == to_type) {
return operand_value;
}
@@ -364,32 +361,38 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatUnaryOp(
}
return EmitComposeComplex(
op,
- ir_builder_->CreateFPCast(
- operand_value,
- llvm_ir::PrimitiveTypeToIrType(to_component_type, module_)),
+ b_->CreateFPCast(operand_value, llvm_ir::PrimitiveTypeToIrType(
+ to_component_type, module_)),
nullptr);
}
if (from_type == BF16) {
TF_RET_CHECK(to_type != BF16);
- operand_value = EmitBF16ToF32(operand_value, ir_builder_);
+ operand_value = EmitBF16ToF32(operand_value, b_);
from_type = F32;
if (from_type == to_type) {
return operand_value;
}
}
if (from_type == F32 && to_type == BF16) {
- return EmitF32ToBF16(operand_value, ir_builder_);
+ return EmitF32ToBF16(operand_value, b_);
+ }
+ if (to_type == PRED) {
+ return b_->CreateZExt(
+ b_->CreateFCmpUNE(
+ operand_value,
+ llvm::ConstantFP::get(operand_value->getType(), 0.0)),
+ llvm_ir::PrimitiveTypeToIrType(PRED, module_));
}
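A small scalar sketch of the two to_type == PRED conversions introduced above, with hypothetical helper names: PRED is stored as an i8 holding 0 or 1, the integral path compares against zero, and the floating-point path uses an unordered compare so that NaN also maps to true.

#include <cmath>
#include <cstdint>

static uint8_t IntToPredSketch(int64_t x) { return x != 0 ? 1 : 0; }

static uint8_t FloatToPredSketch(float x) {
  // FCmpUNE is unordered: NaN != 0.0 is true. The std::isnan term is
  // redundant in C++ but spells the unordered semantics out.
  return (std::isnan(x) || x != 0.0f) ? 1 : 0;
}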
if (primitive_util::IsFloatingPointType(to_type)) {
- return ir_builder_->CreateFPCast(
+ return b_->CreateFPCast(
operand_value, llvm_ir::PrimitiveTypeToIrType(to_type, module_));
}
if (primitive_util::IsSignedIntegralType(to_type)) {
- return ir_builder_->CreateFPToSI(
+ return b_->CreateFPToSI(
operand_value, llvm_ir::PrimitiveTypeToIrType(to_type, module_));
}
if (primitive_util::IsUnsignedIntegralType(to_type)) {
- return ir_builder_->CreateFPToUI(
+ return b_->CreateFPToUI(
operand_value, llvm_ir::PrimitiveTypeToIrType(to_type, module_));
}
return Unimplemented("unhandled conversion operation: %s => %s",
@@ -405,7 +408,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatUnaryOp(
}
if (primitive_util::BitWidth(from_type) ==
primitive_util::BitWidth(to_type)) {
- return ir_builder_->CreateBitCast(
+ return b_->CreateBitCast(
operand_value, llvm_ir::PrimitiveTypeToIrType(to_type, module_));
}
return InvalidArgument(
@@ -429,45 +432,49 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatUnaryOp(
case HloOpcode::kSin:
return EmitSin(op->shape().element_type(), operand_value);
case HloOpcode::kFloor:
- return llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::floor, {operand_value}, {operand_value->getType()},
- ir_builder_);
+ return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::floor,
+ {operand_value},
+ {operand_value->getType()}, b_);
case HloOpcode::kCeil:
- return llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::ceil, {operand_value}, {operand_value->getType()},
- ir_builder_);
+ return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::ceil,
+ {operand_value},
+ {operand_value->getType()}, b_);
case HloOpcode::kAbs:
- return llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::fabs, {operand_value}, {operand_value->getType()},
- ir_builder_);
+ return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs,
+ {operand_value},
+ {operand_value->getType()}, b_);
case HloOpcode::kRoundNearestAfz:
- return llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::round, {operand_value}, {operand_value->getType()},
- ir_builder_);
+ return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::round,
+ {operand_value},
+ {operand_value->getType()}, b_);
case HloOpcode::kSign: {
// TODO(b/32151903): Ensure consistent sign behavior for -0.0.
auto type = operand_value->getType();
auto zero = llvm::ConstantFP::get(type, 0.0);
- auto oeq = ir_builder_->CreateFCmpOEQ(operand_value, zero);
- auto olt = ir_builder_->CreateFCmpOLT(operand_value, zero);
- return ir_builder_->CreateSelect(
+ auto oeq = b_->CreateFCmpOEQ(operand_value, zero);
+ auto olt = b_->CreateFCmpOLT(operand_value, zero);
+ return b_->CreateSelect(
oeq, zero,
- ir_builder_->CreateSelect(olt, llvm::ConstantFP::get(type, -1.0),
- llvm::ConstantFP::get(type, 1.0)));
+ b_->CreateSelect(olt, llvm::ConstantFP::get(type, -1.0),
+ llvm::ConstantFP::get(type, 1.0)));
}
case HloOpcode::kIsFinite: {
// abs(x) o!= inf (an ordered not-equal); this works because an ordered
// comparison returns false if either operand is NaN.
auto type = operand_value->getType();
auto abs_value = llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::fabs, {operand_value}, {type}, ir_builder_);
+ llvm::Intrinsic::fabs, {operand_value}, {type}, b_);
auto infinity = llvm::ConstantFP::getInfinity(type);
- auto not_infinite = ir_builder_->CreateFCmpONE(abs_value, infinity);
- return ir_builder_->CreateZExt(
- not_infinite, llvm_ir::PrimitiveTypeToIrType(PRED, module_));
+ auto not_infinite = b_->CreateFCmpONE(abs_value, infinity);
+ return b_->CreateZExt(not_infinite,
+ llvm_ir::PrimitiveTypeToIrType(PRED, module_));
}
case HloOpcode::kNegate:
- return ir_builder_->CreateFNeg(operand_value);
+ return b_->CreateFNeg(operand_value);
+ case HloOpcode::kReal:
+ return operand_value;
+ case HloOpcode::kImag:
+ return llvm::ConstantFP::get(operand_value->getType(), 0.0);
default:
return Unimplemented("unary floating-point op '%s'",
HloOpcodeString(op->opcode()).c_str());
@@ -487,13 +494,12 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexUnaryOp(
auto a = EmitExtractReal(operand_value);
auto b = EmitExtractImag(operand_value);
llvm::Type* llvm_ty = a->getType();
- auto sum_sq = ir_builder_->CreateFAdd(ir_builder_->CreateFMul(a, a),
- ir_builder_->CreateFMul(b, b));
+ auto sum_sq = b_->CreateFAdd(b_->CreateFMul(a, a), b_->CreateFMul(b, b));
TF_ASSIGN_OR_RETURN(auto log_sum_sq, EmitLog(component_type, sum_sq));
TF_ASSIGN_OR_RETURN(auto angle, EmitAtan2(component_type, b, a));
auto one_half = llvm::ConstantFP::get(llvm_ty, 0.5);
- return EmitComposeComplex(
- op, ir_builder_->CreateFMul(one_half, log_sum_sq), angle);
+ return EmitComposeComplex(op, b_->CreateFMul(one_half, log_sum_sq),
+ angle);
}
case HloOpcode::kLog1p: {
// log1p(a+bi) = .5*log((a+1)^2+b^2) + i*atan2(b, a + 1)
@@ -501,15 +507,14 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexUnaryOp(
auto b = EmitExtractImag(operand_value);
llvm::Type* llvm_ty = a->getType();
auto one = llvm::ConstantFP::get(llvm_ty, 1.0);
- auto a_plus_one = ir_builder_->CreateFAdd(a, one);
- auto sum_sq = ir_builder_->CreateFAdd(
- ir_builder_->CreateFMul(a_plus_one, a_plus_one),
- ir_builder_->CreateFMul(b, b));
+ auto a_plus_one = b_->CreateFAdd(a, one);
+ auto sum_sq = b_->CreateFAdd(b_->CreateFMul(a_plus_one, a_plus_one),
+ b_->CreateFMul(b, b));
TF_ASSIGN_OR_RETURN(auto log_sum_sq, EmitLog(component_type, sum_sq));
TF_ASSIGN_OR_RETURN(auto angle, EmitAtan2(component_type, b, a_plus_one));
auto one_half = llvm::ConstantFP::get(llvm_ty, 0.5);
- return EmitComposeComplex(
- op, ir_builder_->CreateFMul(one_half, log_sum_sq), angle);
+ return EmitComposeComplex(op, b_->CreateFMul(one_half, log_sum_sq),
+ angle);
}
case HloOpcode::kConvert: {
PrimitiveType from_type = op->operand(0)->shape().element_type();
@@ -523,12 +528,11 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexUnaryOp(
primitive_util::ComplexComponentType(to_type);
auto to_ir_component_type =
llvm_ir::PrimitiveTypeToIrType(to_component_type, module_);
- return EmitComposeComplex(
- op,
- ir_builder_->CreateFPCast(EmitExtractReal(operand_value),
- to_ir_component_type),
- ir_builder_->CreateFPCast(EmitExtractImag(operand_value),
- to_ir_component_type));
+ return EmitComposeComplex(op,
+ b_->CreateFPCast(EmitExtractReal(operand_value),
+ to_ir_component_type),
+ b_->CreateFPCast(EmitExtractImag(operand_value),
+ to_ir_component_type));
}
case HloOpcode::kExp: {
// e^(a+bi) = e^a*(cos(b)+sin(b)i)
@@ -538,8 +542,8 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexUnaryOp(
auto cos_b, EmitCos(component_type, EmitExtractImag(operand_value)));
TF_ASSIGN_OR_RETURN(
auto sin_b, EmitSin(component_type, EmitExtractImag(operand_value)));
- return EmitComposeComplex(op, ir_builder_->CreateFMul(exp_a, cos_b),
- ir_builder_->CreateFMul(exp_a, sin_b));
+ return EmitComposeComplex(op, b_->CreateFMul(exp_a, cos_b),
+ b_->CreateFMul(exp_a, sin_b));
}
case HloOpcode::kExpm1: {
// e^(a+bi)-1 = (e^a*cos(b)-1)+e^a*sin(b)i
@@ -550,9 +554,8 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexUnaryOp(
TF_ASSIGN_OR_RETURN(
auto sin_b, EmitSin(component_type, EmitExtractImag(operand_value)));
auto one = llvm::ConstantFP::get(exp_a->getType(), 1.0);
- auto real_result =
- ir_builder_->CreateFSub(ir_builder_->CreateFMul(exp_a, cos_b), one);
- auto imag_result = ir_builder_->CreateFMul(exp_a, sin_b);
+ auto real_result = b_->CreateFSub(b_->CreateFMul(exp_a, cos_b), one);
+ auto imag_result = b_->CreateFMul(exp_a, sin_b);
return EmitComposeComplex(op, real_result, imag_result);
}
case HloOpcode::kCos: {
@@ -567,18 +570,14 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexUnaryOp(
auto b = EmitExtractImag(operand_value);
auto type = a->getType();
TF_ASSIGN_OR_RETURN(auto exp_b, EmitExp(component_type, b));
- auto half_exp_b =
- ir_builder_->CreateFMul(llvm::ConstantFP::get(type, 0.5), exp_b);
+ auto half_exp_b = b_->CreateFMul(llvm::ConstantFP::get(type, 0.5), exp_b);
auto half_exp_neg_b =
- ir_builder_->CreateFDiv(llvm::ConstantFP::get(type, 0.5), exp_b);
+ b_->CreateFDiv(llvm::ConstantFP::get(type, 0.5), exp_b);
TF_ASSIGN_OR_RETURN(auto cos_a, EmitCos(component_type, a));
TF_ASSIGN_OR_RETURN(auto sin_a, EmitSin(component_type, a));
return EmitComposeComplex(
- op,
- ir_builder_->CreateFMul(
- cos_a, ir_builder_->CreateFAdd(half_exp_neg_b, half_exp_b)),
- ir_builder_->CreateFMul(
- sin_a, ir_builder_->CreateFSub(half_exp_neg_b, half_exp_b)));
+ op, b_->CreateFMul(cos_a, b_->CreateFAdd(half_exp_neg_b, half_exp_b)),
+ b_->CreateFMul(sin_a, b_->CreateFSub(half_exp_neg_b, half_exp_b)));
}
case HloOpcode::kSin: {
// sin(z) = .5i(e^(-iz) - e^(iz))
@@ -594,18 +593,14 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexUnaryOp(
auto b = EmitExtractImag(operand_value);
auto type = a->getType();
TF_ASSIGN_OR_RETURN(auto exp_b, EmitExp(component_type, b));
- auto half_exp_b =
- ir_builder_->CreateFMul(llvm::ConstantFP::get(type, 0.5), exp_b);
+ auto half_exp_b = b_->CreateFMul(llvm::ConstantFP::get(type, 0.5), exp_b);
auto half_exp_neg_b =
- ir_builder_->CreateFDiv(llvm::ConstantFP::get(type, 0.5), exp_b);
+ b_->CreateFDiv(llvm::ConstantFP::get(type, 0.5), exp_b);
TF_ASSIGN_OR_RETURN(auto cos_a, EmitCos(component_type, a));
TF_ASSIGN_OR_RETURN(auto sin_a, EmitSin(component_type, a));
return EmitComposeComplex(
- op,
- ir_builder_->CreateFMul(
- sin_a, ir_builder_->CreateFAdd(half_exp_b, half_exp_neg_b)),
- ir_builder_->CreateFMul(
- cos_a, ir_builder_->CreateFSub(half_exp_b, half_exp_neg_b)));
+ op, b_->CreateFMul(sin_a, b_->CreateFAdd(half_exp_b, half_exp_neg_b)),
+ b_->CreateFMul(cos_a, b_->CreateFSub(half_exp_b, half_exp_neg_b)));
}
case HloOpcode::kTanh: {
/*
@@ -633,64 +628,61 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexUnaryOp(
TF_ASSIGN_OR_RETURN(auto exp_a, EmitExp(component_type, a));
TF_ASSIGN_OR_RETURN(auto cos_b, EmitCos(component_type, b));
TF_ASSIGN_OR_RETURN(auto sin_b, EmitSin(component_type, b));
- auto exp_neg_a = ir_builder_->CreateFDiv(
- llvm::ConstantFP::get(exp_a->getType(), 1), exp_a);
- auto exp_2a_minus_exp_neg_2a = ir_builder_->CreateFSub(
- ir_builder_->CreateFMul(exp_a, exp_a),
- ir_builder_->CreateFMul(exp_neg_a, exp_neg_a));
- auto cos_b_sq = ir_builder_->CreateFMul(cos_b, cos_b);
- auto sin_b_sq = ir_builder_->CreateFMul(sin_b, sin_b);
- auto real_num = ir_builder_->CreateFAdd(
- ir_builder_->CreateFMul(cos_b_sq, exp_2a_minus_exp_neg_2a),
- ir_builder_->CreateFMul(sin_b_sq, exp_2a_minus_exp_neg_2a));
- auto cos_b_sin_b = ir_builder_->CreateFMul(cos_b, sin_b);
- auto exp_a_plus_exp_neg_a = ir_builder_->CreateFAdd(exp_a, exp_neg_a);
+ auto exp_neg_a =
+ b_->CreateFDiv(llvm::ConstantFP::get(exp_a->getType(), 1), exp_a);
+ auto exp_2a_minus_exp_neg_2a = b_->CreateFSub(
+ b_->CreateFMul(exp_a, exp_a), b_->CreateFMul(exp_neg_a, exp_neg_a));
+ auto cos_b_sq = b_->CreateFMul(cos_b, cos_b);
+ auto sin_b_sq = b_->CreateFMul(sin_b, sin_b);
+ auto real_num =
+ b_->CreateFAdd(b_->CreateFMul(cos_b_sq, exp_2a_minus_exp_neg_2a),
+ b_->CreateFMul(sin_b_sq, exp_2a_minus_exp_neg_2a));
+ auto cos_b_sin_b = b_->CreateFMul(cos_b, sin_b);
+ auto exp_a_plus_exp_neg_a = b_->CreateFAdd(exp_a, exp_neg_a);
auto exp_a_plus_exp_neg_a_sq =
- ir_builder_->CreateFMul(exp_a_plus_exp_neg_a, exp_a_plus_exp_neg_a);
- auto exp_a_minus_exp_neg_a = ir_builder_->CreateFSub(exp_a, exp_neg_a);
+ b_->CreateFMul(exp_a_plus_exp_neg_a, exp_a_plus_exp_neg_a);
+ auto exp_a_minus_exp_neg_a = b_->CreateFSub(exp_a, exp_neg_a);
auto exp_a_minus_exp_neg_a_sq =
- ir_builder_->CreateFMul(exp_a_minus_exp_neg_a, exp_a_minus_exp_neg_a);
- auto imag_num = ir_builder_->CreateFMul(
- cos_b_sin_b, ir_builder_->CreateFSub(exp_a_plus_exp_neg_a_sq,
- exp_a_minus_exp_neg_a_sq));
- auto denom = ir_builder_->CreateFAdd(
- ir_builder_->CreateFMul(cos_b_sq, exp_a_plus_exp_neg_a_sq),
- ir_builder_->CreateFMul(sin_b_sq, exp_a_minus_exp_neg_a_sq));
- return EmitComposeComplex(op, ir_builder_->CreateFDiv(real_num, denom),
- ir_builder_->CreateFDiv(imag_num, denom));
+ b_->CreateFMul(exp_a_minus_exp_neg_a, exp_a_minus_exp_neg_a);
+ auto imag_num = b_->CreateFMul(
+ cos_b_sin_b,
+ b_->CreateFSub(exp_a_plus_exp_neg_a_sq, exp_a_minus_exp_neg_a_sq));
+ auto denom =
+ b_->CreateFAdd(b_->CreateFMul(cos_b_sq, exp_a_plus_exp_neg_a_sq),
+ b_->CreateFMul(sin_b_sq, exp_a_minus_exp_neg_a_sq));
+ return EmitComposeComplex(op, b_->CreateFDiv(real_num, denom),
+ b_->CreateFDiv(imag_num, denom));
}
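The kTanh expansion above is algebraically the standard identity tanh(a+bi) = (sinh 2a + i sin 2b) / (cosh 2a + cos 2b); a scalar cross-check (not emitter code) that should agree with std::tanh up to rounding:

#include <cmath>
#include <complex>

static std::complex<double> TanhViaIdentity(std::complex<double> z) {
  const double a = z.real(), b = z.imag();
  const double denom = std::cosh(2 * a) + std::cos(2 * b);
  return {std::sinh(2 * a) / denom, std::sin(2 * b) / denom};
}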
case HloOpcode::kAbs: {
- auto sum_sq = ir_builder_->CreateFAdd(
- ir_builder_->CreateFMul(EmitExtractReal(operand_value),
- EmitExtractReal(operand_value)),
- ir_builder_->CreateFMul(EmitExtractImag(operand_value),
- EmitExtractImag(operand_value)));
+ auto sum_sq =
+ b_->CreateFAdd(b_->CreateFMul(EmitExtractReal(operand_value),
+ EmitExtractReal(operand_value)),
+ b_->CreateFMul(EmitExtractImag(operand_value),
+ EmitExtractImag(operand_value)));
return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::sqrt, {sum_sq},
- {sum_sq->getType()}, ir_builder_);
+ {sum_sq->getType()}, b_);
}
case HloOpcode::kSign: { // Sign(c) = c / |c|
- auto sum_sq = ir_builder_->CreateFAdd(
- ir_builder_->CreateFMul(EmitExtractReal(operand_value),
- EmitExtractReal(operand_value)),
- ir_builder_->CreateFMul(EmitExtractImag(operand_value),
- EmitExtractImag(operand_value)));
+ auto sum_sq =
+ b_->CreateFAdd(b_->CreateFMul(EmitExtractReal(operand_value),
+ EmitExtractReal(operand_value)),
+ b_->CreateFMul(EmitExtractImag(operand_value),
+ EmitExtractImag(operand_value)));
auto cplx_abs = llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::sqrt, {sum_sq}, {sum_sq->getType()}, ir_builder_);
+ llvm::Intrinsic::sqrt, {sum_sq}, {sum_sq->getType()}, b_);
auto type = cplx_abs->getType();
auto zero = llvm::ConstantFP::get(type, 0.0);
- auto oeq = ir_builder_->CreateFCmpOEQ(cplx_abs, zero);
- return ir_builder_->CreateSelect(
+ auto oeq = b_->CreateFCmpOEQ(cplx_abs, zero);
+ return b_->CreateSelect(
oeq, EmitComposeComplex(op, zero, zero),
EmitComposeComplex(
- op,
- ir_builder_->CreateFDiv(EmitExtractReal(operand_value), cplx_abs),
- ir_builder_->CreateFDiv(EmitExtractImag(operand_value),
- cplx_abs)));
+ op, b_->CreateFDiv(EmitExtractReal(operand_value), cplx_abs),
+ b_->CreateFDiv(EmitExtractImag(operand_value), cplx_abs)));
}
case HloOpcode::kNegate:
- return EmitComposeComplex(
- op, ir_builder_->CreateFNeg(EmitExtractReal(operand_value)),
- ir_builder_->CreateFNeg(EmitExtractImag(operand_value)));
+ return EmitComposeComplex(op,
+ b_->CreateFNeg(EmitExtractReal(operand_value)),
+ b_->CreateFNeg(EmitExtractImag(operand_value)));
case HloOpcode::kReal:
return EmitExtractReal(operand_value);
case HloOpcode::kImag:
@@ -724,15 +716,15 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatBinaryOp(
case HloOpcode::kComplex:
return EmitComposeComplex(op, lhs_value, rhs_value);
case HloOpcode::kAdd:
- return ir_builder_->CreateFAdd(lhs_value, rhs_value);
+ return b_->CreateFAdd(lhs_value, rhs_value);
case HloOpcode::kSubtract:
- return ir_builder_->CreateFSub(lhs_value, rhs_value);
+ return b_->CreateFSub(lhs_value, rhs_value);
case HloOpcode::kMultiply:
- return ir_builder_->CreateFMul(lhs_value, rhs_value);
+ return b_->CreateFMul(lhs_value, rhs_value);
case HloOpcode::kDivide:
- return ir_builder_->CreateFDiv(lhs_value, rhs_value);
+ return b_->CreateFDiv(lhs_value, rhs_value);
case HloOpcode::kRemainder:
- return ir_builder_->CreateFRem(lhs_value, rhs_value);
+ return b_->CreateFRem(lhs_value, rhs_value);
// LLVM comparisons can be "unordered" (U) or "ordered" (O) -- ordered
// comparisons always return false when one of the operands is NaN, whereas
// unordered comparisons return true.
@@ -742,22 +734,22 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitFloatBinaryOp(
// matches C++'s semantics.
case HloOpcode::kEq:
return llvm_ir::EmitComparison(llvm::CmpInst::FCMP_OEQ, lhs_value,
- rhs_value, ir_builder_);
+ rhs_value, b_);
case HloOpcode::kNe:
return llvm_ir::EmitComparison(llvm::CmpInst::FCMP_UNE, lhs_value,
- rhs_value, ir_builder_);
+ rhs_value, b_);
case HloOpcode::kLt:
return llvm_ir::EmitComparison(llvm::CmpInst::FCMP_OLT, lhs_value,
- rhs_value, ir_builder_);
+ rhs_value, b_);
case HloOpcode::kGt:
return llvm_ir::EmitComparison(llvm::CmpInst::FCMP_OGT, lhs_value,
- rhs_value, ir_builder_);
+ rhs_value, b_);
case HloOpcode::kLe:
return llvm_ir::EmitComparison(llvm::CmpInst::FCMP_OLE, lhs_value,
- rhs_value, ir_builder_);
+ rhs_value, b_);
case HloOpcode::kGe:
return llvm_ir::EmitComparison(llvm::CmpInst::FCMP_OGE, lhs_value,
- rhs_value, ir_builder_);
+ rhs_value, b_);
case HloOpcode::kMaximum:
return EmitFloatMax(lhs_value, rhs_value);
@@ -778,64 +770,56 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexBinaryOp(
llvm::Value* rhs_value) const {
switch (op->opcode()) {
case HloOpcode::kAdd:
- return EmitComposeComplex(
- op,
- ir_builder_->CreateFAdd(EmitExtractReal(lhs_value),
- EmitExtractReal(rhs_value)),
- ir_builder_->CreateFAdd(EmitExtractImag(lhs_value),
- EmitExtractImag(rhs_value)));
+ return EmitComposeComplex(op,
+ b_->CreateFAdd(EmitExtractReal(lhs_value),
+ EmitExtractReal(rhs_value)),
+ b_->CreateFAdd(EmitExtractImag(lhs_value),
+ EmitExtractImag(rhs_value)));
case HloOpcode::kSubtract:
- return EmitComposeComplex(
- op,
- ir_builder_->CreateFSub(EmitExtractReal(lhs_value),
- EmitExtractReal(rhs_value)),
- ir_builder_->CreateFSub(EmitExtractImag(lhs_value),
- EmitExtractImag(rhs_value)));
+ return EmitComposeComplex(op,
+ b_->CreateFSub(EmitExtractReal(lhs_value),
+ EmitExtractReal(rhs_value)),
+ b_->CreateFSub(EmitExtractImag(lhs_value),
+ EmitExtractImag(rhs_value)));
case HloOpcode::kMultiply:
return EmitComposeComplex(
op,
- ir_builder_->CreateFSub(
- ir_builder_->CreateFMul(EmitExtractReal(lhs_value),
- EmitExtractReal(rhs_value)),
- ir_builder_->CreateFMul(EmitExtractImag(lhs_value),
- EmitExtractImag(rhs_value))),
- ir_builder_->CreateFAdd(
- ir_builder_->CreateFMul(EmitExtractReal(lhs_value),
- EmitExtractImag(rhs_value)),
- ir_builder_->CreateFMul(EmitExtractImag(lhs_value),
- EmitExtractReal(rhs_value))));
+ b_->CreateFSub(b_->CreateFMul(EmitExtractReal(lhs_value),
+ EmitExtractReal(rhs_value)),
+ b_->CreateFMul(EmitExtractImag(lhs_value),
+ EmitExtractImag(rhs_value))),
+ b_->CreateFAdd(b_->CreateFMul(EmitExtractReal(lhs_value),
+ EmitExtractImag(rhs_value)),
+ b_->CreateFMul(EmitExtractImag(lhs_value),
+ EmitExtractReal(rhs_value))));
case HloOpcode::kDivide: {
// (a+bi) / (c+di) = ((a+bi)(c-di)) / ((c+di)(c-di))
// = ((ac + bd) + (bc - ad)i) / (c^2 + d^2)
- auto rhs_sum_sq = ir_builder_->CreateFAdd(
- ir_builder_->CreateFMul(EmitExtractReal(rhs_value),
- EmitExtractReal(rhs_value)),
- ir_builder_->CreateFMul(EmitExtractImag(rhs_value),
- EmitExtractImag(rhs_value)));
+ auto rhs_sum_sq =
+ b_->CreateFAdd(b_->CreateFMul(EmitExtractReal(rhs_value),
+ EmitExtractReal(rhs_value)),
+ b_->CreateFMul(EmitExtractImag(rhs_value),
+ EmitExtractImag(rhs_value)));
auto type = rhs_sum_sq->getType();
auto zero = llvm::ConstantFP::get(type, 0.0);
- auto oeq = ir_builder_->CreateFCmpOEQ(rhs_sum_sq, zero);
- auto real_inf_or_nan =
- ir_builder_->CreateFDiv(EmitExtractReal(lhs_value), zero);
- auto imag_inf_or_nan =
- ir_builder_->CreateFDiv(EmitExtractImag(lhs_value), zero);
- return ir_builder_->CreateSelect(
+ auto oeq = b_->CreateFCmpOEQ(rhs_sum_sq, zero);
+ auto real_inf_or_nan = b_->CreateFDiv(EmitExtractReal(lhs_value), zero);
+ auto imag_inf_or_nan = b_->CreateFDiv(EmitExtractImag(lhs_value), zero);
+ return b_->CreateSelect(
oeq, EmitComposeComplex(op, real_inf_or_nan, imag_inf_or_nan),
EmitComposeComplex(
op,
- ir_builder_->CreateFDiv(
- ir_builder_->CreateFAdd(
- ir_builder_->CreateFMul(EmitExtractReal(lhs_value),
- EmitExtractReal(rhs_value)),
- ir_builder_->CreateFMul(EmitExtractImag(lhs_value),
- EmitExtractImag(rhs_value))),
+ b_->CreateFDiv(
+ b_->CreateFAdd(b_->CreateFMul(EmitExtractReal(lhs_value),
+ EmitExtractReal(rhs_value)),
+ b_->CreateFMul(EmitExtractImag(lhs_value),
+ EmitExtractImag(rhs_value))),
rhs_sum_sq),
- ir_builder_->CreateFDiv(
- ir_builder_->CreateFSub(
- ir_builder_->CreateFMul(EmitExtractImag(lhs_value),
- EmitExtractReal(rhs_value)),
- ir_builder_->CreateFMul(EmitExtractReal(lhs_value),
- EmitExtractImag(rhs_value))),
+ b_->CreateFDiv(
+ b_->CreateFSub(b_->CreateFMul(EmitExtractImag(lhs_value),
+ EmitExtractReal(rhs_value)),
+ b_->CreateFMul(EmitExtractReal(lhs_value),
+ EmitExtractImag(rhs_value))),
rhs_sum_sq)));
}
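A scalar restatement of the kDivide case above: (a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / (c^2+d^2), with the same zero-denominator handling, where dividing the numerator components by zero yields the inf/NaN pattern (assuming IEEE-754 semantics):

#include <complex>

static std::complex<double> ComplexDivSketch(std::complex<double> lhs,
                                             std::complex<double> rhs) {
  const double a = lhs.real(), b = lhs.imag();
  const double c = rhs.real(), d = rhs.imag();
  const double denom = c * c + d * d;
  if (denom == 0.0) {
    return {a / denom, b / denom};  // +/-inf or NaN components
  }
  return {(a * c + b * d) / denom, (b * c - a * d) / denom};
}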
// LLVM comparisons can be "unordered" (U) or "ordered" (O) -- ordered
@@ -846,21 +830,21 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexBinaryOp(
// unordered comparison. This makes x != y equivalent to !(x == y), and
// matches C++'s semantics.
case HloOpcode::kEq:
- return ir_builder_->CreateAnd(
+ return b_->CreateAnd(
llvm_ir::EmitComparison(llvm::CmpInst::FCMP_OEQ,
EmitExtractReal(lhs_value),
- EmitExtractReal(rhs_value), ir_builder_),
+ EmitExtractReal(rhs_value), b_),
llvm_ir::EmitComparison(llvm::CmpInst::FCMP_OEQ,
EmitExtractImag(lhs_value),
- EmitExtractImag(rhs_value), ir_builder_));
+ EmitExtractImag(rhs_value), b_));
case HloOpcode::kNe:
- return ir_builder_->CreateOr(
+ return b_->CreateOr(
llvm_ir::EmitComparison(llvm::CmpInst::FCMP_UNE,
EmitExtractReal(lhs_value),
- EmitExtractReal(rhs_value), ir_builder_),
+ EmitExtractReal(rhs_value), b_),
llvm_ir::EmitComparison(llvm::CmpInst::FCMP_UNE,
EmitExtractImag(lhs_value),
- EmitExtractImag(rhs_value), ir_builder_));
+ EmitExtractImag(rhs_value), b_));
case HloOpcode::kPower: {
// (a+bi)^(c+di) =
@@ -872,29 +856,26 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexBinaryOp(
auto b = EmitExtractImag(lhs_value);
auto c = EmitExtractReal(rhs_value);
auto d = EmitExtractImag(rhs_value);
- auto aa_p_bb = ir_builder_->CreateFAdd(ir_builder_->CreateFMul(a, a),
- ir_builder_->CreateFMul(b, b));
+ auto aa_p_bb = b_->CreateFAdd(b_->CreateFMul(a, a), b_->CreateFMul(b, b));
auto one_half = llvm::ConstantFP::get(a->getType(), 0.5);
- auto half_c = ir_builder_->CreateFMul(one_half, c);
+ auto half_c = b_->CreateFMul(one_half, c);
TF_ASSIGN_OR_RETURN(auto aa_p_bb_to_half_c,
EmitPow(component_type, aa_p_bb, half_c));
- auto neg_d = ir_builder_->CreateFNeg(d);
+ auto neg_d = b_->CreateFNeg(d);
TF_ASSIGN_OR_RETURN(auto arg_lhs, EmitAtan2(component_type, b, a));
- auto neg_d_arg_lhs = ir_builder_->CreateFMul(neg_d, arg_lhs);
+ auto neg_d_arg_lhs = b_->CreateFMul(neg_d, arg_lhs);
TF_ASSIGN_OR_RETURN(auto e_to_neg_d_arg_lhs,
EmitExp(component_type, neg_d_arg_lhs));
- auto coeff =
- ir_builder_->CreateFMul(aa_p_bb_to_half_c, e_to_neg_d_arg_lhs);
+ auto coeff = b_->CreateFMul(aa_p_bb_to_half_c, e_to_neg_d_arg_lhs);
TF_ASSIGN_OR_RETURN(auto ln_aa_p_bb, EmitLog(component_type, aa_p_bb));
- auto half_d = ir_builder_->CreateFMul(one_half, d);
- auto q =
- ir_builder_->CreateFAdd(ir_builder_->CreateFMul(c, arg_lhs),
- ir_builder_->CreateFMul(half_d, ln_aa_p_bb));
+ auto half_d = b_->CreateFMul(one_half, d);
+ auto q = b_->CreateFAdd(b_->CreateFMul(c, arg_lhs),
+ b_->CreateFMul(half_d, ln_aa_p_bb));
TF_ASSIGN_OR_RETURN(auto cos_q, EmitCos(component_type, q));
TF_ASSIGN_OR_RETURN(auto sin_q, EmitSin(component_type, q));
- return EmitComposeComplex(op, ir_builder_->CreateFMul(coeff, cos_q),
- ir_builder_->CreateFMul(coeff, sin_q));
+ return EmitComposeComplex(op, b_->CreateFMul(coeff, cos_q),
+ b_->CreateFMul(coeff, sin_q));
}
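The kPower case expands (a+bi)^(c+di) as exp((c+di) * log(a+bi)); a scalar restatement, which away from branch cuts and special cases should match std::pow on std::complex:

#include <cmath>
#include <complex>

static std::complex<double> ComplexPowSketch(std::complex<double> lhs,
                                             std::complex<double> rhs) {
  const double a = lhs.real(), b = lhs.imag();
  const double c = rhs.real(), d = rhs.imag();
  const double aa_p_bb = a * a + b * b;   // |lhs|^2
  const double arg = std::atan2(b, a);    // arg(lhs)
  const double coeff = std::pow(aa_p_bb, 0.5 * c) * std::exp(-d * arg);
  const double q = c * arg + 0.5 * d * std::log(aa_p_bb);
  return {coeff * std::cos(q), coeff * std::sin(q)};
}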
default:
return Unimplemented("binary complex op '%s'",
@@ -904,12 +885,12 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitComplexBinaryOp(
llvm::Value* ElementalIrEmitter::EmitFloatMax(llvm::Value* lhs_value,
llvm::Value* rhs_value) const {
- return llvm_ir::EmitFloatMax(lhs_value, rhs_value, ir_builder_);
+ return llvm_ir::EmitFloatMax(lhs_value, rhs_value, b_);
}
llvm::Value* ElementalIrEmitter::EmitFloatMin(llvm::Value* lhs_value,
llvm::Value* rhs_value) const {
- return llvm_ir::EmitFloatMin(lhs_value, rhs_value, ir_builder_);
+ return llvm_ir::EmitFloatMin(lhs_value, rhs_value, b_);
}
StatusOr<llvm::Value*> ElementalIrEmitter::EmitErfInv(PrimitiveType prim_type,
@@ -921,15 +902,14 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitErfInv(PrimitiveType prim_type,
"type F32.");
}
auto getFloat = [&](const float f) {
- return llvm::ConstantFP::get(ir_builder_->getFloatTy(), f);
+ return llvm::ConstantFP::get(b_->getFloatTy(), f);
};
auto multiply_add = [&](tensorflow::gtl::ArraySlice<float> coefficients,
llvm::Value* w) {
llvm::Value* p = getFloat(coefficients.front());
coefficients.pop_front();
for (float coefficient : coefficients) {
- p = ir_builder_->CreateFAdd(ir_builder_->CreateFMul(p, w),
- getFloat(coefficient));
+ p = b_->CreateFAdd(b_->CreateFMul(p, w), getFloat(coefficient));
}
return p;
};
@@ -947,50 +927,48 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitErfInv(PrimitiveType prim_type,
// }
// return p*x
llvm::Function* logf_fn = llvm::Intrinsic::getDeclaration(
- module_, llvm::Intrinsic::log, {ir_builder_->getFloatTy()});
+ module_, llvm::Intrinsic::log, {b_->getFloatTy()});
- llvm::Value* w = ir_builder_->CreateFNeg(ir_builder_->CreateCall(
- logf_fn,
- {ir_builder_->CreateFMul(ir_builder_->CreateFSub(getFloat(1.0f), x),
- ir_builder_->CreateFAdd(getFloat(1.0f), x))}));
+ llvm::Value* w = b_->CreateFNeg(b_->CreateCall(
+ logf_fn, {b_->CreateFMul(b_->CreateFSub(getFloat(1.0f), x),
+ b_->CreateFAdd(getFloat(1.0f), x))}));
- llvm::Value* p_addr = llvm_ir::EmitAllocaAtFunctionEntry(
- ir_builder_->getFloatTy(), "p.addr", ir_builder_);
+ llvm::Value* p_addr =
+ llvm_ir::EmitAllocaAtFunctionEntry(b_->getFloatTy(), "p.addr", b_);
- llvm_ir::LlvmIfData if_data =
- llvm_ir::EmitIfThenElse(ir_builder_->CreateFCmpOLT(w, getFloat(5.0f)),
- "w_less_than_five", ir_builder_);
+ llvm_ir::LlvmIfData if_data = llvm_ir::EmitIfThenElse(
+ b_->CreateFCmpOLT(w, getFloat(5.0f)), "w_less_than_five", b_);
// Handle true BB.
- SetToFirstInsertPoint(if_data.true_block, ir_builder_);
+ SetToFirstInsertPoint(if_data.true_block, b_);
{
- llvm::Value* lw = ir_builder_->CreateFSub(w, getFloat(2.5f));
+ llvm::Value* lw = b_->CreateFSub(w, getFloat(2.5f));
tensorflow::gtl::ArraySlice<float> lq{
2.81022636e-08f, 3.43273939e-07f, -3.5233877e-06f,
-4.39150654e-06f, 0.00021858087f, -0.00125372503f,
-0.00417768164f, 0.246640727f, 1.50140941f};
llvm::Value* p = multiply_add(lq, lw);
- ir_builder_->CreateStore(p, p_addr);
+ b_->CreateStore(p, p_addr);
}
// Handle false BB.
- SetToFirstInsertPoint(if_data.false_block, ir_builder_);
+ SetToFirstInsertPoint(if_data.false_block, b_);
{
llvm::Function* sqrtf_fn = llvm::Intrinsic::getDeclaration(
- module_, llvm::Intrinsic::sqrt, {ir_builder_->getFloatTy()});
+ module_, llvm::Intrinsic::sqrt, {b_->getFloatTy()});
- llvm::Value* gw = ir_builder_->CreateFSub(
- ir_builder_->CreateCall(sqrtf_fn, {w}), getFloat(3.0f));
+ llvm::Value* gw =
+ b_->CreateFSub(b_->CreateCall(sqrtf_fn, {w}), getFloat(3.0f));
tensorflow::gtl::ArraySlice<float> gq{
-0.000200214257f, 0.000100950558f, 0.00134934322f,
-0.00367342844f, 0.00573950773f, -0.0076224613f,
0.00943887047f, 1.00167406f, 2.83297682f};
llvm::Value* p = multiply_add(gq, gw);
- ir_builder_->CreateStore(p, p_addr);
+ b_->CreateStore(p, p_addr);
}
- SetToFirstInsertPoint(if_data.after_block, ir_builder_);
- llvm::Value* p = ir_builder_->CreateLoad(p_addr);
- return ir_builder_->CreateFMul(p, x);
+ SetToFirstInsertPoint(if_data.after_block, b_);
+ llvm::Value* p = b_->CreateLoad(p_addr);
+ return b_->CreateFMul(p, x);
}
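EmitErfInv above evaluates a branchy single-precision polynomial approximation of the inverse error function; the same coefficients and the same Horner recurrence as the multiply_add helper, written as plain C++ for reference (helper name hypothetical):

#include <cmath>

static float ErfInvSketch(float x) {
  float w = -std::log((1.0f - x) * (1.0f + x));
  float p;
  if (w < 5.0f) {
    w -= 2.5f;
    const float k[] = {2.81022636e-08f,  3.43273939e-07f, -3.5233877e-06f,
                       -4.39150654e-06f, 0.00021858087f,  -0.00125372503f,
                       -0.00417768164f,  0.246640727f,    1.50140941f};
    p = k[0];
    for (int i = 1; i < 9; ++i) p = p * w + k[i];
  } else {
    w = std::sqrt(w) - 3.0f;
    const float k[] = {-0.000200214257f, 0.000100950558f, 0.00134934322f,
                       -0.00367342844f,  0.00573950773f,  -0.0076224613f,
                       0.00943887047f,   1.00167406f,     2.83297682f};
    p = k[0];
    for (int i = 1; i < 9; ++i) p = p * w + k[i];
  }
  return p * x;
}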
StatusOr<llvm::Value*> ElementalIrEmitter::EmitErfcInv(
@@ -998,13 +976,13 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitErfcInv(
// Compute erfcinv(value) by calculating erfinv(1.0 - value).
auto type = llvm_ir::PrimitiveTypeToIrType(prim_type, module_);
auto one = llvm::ConstantFP::get(type, 1.0);
- return EmitErfInv(prim_type, ir_builder_->CreateFSub(one, value));
+ return EmitErfInv(prim_type, b_->CreateFSub(one, value));
}
StatusOr<llvm::Value*> ElementalIrEmitter::EmitLog(PrimitiveType prim_type,
llvm::Value* value) const {
return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::log, {value},
- {value->getType()}, ir_builder_);
+ {value->getType()}, b_);
}
StatusOr<llvm::Value*> ElementalIrEmitter::EmitLog1p(PrimitiveType prim_type,
@@ -1016,35 +994,34 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitLog1p(PrimitiveType prim_type,
// When x is large, the naive evaluation of ln(x + 1) is more
// accurate than the Taylor series.
TF_ASSIGN_OR_RETURN(auto for_large_x,
- EmitLog(prim_type, ir_builder_->CreateFAdd(x, one)));
+ EmitLog(prim_type, b_->CreateFAdd(x, one)));
// The Taylor series for ln(x+1) is x - x^2/2 + x^3/3 - x^4/4 + ….
- auto for_small_x = ir_builder_->CreateFMul(
- ir_builder_->CreateFAdd(ir_builder_->CreateFMul(negative_half, x), one),
- x);
+ auto for_small_x =
+ b_->CreateFMul(b_->CreateFAdd(b_->CreateFMul(negative_half, x), one), x);
const auto kAntilogarithmIsSmallThreshold = 1e-4;
- auto abs_x = llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {value},
- {type}, ir_builder_);
- auto x_is_small = ir_builder_->CreateFCmpOLT(
+ auto abs_x =
+ llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {value}, {type}, b_);
+ auto x_is_small = b_->CreateFCmpOLT(
abs_x, llvm::ConstantFP::get(type, kAntilogarithmIsSmallThreshold));
- return ir_builder_->CreateSelect(x_is_small, for_small_x, for_large_x);
+ return b_->CreateSelect(x_is_small, for_small_x, for_large_x);
}
StatusOr<llvm::Value*> ElementalIrEmitter::EmitSin(PrimitiveType prim_type,
llvm::Value* value) const {
return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::sin, {value},
- {value->getType()}, ir_builder_);
+ {value->getType()}, b_);
}
StatusOr<llvm::Value*> ElementalIrEmitter::EmitCos(PrimitiveType prim_type,
llvm::Value* value) const {
return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::cos, {value},
- {value->getType()}, ir_builder_);
+ {value->getType()}, b_);
}
StatusOr<llvm::Value*> ElementalIrEmitter::EmitExp(PrimitiveType prim_type,
llvm::Value* value) const {
return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::exp, {value},
- {value->getType()}, ir_builder_);
+ {value->getType()}, b_);
}
StatusOr<llvm::Value*> ElementalIrEmitter::EmitExpm1(PrimitiveType prim_type,
@@ -1056,25 +1033,25 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitExpm1(PrimitiveType prim_type,
// When the exponent is large, the naive evaluation of e^(x) - 1 is more
// accurate than the Taylor series.
TF_ASSIGN_OR_RETURN(auto exp_x, EmitExp(prim_type, value));
- auto for_large_x = ir_builder_->CreateFSub(exp_x, one);
+ auto for_large_x = b_->CreateFSub(exp_x, one);
// The Taylor series for exp(x) is 1 + x + x^2/2 + x^3/6 + ….
// We want exp(x)-1 which is x + x^2/2 + x^3/6 + ….
- auto x_squared = ir_builder_->CreateFAdd(x, x);
- auto x_squared_over_two = ir_builder_->CreateFMul(x_squared, half);
- auto for_small_x = ir_builder_->CreateFAdd(x, x_squared_over_two);
+ auto x_squared = b_->CreateFAdd(x, x);
+ auto x_squared_over_two = b_->CreateFMul(x_squared, half);
+ auto for_small_x = b_->CreateFAdd(x, x_squared_over_two);
const auto kExponentIsSmallThreshold = 1e-5;
- auto abs_x = llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {value},
- {type}, ir_builder_);
- auto x_is_small = ir_builder_->CreateFCmpOLT(
+ auto abs_x =
+ llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {value}, {type}, b_);
+ auto x_is_small = b_->CreateFCmpOLT(
abs_x, llvm::ConstantFP::get(type, kExponentIsSmallThreshold));
- return ir_builder_->CreateSelect(x_is_small, for_small_x, for_large_x);
+ return b_->CreateSelect(x_is_small, for_small_x, for_large_x);
}
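A plain-C++ sketch of the small-argument branches used by EmitLog1p and EmitExpm1 above, following the truncated Taylor expansions stated in the comments (thresholds copied from the code; helper names hypothetical):

#include <cmath>

static double Log1pSketch(double x) {
  // Near zero, x * (1 - x/2) avoids the cancellation in log(1 + x).
  return std::fabs(x) < 1e-4 ? x * (1.0 - 0.5 * x) : std::log(x + 1.0);
}

static double Expm1Sketch(double x) {
  // Near zero, x + x^2/2 avoids the cancellation in exp(x) - 1.
  return std::fabs(x) < 1e-5 ? x + 0.5 * x * x : std::exp(x) - 1.0;
}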
StatusOr<llvm::Value*> ElementalIrEmitter::EmitPow(PrimitiveType prim_type,
llvm::Value* lhs,
llvm::Value* rhs) const {
return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::pow, {lhs, rhs},
- {lhs->getType()}, ir_builder_);
+ {lhs->getType()}, b_);
}
StatusOr<llvm::Value*> ElementalIrEmitter::EmitAtan2(PrimitiveType prim_type,
@@ -1089,11 +1066,10 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitReducePrecision(
return Unimplemented("reduce-precision only implemented for F32");
}
return EmitReducePrecisionFloat(x, /*exponent_bits=*/hlo->exponent_bits(),
- /*mantissa_bits=*/hlo->mantissa_bits(),
- ir_builder_);
+ /*mantissa_bits=*/hlo->mantissa_bits(), b_);
}
-static llvm::Value* SaturateShiftIfNecessary(llvm::IRBuilder<>* ir_builder,
+static llvm::Value* SaturateShiftIfNecessary(llvm::IRBuilder<>* b,
llvm::Value* lhs, llvm::Value* rhs,
llvm::Value* shift_result,
bool saturate_to_sign_bit) {
@@ -1106,15 +1082,14 @@ static llvm::Value* SaturateShiftIfNecessary(llvm::IRBuilder<>* ir_builder,
llvm::ConstantInt* minus_one = llvm::ConstantInt::get(integer_type, -1);
llvm::Value* saturated_value;
if (saturate_to_sign_bit) {
- saturated_value = ir_builder->CreateSelect(
- ir_builder->CreateICmpSLT(lhs, zero), minus_one, zero);
+ saturated_value =
+ b->CreateSelect(b->CreateICmpSLT(lhs, zero), minus_one, zero);
} else {
saturated_value = zero;
}
llvm::Value* shift_amt_in_range =
- ir_builder->CreateICmpULT(rhs, integer_bitsize_constant, "shft.chk");
- return ir_builder->CreateSelect(shift_amt_in_range, shift_result,
- saturated_value);
+ b->CreateICmpULT(rhs, integer_bitsize_constant, "shft.chk");
+ return b->CreateSelect(shift_amt_in_range, shift_result, saturated_value);
}
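Scalar model of SaturateShiftIfNecessary above: a shift amount >= the bit width is a poison value in LLVM IR, so the emitter selects a well-defined result instead. Sketched here for 32-bit operands; the C++ version has to branch first, since an oversized shift is undefined behaviour in C++ as well, and it assumes the usual arithmetic right shift for signed values.

#include <cstdint>

static int32_t SaturatedAShrSketch(int32_t lhs, uint32_t rhs) {
  if (rhs >= 32) return lhs < 0 ? -1 : 0;  // saturate_to_sign_bit == true
  return lhs >> rhs;
}

static uint32_t SaturatedShlSketch(uint32_t lhs, uint32_t rhs) {
  if (rhs >= 32) return 0;                 // saturate_to_sign_bit == false
  return lhs << rhs;
}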
StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerBinaryOp(
@@ -1123,49 +1098,49 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerBinaryOp(
switch (op->opcode()) {
// TODO(jingyue): add the "nsw" attribute for signed types.
case HloOpcode::kAdd:
- return ir_builder_->CreateAdd(lhs_value, rhs_value);
+ return b_->CreateAdd(lhs_value, rhs_value);
case HloOpcode::kSubtract:
- return ir_builder_->CreateSub(lhs_value, rhs_value);
+ return b_->CreateSub(lhs_value, rhs_value);
case HloOpcode::kMultiply:
- return ir_builder_->CreateMul(lhs_value, rhs_value);
+ return b_->CreateMul(lhs_value, rhs_value);
case HloOpcode::kDivide:
- return is_signed ? ir_builder_->CreateSDiv(lhs_value, rhs_value)
- : ir_builder_->CreateUDiv(lhs_value, rhs_value);
+ return is_signed ? b_->CreateSDiv(lhs_value, rhs_value)
+ : b_->CreateUDiv(lhs_value, rhs_value);
case HloOpcode::kRemainder:
- return is_signed ? ir_builder_->CreateSRem(lhs_value, rhs_value)
- : ir_builder_->CreateURem(lhs_value, rhs_value);
+ return is_signed ? b_->CreateSRem(lhs_value, rhs_value)
+ : b_->CreateURem(lhs_value, rhs_value);
case HloOpcode::kEq:
return llvm_ir::EmitComparison(llvm::CmpInst::ICMP_EQ, lhs_value,
- rhs_value, ir_builder_);
+ rhs_value, b_);
case HloOpcode::kNe:
return llvm_ir::EmitComparison(llvm::CmpInst::ICMP_NE, lhs_value,
- rhs_value, ir_builder_);
+ rhs_value, b_);
case HloOpcode::kLt:
return llvm_ir::EmitComparison(
is_signed ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT,
- lhs_value, rhs_value, ir_builder_);
+ lhs_value, rhs_value, b_);
case HloOpcode::kGt:
return llvm_ir::EmitComparison(
is_signed ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT,
- lhs_value, rhs_value, ir_builder_);
+ lhs_value, rhs_value, b_);
case HloOpcode::kLe:
return llvm_ir::EmitComparison(
is_signed ? llvm::CmpInst::ICMP_SLE : llvm::CmpInst::ICMP_ULE,
- lhs_value, rhs_value, ir_builder_);
+ lhs_value, rhs_value, b_);
case HloOpcode::kGe:
return llvm_ir::EmitComparison(
is_signed ? llvm::CmpInst::ICMP_SGE : llvm::CmpInst::ICMP_UGE,
- lhs_value, rhs_value, ir_builder_);
+ lhs_value, rhs_value, b_);
case HloOpcode::kMinimum:
return EmitIntegralMin(lhs_value, rhs_value, is_signed);
case HloOpcode::kMaximum:
return EmitIntegralMax(lhs_value, rhs_value, is_signed);
case HloOpcode::kAnd:
- return ir_builder_->CreateAnd(lhs_value, rhs_value);
+ return b_->CreateAnd(lhs_value, rhs_value);
case HloOpcode::kOr:
- return ir_builder_->CreateOr(lhs_value, rhs_value);
+ return b_->CreateOr(lhs_value, rhs_value);
case HloOpcode::kXor:
- return ir_builder_->CreateXor(lhs_value, rhs_value);
+ return b_->CreateXor(lhs_value, rhs_value);
// Shifting out bits >= the number of bits in the type being shifted
// produces a poison value in LLVM which is basically "deferred undefined
@@ -1173,20 +1148,17 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerBinaryOp(
// UB. We replace the poison value with a constant to avoid this deferred
// UB.
case HloOpcode::kShiftRightArithmetic:
- return SaturateShiftIfNecessary(
- ir_builder_, lhs_value, rhs_value,
- ir_builder_->CreateAShr(lhs_value, rhs_value),
- /*saturate_to_sign_bit=*/true);
+ return SaturateShiftIfNecessary(b_, lhs_value, rhs_value,
+ b_->CreateAShr(lhs_value, rhs_value),
+ /*saturate_to_sign_bit=*/true);
case HloOpcode::kShiftLeft:
- return SaturateShiftIfNecessary(
- ir_builder_, lhs_value, rhs_value,
- ir_builder_->CreateShl(lhs_value, rhs_value),
- /*saturate_to_sign_bit=*/false);
+ return SaturateShiftIfNecessary(b_, lhs_value, rhs_value,
+ b_->CreateShl(lhs_value, rhs_value),
+ /*saturate_to_sign_bit=*/false);
case HloOpcode::kShiftRightLogical:
- return SaturateShiftIfNecessary(
- ir_builder_, lhs_value, rhs_value,
- ir_builder_->CreateLShr(lhs_value, rhs_value),
- /*saturate_to_sign_bit=*/false);
+ return SaturateShiftIfNecessary(b_, lhs_value, rhs_value,
+ b_->CreateLShr(lhs_value, rhs_value),
+ /*saturate_to_sign_bit=*/false);
default:
return Unimplemented("binary integer op '%s'",
HloOpcodeString(op->opcode()).c_str());
@@ -1196,21 +1168,19 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitIntegerBinaryOp(
llvm::Value* ElementalIrEmitter::EmitIntegralMax(llvm::Value* lhs_value,
llvm::Value* rhs_value,
bool is_signed) const {
- return ir_builder_->CreateSelect(
- ir_builder_->CreateICmp(
- is_signed ? llvm::ICmpInst::ICMP_SGE : llvm::ICmpInst::ICMP_UGE,
- lhs_value, rhs_value),
- lhs_value, rhs_value);
+ return b_->CreateSelect(b_->CreateICmp(is_signed ? llvm::ICmpInst::ICMP_SGE
+ : llvm::ICmpInst::ICMP_UGE,
+ lhs_value, rhs_value),
+ lhs_value, rhs_value);
}
llvm::Value* ElementalIrEmitter::EmitIntegralMin(llvm::Value* lhs_value,
llvm::Value* rhs_value,
bool is_signed) const {
- return ir_builder_->CreateSelect(
- ir_builder_->CreateICmp(
- is_signed ? llvm::ICmpInst::ICMP_SLE : llvm::ICmpInst::ICMP_ULE,
- lhs_value, rhs_value),
- lhs_value, rhs_value);
+ return b_->CreateSelect(b_->CreateICmp(is_signed ? llvm::ICmpInst::ICMP_SLE
+ : llvm::ICmpInst::ICMP_ULE,
+ lhs_value, rhs_value),
+ lhs_value, rhs_value);
}
llvm_ir::IrArray::Index ElementalIrEmitter::ElementwiseSourceIndex(
@@ -1263,10 +1233,10 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeRngElementGenerator(
// Same values as PCG library
// https://github.com/imneme/pcg-c/blob/master/include/pcg_variants.h
- llvm::Value* multiplier = ir_builder_->getInt(
- llvm::APInt(128, {0x4385DF649FCCF645, 0x2360ED051FC65DA4}));
- llvm::Value* increment = ir_builder_->getInt(
- llvm::APInt(128, {0x14057B7EF767814F, 0x5851F42D4C957F2D}));
+ llvm::Value* multiplier =
+ b_->getInt(llvm::APInt(128, {0x4385DF649FCCF645, 0x2360ED051FC65DA4}));
+ llvm::Value* increment =
+ b_->getInt(llvm::APInt(128, {0x14057B7EF767814F, 0x5851F42D4C957F2D}));
auto random_value_from_hlo = [hlo]() {
const HloModule* module =
@@ -1287,10 +1257,10 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeRngElementGenerator(
// values.
llvm::GlobalVariable* state_ptr0 = new llvm::GlobalVariable(
/*M=*/*module_,
- /*Ty=*/ir_builder_->getInt64Ty(),
+ /*Ty=*/b_->getInt64Ty(),
/*isConstant=*/false,
/*Linkage=*/llvm::GlobalValue::PrivateLinkage,
- /*Initializer=*/ir_builder_->getInt64(random_value_from_hlo()),
+ /*Initializer=*/b_->getInt64(random_value_from_hlo()),
/*Name=*/"state_ptr0");
// When the module config seed is 0, the expected result of a prng is a random
@@ -1301,17 +1271,16 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeRngElementGenerator(
: GlobalRandomValue();
llvm::GlobalVariable* state_ptr1 = new llvm::GlobalVariable(
/*M=*/*module_,
- /*Ty=*/ir_builder_->getInt64Ty(),
+ /*Ty=*/b_->getInt64Ty(),
/*isConstant=*/false,
/*Linkage=*/llvm::GlobalValue::PrivateLinkage,
- /*Initializer=*/ir_builder_->getInt64(graph_seed),
+ /*Initializer=*/b_->getInt64(graph_seed),
/*Name=*/"state_ptr1");
// We want each thread to use its own stream, so we modify the increment per
// thread. We want the increment to remain odd, so we shift the thread id left
// 1 and add it to the increment.
- increment = ir_builder_->CreateAdd(increment,
- ir_builder_->CreateShl(EmitThreadId(), 1));
+ increment = b_->CreateAdd(increment, b_->CreateShl(EmitThreadId(), 1));
// PCG-XSL-RR algorithm
// http://www.pcg-random.org/pdf/toms-oneill-pcg-family-v1.02.pdf
@@ -1319,38 +1288,29 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeRngElementGenerator(
// return uint64_t(state ^ (state >> 64))) >>> (state >> 122)
// where ">>>" is bitwise rotation
auto get_next_i64 = [=]() {
- llvm::Value* state0 = ir_builder_->CreateZExtOrTrunc(
- ir_builder_->CreateLoad(state_ptr0, "state0"),
- ir_builder_->getInt128Ty());
- llvm::Value* state1 = ir_builder_->CreateShl(
- ir_builder_->CreateZExtOrTrunc(
- ir_builder_->CreateLoad(state_ptr1, "state1"),
- ir_builder_->getInt128Ty()),
+ llvm::Value* state0 = b_->CreateZExtOrTrunc(
+ b_->CreateLoad(state_ptr0, "state0"), b_->getInt128Ty());
+ llvm::Value* state1 = b_->CreateShl(
+ b_->CreateZExtOrTrunc(b_->CreateLoad(state_ptr1, "state1"),
+ b_->getInt128Ty()),
64);
- llvm::Value* state = ir_builder_->CreateOr(state0, state1);
- llvm::Value* updated = ir_builder_->CreateAdd(
- ir_builder_->CreateMul(state, multiplier), increment);
- ir_builder_->CreateStore(
- ir_builder_->CreateTrunc(updated, ir_builder_->getInt64Ty()),
- state_ptr0);
- ir_builder_->CreateStore(
- ir_builder_->CreateTrunc(ir_builder_->CreateLShr(updated, 64),
- ir_builder_->getInt64Ty()),
+ llvm::Value* state = b_->CreateOr(state0, state1);
+ llvm::Value* updated =
+ b_->CreateAdd(b_->CreateMul(state, multiplier), increment);
+ b_->CreateStore(b_->CreateTrunc(updated, b_->getInt64Ty()), state_ptr0);
+ b_->CreateStore(
+ b_->CreateTrunc(b_->CreateLShr(updated, 64), b_->getInt64Ty()),
state_ptr1);
return llvm_ir::CreateRor(
- ir_builder_->CreateTrunc(
- ir_builder_->CreateXor(state, ir_builder_->CreateLShr(state, 64)),
- ir_builder_->getInt64Ty()),
- ir_builder_->CreateTrunc(ir_builder_->CreateLShr(state, 122),
- ir_builder_->getInt64Ty()),
- ir_builder_);
+ b_->CreateTrunc(b_->CreateXor(state, b_->CreateLShr(state, 64)),
+ b_->getInt64Ty()),
+ b_->CreateTrunc(b_->CreateLShr(state, 122), b_->getInt64Ty()), b_);
};
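get_next_i64 above is one step of PCG-XSL-RR on a 128-bit LCG state split across the two global variables. A scalar sketch with the same multiplier constant, relying on the GCC/Clang unsigned __int128 extension; the increment is a parameter because the emitter perturbs it per thread, and it must stay odd.

#include <cstdint>

static uint64_t PcgXslRrSketch(unsigned __int128* state,
                               unsigned __int128 increment) {
  const unsigned __int128 kMultiplier =
      (static_cast<unsigned __int128>(0x2360ED051FC65DA4ULL) << 64) |
      0x4385DF649FCCF645ULL;
  const unsigned __int128 s = *state;
  *state = s * kMultiplier + increment;                          // LCG step
  const uint64_t folded = static_cast<uint64_t>(s ^ (s >> 64));  // xorshift fold
  const unsigned rot = static_cast<unsigned>(s >> 122);          // top 6 bits
  return rot == 0 ? folded : (folded >> rot) | (folded << (64 - rot));
}

// The emitter maps the 64 random bits to a float in [0, 1) by dividing by 2^64.
static double NextUniformSketch(uint64_t bits) {
  return static_cast<double>(bits) * 0x1p-64;
}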
auto get_next_uniform_float = [=]() {
- return ir_builder_->CreateFDiv(
- ir_builder_->CreateUIToFP(get_next_i64(), param_ir_type),
- llvm::ConstantFP::get(param_ir_type, 0x1p64));
+ return b_->CreateFDiv(b_->CreateUIToFP(get_next_i64(), param_ir_type),
+ llvm::ConstantFP::get(param_ir_type, 0x1p64));
};
return [=](const llvm_ir::IrArray::Index& index) -> StatusOr<llvm::Value*> {
@@ -1361,52 +1321,50 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeRngElementGenerator(
TF_ASSIGN_OR_RETURN(llvm::Value * q,
operand_to_generator.at(hlo->operand(1))(index));
if (primitive_util::IsFloatingPointType(param_prim_type)) {
- return ir_builder_->CreateFAdd(
- ir_builder_->CreateFMul(ir_builder_->CreateFSub(q, p),
- get_next_uniform_float()),
+ return b_->CreateFAdd(
+ b_->CreateFMul(b_->CreateFSub(q, p), get_next_uniform_float()),
p);
} else {
- auto r = ir_builder_->CreateSub(q, p);
+ auto r = b_->CreateSub(q, p);
auto leading_zeros = llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::ctlz, {r, ir_builder_->getInt1(true)},
- {param_ir_type}, ir_builder_);
- auto in_block = ir_builder_->GetInsertBlock();
+ llvm::Intrinsic::ctlz, {r, b_->getInt1(true)}, {param_ir_type},
+ b_);
+ auto in_block = b_->GetInsertBlock();
// A terminator should be present iff we're emitting code
// into the middle (as opposed to the end) of a basic block.
- CHECK_EQ(ir_builder_->GetInsertPoint() == in_block->end(),
+ CHECK_EQ(b_->GetInsertPoint() == in_block->end(),
in_block->getTerminator() == nullptr);
llvm::BasicBlock* body_block;
llvm::BasicBlock* out_block;
- if (ir_builder_->GetInsertPoint() == in_block->end()) {
- body_block = llvm_ir::CreateBasicBlock(
- nullptr, IrName(hlo, "rng_body"), ir_builder_);
- out_block = llvm_ir::CreateBasicBlock(
- nullptr, IrName(hlo, "rng_out"), ir_builder_);
+ if (b_->GetInsertPoint() == in_block->end()) {
+ body_block =
+ llvm_ir::CreateBasicBlock(nullptr, IrName(hlo, "rng_body"), b_);
+ out_block =
+ llvm_ir::CreateBasicBlock(nullptr, IrName(hlo, "rng_out"), b_);
llvm::BranchInst::Create(body_block, in_block);
} else {
- body_block = in_block->splitBasicBlock(
- ir_builder_->GetInsertPoint(), "rng_body");
- out_block = body_block->splitBasicBlock(
- ir_builder_->GetInsertPoint(), "rng_out");
+ body_block =
+ in_block->splitBasicBlock(b_->GetInsertPoint(), "rng_body");
+ out_block =
+ body_block->splitBasicBlock(b_->GetInsertPoint(), "rng_out");
body_block->getTerminator()->eraseFromParent();
}
- SetToFirstInsertPoint(body_block, ir_builder_);
- auto random = ir_builder_->CreateAnd(
- ir_builder_->CreateZExtOrTrunc(get_next_i64(), param_ir_type),
- ir_builder_->CreateLShr(llvm::ConstantInt::get(param_ir_type, ~0),
- leading_zeros));
+ SetToFirstInsertPoint(body_block, b_);
+ auto random = b_->CreateAnd(
+ b_->CreateZExtOrTrunc(get_next_i64(), param_ir_type),
+ b_->CreateLShr(llvm::ConstantInt::get(param_ir_type, ~0),
+ leading_zeros));
llvm::BranchInst::Create(out_block, body_block,
- ir_builder_->CreateICmpULT(random, r),
- body_block);
- SetToFirstInsertPoint(out_block, ir_builder_);
- return ir_builder_->CreateAdd(
- p, ir_builder_->CreateSelect(
- ir_builder_->CreateICmpEQ(p, q),
- llvm::ConstantInt::get(param_ir_type, 0), random));
+ b_->CreateICmpULT(random, r), body_block);
+ SetToFirstInsertPoint(out_block, b_);
+ return b_->CreateAdd(
+ p, b_->CreateSelect(b_->CreateICmpEQ(p, q),
+ llvm::ConstantInt::get(param_ir_type, 0),
+ random));
}
}
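Scalar model of the integer RNG_UNIFORM path above: mask fresh random bits down to the bit width of the range, then rejection-sample until the draw fits. __builtin_clzll is the GCC/Clang counterpart of the ctlz intrinsic; 64-bit types are used for concreteness, and the helper names are hypothetical.

#include <cstdint>

static uint64_t UniformIntSketch(uint64_t p, uint64_t q, uint64_t (*next)()) {
  if (p == q) return p;                    // the emitter handles this with a select
  const uint64_t r = q - p;                // range size
  const uint64_t mask = ~0ULL >> __builtin_clzll(r);
  uint64_t draw;
  do {
    draw = next() & mask;                  // keep only as many bits as needed
  } while (draw >= r);                     // reject out-of-range draws
  return p + draw;
}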
case RNG_NORMAL: {
@@ -1416,11 +1374,11 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeRngElementGenerator(
operand_to_generator.at(hlo->operand(1))(index));
TF_ASSIGN_OR_RETURN(
llvm::Value * r,
- EmitErfcInv(param_prim_type,
- ir_builder_->CreateFMul(
- llvm::ConstantFP::get(param_ir_type, 2.0),
- get_next_uniform_float())));
- return ir_builder_->CreateFAdd(ir_builder_->CreateFMul(r, s), m);
+ EmitErfcInv(
+ param_prim_type,
+ b_->CreateFMul(llvm::ConstantFP::get(param_ir_type, 2.0),
+ get_next_uniform_float())));
+ return b_->CreateFAdd(b_->CreateFMul(r, s), m);
}
default:
return InvalidArgument(
@@ -1443,9 +1401,8 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalSelect(
TF_ASSIGN_OR_RETURN(llvm::Value * on_false_value,
operand_to_generator.at(hlo->operand(2))(
ElementwiseSourceIndex(index, *hlo, 2)));
- return ir_builder_->CreateSelect(
- ir_builder_->CreateTrunc(pred_value, ir_builder_->getInt1Ty()),
- on_true_value, on_false_value);
+ return b_->CreateSelect(b_->CreateTrunc(pred_value, b_->getInt1Ty()),
+ on_true_value, on_false_value);
}
StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalClamp(
@@ -1481,64 +1438,62 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalConcatenate(
const int64 concat_dim = hlo->dimensions(0);
auto source_index = target_index;
- llvm::BasicBlock* init_block = ir_builder_->GetInsertBlock();
+ llvm::BasicBlock* init_block = b_->GetInsertBlock();
// A terminator should be present iff we're emitting code
// into the middle (as opposed to the end) of a basic block.
- CHECK_EQ(ir_builder_->GetInsertPoint() == init_block->end(),
+ CHECK_EQ(b_->GetInsertPoint() == init_block->end(),
init_block->getTerminator() == nullptr);
llvm::BasicBlock* exit_block;
- if (ir_builder_->GetInsertPoint() == init_block->end()) {
+ if (b_->GetInsertPoint() == init_block->end()) {
exit_block = llvm_ir::CreateBasicBlock(
- /*insert_before=*/nullptr, IrName(hlo, "merge"), ir_builder_);
+ /*insert_before=*/nullptr, IrName(hlo, "merge"), b_);
} else {
- exit_block = init_block->splitBasicBlock(ir_builder_->GetInsertPoint(),
+ exit_block = init_block->splitBasicBlock(b_->GetInsertPoint(),
AsStringRef(IrName(hlo, "merge")));
init_block->getTerminator()->eraseFromParent();
}
- llvm_ir::SetToFirstInsertPoint(exit_block, ir_builder_);
- llvm::PHINode* output = ir_builder_->CreatePHI(
+ llvm_ir::SetToFirstInsertPoint(exit_block, b_);
+ llvm::PHINode* output = b_->CreatePHI(
llvm_ir::PrimitiveTypeToIrType(hlo->shape().element_type(), module_),
hlo->operands().size());
- auto prior_insert_point = ir_builder_->GetInsertPoint();
+ auto prior_insert_point = b_->GetInsertPoint();
- ir_builder_->SetInsertPoint(init_block);
+ b_->SetInsertPoint(init_block);
for (int64 operand_idx = 0; operand_idx < hlo->operand_count();
++operand_idx) {
const HloInstruction* operand = hlo->operand(operand_idx);
auto true_block = llvm_ir::CreateBasicBlock(
- exit_block, StrCat("concat_index_from_operand", operand_idx),
- ir_builder_);
+ exit_block, StrCat("concat_index_from_operand", operand_idx), b_);
auto false_block = llvm_ir::CreateBasicBlock(
- exit_block, StrCat("concat_index_not_from_operand", operand_idx),
- ir_builder_);
+ exit_block, StrCat("concat_index_not_from_operand", operand_idx), b_);
auto concat_dim_size =
llvm::ConstantInt::get(source_index[concat_dim]->getType(),
operand->shape().dimensions(concat_dim));
- ir_builder_->CreateCondBr(
- ir_builder_->CreateICmpULT(source_index[concat_dim], concat_dim_size),
+ b_->CreateCondBr(
+ b_->CreateICmpULT(source_index[concat_dim], concat_dim_size),
true_block, false_block);
// Create the terminator of the true block before calling operand
// generators, because they require non-degenerate basic blocks.
- ir_builder_->SetInsertPoint(
+ b_->SetInsertPoint(
llvm::BranchInst::Create(exit_block, /*InsertAtEnd=*/true_block));
TF_ASSIGN_OR_RETURN(llvm::Value * value,
operand_to_generator.at(operand)(source_index));
- output->addIncoming(value, ir_builder_->GetInsertBlock());
+ output->addIncoming(value, b_->GetInsertBlock());
// Subtract the size of the concat dimension of the current operand
// from the source index.
- ir_builder_->SetInsertPoint(false_block);
+ b_->SetInsertPoint(false_block);
source_index[concat_dim] =
- ir_builder_->CreateSub(source_index[concat_dim], concat_dim_size);
+ b_->CreateSub(source_index[concat_dim], concat_dim_size);
}
- ir_builder_->CreateUnreachable();
- ir_builder_->SetInsertPoint(exit_block, prior_insert_point);
+ b_->CreateUnreachable();
+ b_->SetInsertPoint(exit_block, prior_insert_point);
return output;
}
@@ -1566,18 +1521,16 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalDynamicSlice(
// TODO(b/74360564): This is implementation defined behavior, but is
// currently respected by all implementations. Change this if we ever decide
// to officially document different behavior.
- start_index_value =
- ir_builder_->CreateSExtOrTrunc(start_index_value, index_type);
- llvm::Value* operand_dim_size =
- index_typed_const(input_hlo->shape().dimensions(i));
- llvm::Value* output_dim_size =
- index_typed_const(hlo->shape().dimensions(i));
+ start_index_value = b_->CreateSExtOrTrunc(start_index_value, index_type);
+ int64 largest_valid_start_index =
+ input_hlo->shape().dimensions(i) - hlo->shape().dimensions(i);
+ CHECK_GE(largest_valid_start_index, 0);
+ bool is_signed = ShapeUtil::ElementIsSigned(hlo->operand(1)->shape());
start_index_value = EmitIntegralMin(
- ir_builder_->CreateSub(operand_dim_size, output_dim_size),
- EmitIntegralMax(index_typed_const(0), start_index_value,
- /*is_signed=*/true),
- /*is_signed=*/true);
+ index_typed_const(largest_valid_start_index),
+ EmitIntegralMax(index_typed_const(0), start_index_value, is_signed),
+ is_signed);
start_index_value->setName(
AsStringRef(IrName(hlo, StrCat("start_idx", i))));
@@ -1588,7 +1541,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalDynamicSlice(
for (int64 i = 0; i < rank; ++i) {
// Emit IR which computes:
// input_index = start_index + offset_index
- input_index[i] = ir_builder_->CreateAdd(slice_start_index[i], index[i]);
+ input_index[i] = b_->CreateAdd(slice_start_index[i], index[i]);
}
return operand_to_generator.at(input_hlo)(input_index);
}
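Note: the rewritten bounds handling above replaces the IR-level subtraction of the two dimension sizes with a host-side constant and takes the signedness from the start-index operand; the same clamp is reused for gather and dynamic-update-slice further down. A scalar sketch of the rule, with a hypothetical helper name:

    #include <algorithm>
    #include <cstdint>

    // Clamp a dynamic-slice start index into [0, input_dim - output_dim] so a
    // window of size output_dim always stays inside the operand.
    int64_t ClampStartIndex(int64_t start, int64_t input_dim, int64_t output_dim) {
      const int64_t largest_valid_start = input_dim - output_dim;  // CHECK_GE(..., 0)
      return std::min(largest_valid_start, std::max<int64_t>(0, start));
    }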
@@ -1646,7 +1599,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalGather(
auto add_to_operand_index = [&](llvm::Value* index_component, int64 dim) {
llvm::Value* gather_dim_component_extended =
- ir_builder_->CreateSExtOrTrunc(index_component, index_type);
+ b_->CreateSExtOrTrunc(index_component, index_type);
int64 operand_dim = dim_numbers.gather_dims_to_operand_dims(dim);
int64 output_dim = operand_to_output_dim[operand_dim];
// If 'output_dim' is -1, it means 'operand_dim' is an elided window dim.
@@ -1663,16 +1616,14 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalGather(
// clamp(gather_dim_component_extended, 0, largest_valid_start_index);
// TODO(b/111078873): This is implementation defined behavior.
-
bool is_signed = ShapeUtil::ElementIsSigned(indices_shape);
auto gather_dim_component_extended_inbound = EmitIntegralMin(
index.GetConstantWithIndexType(largest_valid_start_index),
EmitIntegralMax(index.GetConstantWithIndexType(0),
- gather_dim_component_extended,
- /*is_signed=*/is_signed),
- /*is_signed=*/is_signed);
+ gather_dim_component_extended, is_signed),
+ is_signed);
- operand_index[operand_dim] = ir_builder_->CreateAdd(
+ operand_index[operand_dim] = b_->CreateAdd(
operand_index[operand_dim], gather_dim_component_extended_inbound);
};
@@ -1707,7 +1658,7 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalDynamicUpdateSlice(
llvm_ir::IrArray::Index slice_limit_index(index.GetType(), rank);
// Slice intersection gathers (ANDs) conditions on all ranks for which
// 'input' is set to 'update'
- llvm::Value* slice_intersection = ir_builder_->getTrue();
+ llvm::Value* slice_intersection = b_->getTrue();
for (int64 i = 0; i < rank; ++i) {
llvm::Type* index_type = index[0]->getType();
@@ -1724,32 +1675,29 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalDynamicUpdateSlice(
// TODO(b/74360564): This is implementation defined behavior, but is
// currently respected by all implementations. Change this if we ever decide
// to officially document different behavior.
- start_index_value =
- ir_builder_->CreateSExtOrTrunc(start_index_value, index_type);
- llvm::Value* input_dim_size =
- index_typed_const(input_hlo->shape().dimensions(i));
+ start_index_value = b_->CreateSExtOrTrunc(start_index_value, index_type);
llvm::Value* update_dim_size =
index_typed_const(update_hlo->shape().dimensions(i));
+ int64 largest_valid_start_index =
+ input_hlo->shape().dimensions(i) - update_hlo->shape().dimensions(i);
+ CHECK_GE(largest_valid_start_index, 0);
- start_index_value =
- EmitIntegralMin(ir_builder_->CreateSub(input_dim_size, update_dim_size),
- EmitIntegralMax(index_typed_const(0), start_index_value,
- /*is_signed=*/true),
- /*is_signed=*/true);
+ bool is_signed = ShapeUtil::ElementIsSigned(start_hlo->shape());
+ start_index_value = EmitIntegralMin(
+ index_typed_const(largest_valid_start_index),
+ EmitIntegralMax(index_typed_const(0), start_index_value, is_signed),
+ is_signed);
start_index_value->setName(
AsStringRef(IrName(hlo, StrCat("start_idx", i))));
slice_start_index[i] = start_index_value;
- slice_limit_index[i] =
- ir_builder_->CreateAdd(slice_start_index[i], update_dim_size);
+ slice_limit_index[i] = b_->CreateAdd(slice_start_index[i], update_dim_size);
- slice_intersection = ir_builder_->CreateAnd(
- slice_intersection,
- ir_builder_->CreateICmpSGE(index[i], slice_start_index[i]),
+ slice_intersection = b_->CreateAnd(
+ slice_intersection, b_->CreateICmpSGE(index[i], slice_start_index[i]),
"slice_intersection");
- slice_intersection = ir_builder_->CreateAnd(
- slice_intersection,
- ir_builder_->CreateICmpSLT(index[i], slice_limit_index[i]),
+ slice_intersection = b_->CreateAnd(
+ slice_intersection, b_->CreateICmpSLT(index[i], slice_limit_index[i]),
"slice_intersection");
}
@@ -1758,29 +1706,29 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalDynamicUpdateSlice(
// else -> return data from 'input'.
llvm::Value* ret_value_addr = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(hlo->shape().element_type(), module_),
- "ret_value_addr", ir_builder_);
- llvm_ir::LlvmIfData if_data = llvm_ir::EmitIfThenElse(
- slice_intersection, "slice_intersection", ir_builder_);
+ "ret_value_addr", b_);
+ llvm_ir::LlvmIfData if_data =
+ llvm_ir::EmitIfThenElse(slice_intersection, "slice_intersection", b_);
// Handle true BB (return data from 'update')
- SetToFirstInsertPoint(if_data.true_block, ir_builder_);
+ SetToFirstInsertPoint(if_data.true_block, b_);
// Compute update index for intersection case.
llvm_ir::IrArray::Index update_index(index.GetType(), rank);
for (int64 i = 0; i < rank; ++i) {
- update_index[i] = ir_builder_->CreateSub(index[i], slice_start_index[i]);
+ update_index[i] = b_->CreateSub(index[i], slice_start_index[i]);
}
TF_ASSIGN_OR_RETURN(llvm::Value * true_value,
operand_to_generator.at(update_hlo)(update_index));
- ir_builder_->CreateStore(true_value, ret_value_addr);
+ b_->CreateStore(true_value, ret_value_addr);
// Handle false BB (return data from 'input')
- SetToFirstInsertPoint(if_data.false_block, ir_builder_);
+ SetToFirstInsertPoint(if_data.false_block, b_);
TF_ASSIGN_OR_RETURN(llvm::Value * false_value,
operand_to_generator.at(input_hlo)(index));
- ir_builder_->CreateStore(false_value, ret_value_addr);
+ b_->CreateStore(false_value, ret_value_addr);
- SetToFirstInsertPoint(if_data.after_block, ir_builder_);
- return ir_builder_->CreateLoad(ret_value_addr);
+ SetToFirstInsertPoint(if_data.after_block, b_);
+ return b_->CreateLoad(ret_value_addr);
}
StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalPad(
@@ -1788,29 +1736,29 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalPad(
const ElementalIrEmitter::HloToElementGeneratorMap& operand_to_generator,
const llvm_ir::IrArray::Index& padded_index) const {
auto index = padded_index;
- llvm::Value* in_bounds = ir_builder_->getTrue();
+ llvm::Value* in_bounds = b_->getTrue();
for (size_t i = 0; i < index.size(); ++i) {
auto index_typed_const = [=](int64 n) {
return llvm::ConstantInt::get(index[i]->getType(), n);
};
const auto& pad_dim = hlo->padding_config().dimensions(i);
- index[i] = ir_builder_->CreateSub(
- index[i], index_typed_const(pad_dim.edge_padding_low()));
- in_bounds = ir_builder_->CreateAnd(
- in_bounds, ir_builder_->CreateICmpSGE(index[i], index_typed_const(0)),
- "in_bounds");
- in_bounds = ir_builder_->CreateAnd(
+ index[i] =
+ b_->CreateSub(index[i], index_typed_const(pad_dim.edge_padding_low()));
+ in_bounds = b_->CreateAnd(in_bounds,
+ b_->CreateICmpSGE(index[i], index_typed_const(0)),
+ "in_bounds");
+ in_bounds = b_->CreateAnd(
in_bounds,
- ir_builder_->CreateICmpEQ(
+ b_->CreateICmpEQ(
index_typed_const(0),
- ir_builder_->CreateURem(
- index[i], index_typed_const(pad_dim.interior_padding() + 1))),
+ b_->CreateURem(index[i],
+ index_typed_const(pad_dim.interior_padding() + 1))),
"in_bounds");
- index[i] = ir_builder_->CreateSDiv(
+ index[i] = b_->CreateSDiv(
index[i], index_typed_const(pad_dim.interior_padding() + 1));
- in_bounds = ir_builder_->CreateAnd(
+ in_bounds = b_->CreateAnd(
in_bounds,
- ir_builder_->CreateICmpSLT(
+ b_->CreateICmpSLT(
index[i],
index_typed_const(hlo->operand(0)->shape().dimensions(i))),
"in_bounds");
@@ -1823,26 +1771,26 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalPad(
// }
llvm::Value* ret_value_addr = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(hlo->shape().element_type(), module_),
- "pad_result_addr", ir_builder_);
+ "pad_result_addr", b_);
llvm_ir::LlvmIfData if_data =
- llvm_ir::EmitIfThenElse(in_bounds, "in_bounds", ir_builder_);
- SetToFirstInsertPoint(if_data.true_block, ir_builder_);
+ llvm_ir::EmitIfThenElse(in_bounds, "in_bounds", b_);
+ SetToFirstInsertPoint(if_data.true_block, b_);
TF_ASSIGN_OR_RETURN(llvm::Value * operand_value,
operand_to_generator.at(hlo->operand(0))(index));
- ir_builder_->CreateStore(operand_value, ret_value_addr);
+ b_->CreateStore(operand_value, ret_value_addr);
- SetToFirstInsertPoint(if_data.false_block, ir_builder_);
+ SetToFirstInsertPoint(if_data.false_block, b_);
TF_ASSIGN_OR_RETURN(llvm::Value * padding_value,
operand_to_generator.at(hlo->operand(1))(
IrArray::Index(index.GetType())));
- ir_builder_->CreateStore(padding_value, ret_value_addr);
+ b_->CreateStore(padding_value, ret_value_addr);
- SetToFirstInsertPoint(if_data.after_block, ir_builder_);
+ SetToFirstInsertPoint(if_data.after_block, b_);
// Don't create phi(operand_value, padding_value) here, because invoking
// operand_to_generator may create new basic blocks, making the parent
// of operand_value or padding_value no longer a predecessor of
// if_data.after_block.
- return ir_builder_->CreateLoad(ret_value_addr);
+ return b_->CreateLoad(ret_value_addr);
}
StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalDot(
@@ -1866,21 +1814,20 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalDot(
return llvm::ConstantInt::get(index_type, c);
};
- std::unique_ptr<llvm_ir::ForLoop> inner_loop =
- llvm_ir::ForLoop::EmitForLoop(IrName(hlo, "inner"), index_typed_const(0),
- index_typed_const(contracted_dim_size),
- index_typed_const(1), ir_builder_);
+ std::unique_ptr<llvm_ir::ForLoop> inner_loop = llvm_ir::ForLoop::EmitForLoop(
+ IrName(hlo, "inner"), index_typed_const(0),
+ index_typed_const(contracted_dim_size), index_typed_const(1), b_);
- SetToFirstInsertPoint(inner_loop->GetPreheaderBasicBlock(), ir_builder_);
+ SetToFirstInsertPoint(inner_loop->GetPreheaderBasicBlock(), b_);
PrimitiveType primitive_type = hlo->shape().element_type();
llvm::Type* primitive_type_llvm =
llvm_ir::PrimitiveTypeToIrType(primitive_type, module_);
- llvm::Value* accumulator_alloca = llvm_ir::EmitAllocaAtFunctionEntry(
- primitive_type_llvm, "dot_acc", ir_builder_);
- ir_builder_->CreateStore(llvm::Constant::getNullValue(primitive_type_llvm),
- accumulator_alloca);
+ llvm::Value* accumulator_alloca =
+ llvm_ir::EmitAllocaAtFunctionEntry(primitive_type_llvm, "dot_acc", b_);
+ b_->CreateStore(llvm::Constant::getNullValue(primitive_type_llvm),
+ accumulator_alloca);
- SetToFirstInsertPoint(inner_loop->GetBodyBasicBlock(), ir_builder_);
+ SetToFirstInsertPoint(inner_loop->GetBodyBasicBlock(), b_);
// This is the inner reduction loop for a dot operation that produces
// one element in the output. If the operands to the dot operation have
@@ -1900,43 +1847,36 @@ StatusOr<llvm::Value*> ElementalIrEmitter::EmitElementalDot(
}
rhs_index.InsertAt(rhs_contracting_dim, inner_loop->GetIndVarValue());
- llvm::Value* current_accumulator =
- ir_builder_->CreateLoad(accumulator_alloca);
+ llvm::Value* current_accumulator = b_->CreateLoad(accumulator_alloca);
TF_ASSIGN_OR_RETURN(llvm::Value * lhs_value, lhs_generator(lhs_index));
TF_ASSIGN_OR_RETURN(llvm::Value * rhs_value, rhs_generator(rhs_index));
llvm::Value* next_accumulator;
if (primitive_util::IsComplexType(primitive_type)) {
- llvm::Value* product_real = ir_builder_->CreateFSub(
- ir_builder_->CreateFMul(EmitExtractReal(lhs_value),
- EmitExtractReal(rhs_value)),
- ir_builder_->CreateFMul(EmitExtractImag(lhs_value),
- EmitExtractImag(rhs_value)));
- llvm::Value* product_imag = ir_builder_->CreateFAdd(
- ir_builder_->CreateFMul(EmitExtractReal(lhs_value),
- EmitExtractImag(rhs_value)),
- ir_builder_->CreateFMul(EmitExtractImag(lhs_value),
- EmitExtractReal(rhs_value)));
- next_accumulator = ir_builder_->CreateInsertValue(
+ llvm::Value* product_real = b_->CreateFSub(
+ b_->CreateFMul(EmitExtractReal(lhs_value), EmitExtractReal(rhs_value)),
+ b_->CreateFMul(EmitExtractImag(lhs_value), EmitExtractImag(rhs_value)));
+ llvm::Value* product_imag = b_->CreateFAdd(
+ b_->CreateFMul(EmitExtractReal(lhs_value), EmitExtractImag(rhs_value)),
+ b_->CreateFMul(EmitExtractImag(lhs_value), EmitExtractReal(rhs_value)));
+ next_accumulator = b_->CreateInsertValue(
current_accumulator,
- ir_builder_->CreateFAdd(EmitExtractReal(current_accumulator),
- product_real),
+ b_->CreateFAdd(EmitExtractReal(current_accumulator), product_real),
{0});
- next_accumulator = ir_builder_->CreateInsertValue(
+ next_accumulator = b_->CreateInsertValue(
next_accumulator,
- ir_builder_->CreateFAdd(EmitExtractImag(current_accumulator),
- product_imag),
+ b_->CreateFAdd(EmitExtractImag(current_accumulator), product_imag),
{1});
} else if (primitive_util::IsFloatingPointType(primitive_type)) {
- next_accumulator = ir_builder_->CreateFAdd(
- current_accumulator, ir_builder_->CreateFMul(lhs_value, rhs_value));
+ next_accumulator = b_->CreateFAdd(current_accumulator,
+ b_->CreateFMul(lhs_value, rhs_value));
} else {
- next_accumulator = ir_builder_->CreateAdd(
- current_accumulator, ir_builder_->CreateMul(lhs_value, rhs_value));
+ next_accumulator =
+ b_->CreateAdd(current_accumulator, b_->CreateMul(lhs_value, rhs_value));
}
- ir_builder_->CreateStore(next_accumulator, accumulator_alloca);
+ b_->CreateStore(next_accumulator, accumulator_alloca);
- SetToFirstInsertPoint(inner_loop->GetExitBasicBlock(), ir_builder_);
- return ir_builder_->CreateLoad(accumulator_alloca);
+ SetToFirstInsertPoint(inner_loop->GetExitBasicBlock(), b_);
+ return b_->CreateLoad(accumulator_alloca);
}
llvm_ir::ElementGenerator ElementalIrEmitter::MakeElementGenerator(
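Note: in the complex branch of the dot-product loop above, each iteration accumulates the product (a + bi)(c + di) = (ac - bd) + (ad + bc)i into the {real, imag} struct held in dot_acc. A scalar sketch of that accumulation:

    #include <complex>
    #include <vector>

    // Scalar model of the inner reduction loop for a complex dot product;
    // assumes lhs and rhs have equal length.
    std::complex<float> ComplexDot(const std::vector<std::complex<float>>& lhs,
                                   const std::vector<std::complex<float>>& rhs) {
      std::complex<float> acc(0.0f, 0.0f);
      for (size_t i = 0; i < lhs.size(); ++i) {
        const float a = lhs[i].real(), b = lhs[i].imag();
        const float c = rhs[i].real(), d = rhs[i].imag();
        acc += std::complex<float>(a * c - b * d,   // product_real
                                   a * d + b * c);  // product_imag
      }
      return acc;
    }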
@@ -2036,7 +1976,7 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeElementGenerator(
const HloInstruction* operand = hlo->operand(0);
auto source_index = target_index;
for (int64 dim : hlo->dimensions()) {
- source_index[dim] = ir_builder_->CreateSub(
+ source_index[dim] = b_->CreateSub(
llvm::ConstantInt::get(target_index[dim]->getType(),
hlo->shape().dimensions(dim) - 1),
target_index[dim]);
@@ -2049,16 +1989,16 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeElementGenerator(
const HloInstruction* operand = hlo->operand(0);
// The `dimensions` member of the broadcast instruction maps from
// input dimensions to output dimensions.
- return operand_to_generator.at(
- operand)(target_index.SourceIndexOfBroadcast(
- hlo->shape(), operand->shape(), hlo->dimensions(), ir_builder_));
+ return operand_to_generator.at(operand)(
+ target_index.SourceIndexOfBroadcast(hlo->shape(), operand->shape(),
+ hlo->dimensions(), b_));
};
case HloOpcode::kSlice:
return [this, hlo, &operand_to_generator](
const IrArray::Index& index) -> StatusOr<llvm::Value*> {
IrArray::Index sliced_index = index.SourceIndexOfSlice(
/*shape=*/hlo->shape(), /*starts=*/hlo->slice_starts(),
- /*strides=*/hlo->slice_strides(), /*builder=*/ir_builder_);
+ /*strides=*/hlo->slice_strides(), /*builder=*/b_);
return operand_to_generator.at(hlo->operand(0))(sliced_index);
};
case HloOpcode::kDynamicSlice:
@@ -2083,24 +2023,23 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeElementGenerator(
ShapeUtil::ElementsIn(hlo->operand(0)->shape()));
return [this, hlo, &operand_to_generator](const IrArray::Index& index) {
const HloInstruction* operand = hlo->operand(0);
- return operand_to_generator.at(operand)(index.SourceIndexOfBitcast(
- hlo->shape(), operand->shape(), ir_builder_));
+ return operand_to_generator.at(operand)(
+ index.SourceIndexOfBitcast(hlo->shape(), operand->shape(), b_));
};
case HloOpcode::kReshape:
CHECK_EQ(ShapeUtil::ElementsIn(hlo->shape()),
ShapeUtil::ElementsIn(hlo->operand(0)->shape()));
return [this, hlo, &operand_to_generator](const IrArray::Index& index) {
const HloInstruction* operand = hlo->operand(0);
- return operand_to_generator.at(operand)(index.SourceIndexOfReshape(
- hlo->shape(), operand->shape(), ir_builder_));
+ return operand_to_generator.at(operand)(
+ index.SourceIndexOfReshape(hlo->shape(), operand->shape(), b_));
};
case HloOpcode::kTranspose:
return [this, hlo,
&operand_to_generator](const IrArray::Index& target_index) {
return operand_to_generator.at(hlo->operand(0))(
target_index.SourceIndexOfTranspose(
- hlo->shape(), hlo->operand(0)->shape(), hlo->dimensions(),
- ir_builder_));
+ hlo->shape(), hlo->operand(0)->shape(), hlo->dimensions(), b_));
};
case HloOpcode::kRng:
return MakeRngElementGenerator(hlo, operand_to_generator);
@@ -2125,11 +2064,11 @@ llvm_ir::ElementGenerator ElementalIrEmitter::MakeElementGenerator(
}
llvm::Value* ElementalIrEmitter::EmitExtractReal(llvm::Value* value) const {
- return ir_builder_->CreateExtractValue(value, {0});
+ return b_->CreateExtractValue(value, {0});
}
llvm::Value* ElementalIrEmitter::EmitExtractImag(llvm::Value* value) const {
- return ir_builder_->CreateExtractValue(value, {1});
+ return b_->CreateExtractValue(value, {1});
}
llvm::Value* ElementalIrEmitter::EmitComposeComplex(const HloInstruction* op,
@@ -2137,10 +2076,10 @@ llvm::Value* ElementalIrEmitter::EmitComposeComplex(const HloInstruction* op,
llvm::Value* imag) const {
auto cplx_type =
llvm_ir::PrimitiveTypeToIrType(op->shape().element_type(), module_);
- auto complex = ir_builder_->CreateInsertValue(
+ auto complex = b_->CreateInsertValue(
llvm::ConstantAggregateZero::get(cplx_type), real, {0});
if (imag != nullptr) {
- complex = ir_builder_->CreateInsertValue(complex, imag, {1});
+ complex = b_->CreateInsertValue(complex, imag, {1});
}
return complex;
}
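Note: EmitComposeComplex and the EmitExtractReal/EmitExtractImag helpers above treat a complex value as a two-element aggregate, with the real part at index {0} and the imaginary part at index {1}; a null imag leaves the zero from ConstantAggregateZero in place. A plain-struct sketch of that layout:

    // Struct model of the {real, imag} aggregate the emitter builds.
    struct Complex {
      float real = 0.0f;  // extractvalue index {0}
      float imag = 0.0f;  // extractvalue index {1}
    };

    Complex ComposeComplex(float real, const float* imag /* may be null */) {
      Complex c;         // mirrors llvm::ConstantAggregateZero
      c.real = real;     // insertvalue at {0}
      if (imag != nullptr) {
        c.imag = *imag;  // insertvalue at {1}
      }
      return c;
    }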
diff --git a/tensorflow/compiler/xla/service/elemental_ir_emitter.h b/tensorflow/compiler/xla/service/elemental_ir_emitter.h
index d199473374..deba6bea0a 100644
--- a/tensorflow/compiler/xla/service/elemental_ir_emitter.h
+++ b/tensorflow/compiler/xla/service/elemental_ir_emitter.h
@@ -34,10 +34,8 @@ class ElementalIrEmitter {
std::unordered_map<const HloInstruction*, llvm_ir::ElementGenerator>;
ElementalIrEmitter(const HloModuleConfig& hlo_module_config,
- llvm::Module* module, llvm::IRBuilder<>* ir_builder)
- : ir_builder_(ir_builder),
- module_(module),
- hlo_module_config_(hlo_module_config) {}
+ llvm::Module* module, llvm::IRBuilder<>* b)
+ : b_(b), module_(module), hlo_module_config_(hlo_module_config) {}
virtual ~ElementalIrEmitter() = default;
@@ -54,7 +52,7 @@ class ElementalIrEmitter {
const HloInstruction* hlo,
const HloToElementGeneratorMap& operand_to_generator) const;
- llvm::IRBuilder<>* ir_builder() const { return ir_builder_; }
+ llvm::IRBuilder<>* b() const { return b_; }
llvm::Module* module() const { return module_; }
protected:
@@ -144,9 +142,7 @@ class ElementalIrEmitter {
int64 operand_no) const;
// Identifier of the thread unique among all threads on the device
- virtual llvm::Value* EmitThreadId() const {
- return ir_builder_->getIntN(128, 0);
- }
+ virtual llvm::Value* EmitThreadId() const { return b_->getIntN(128, 0); }
StatusOr<llvm::Value*> EmitElementalSelect(
const HloInstruction* hlo,
@@ -188,7 +184,7 @@ class ElementalIrEmitter {
const HloToElementGeneratorMap& operand_to_generator,
const llvm_ir::IrArray::Index& dot_result_index) const;
- llvm::IRBuilder<>* const ir_builder_;
+ llvm::IRBuilder<>* const b_;
llvm::Module* module_;
diff --git a/tensorflow/compiler/xla/service/executable.cc b/tensorflow/compiler/xla/service/executable.cc
index 7cf2746947..fd75847d0c 100644
--- a/tensorflow/compiler/xla/service/executable.cc
+++ b/tensorflow/compiler/xla/service/executable.cc
@@ -82,7 +82,18 @@ StatusOr<ScopedShapedBuffer> Executable::ExecuteOnStreamWrapper(
StatusOr<ScopedShapedBuffer> return_value =
ExecuteOnStream(run_options, arguments, profile_ptr.get());
- TF_RETURN_IF_ERROR(return_value.status());
+ if (!return_value.status().ok()) {
+ if (profile != nullptr) {
+ // Ensure the ThenStartTimer call has completed before we destroy the timer.
+ // We already have a failure status to return, so just log this if it
+ // fails.
+ Status status = stream->BlockHostUntilDone();
+ if (!status.ok()) {
+ LOG(ERROR) << "Failed to BlockHostUntilDone: " << status;
+ }
+ }
+ return return_value.status();
+ }
if (profile != nullptr) {
VLOG(1) << "enqueueing 'stop timer' and blocking host until done...";
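Note: the new failure branch above makes sure the stream is synchronized before the profiling timer goes out of scope, and only logs a failure of that cleanup because the original error is the one to propagate. A self-contained sketch of the pattern, with minimal stand-in Status and Stream types (both hypothetical):

    #include <iostream>
    #include <string>

    struct Status {
      std::string msg;  // empty means OK
      bool ok() const { return msg.empty(); }
    };

    struct Stream {
      Status BlockHostUntilDone() { return {}; }  // stand-in for the real sync
    };

    // Return the original failure; a secondary sync failure is only logged.
    Status FinishAfterFailure(Stream* stream, Status run_status, bool profiling) {
      if (!run_status.ok() && profiling) {
        Status sync = stream->BlockHostUntilDone();
        if (!sync.ok()) {
          std::cerr << "Failed to BlockHostUntilDone: " << sync.msg << '\n';
        }
      }
      return run_status;
    }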
diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.cc b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
index 33730049c4..e314a469f0 100644
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.cc
@@ -158,11 +158,6 @@ Status GenericTransferManager::TransferLiteralToInfeed(
return Unimplemented("Generic transfer to Infeed");
}
-Status GenericTransferManager::TransferBufferToInfeed(
- se::StreamExecutor* executor, int64 size, const void* source) {
- return Unimplemented("Generic transfer to Infeed");
-}
-
Status GenericTransferManager::TransferLiteralFromOutfeed(
se::StreamExecutor* executor, const Shape& literal_shape,
Literal* literal) {
diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.h b/tensorflow/compiler/xla/service/generic_transfer_manager.h
index d216fe7d29..3cd002c1bf 100644
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.h
@@ -61,9 +61,6 @@ class GenericTransferManager : public TransferManager {
int64 GetByteSizeRequirement(const Shape& shape) const override;
protected:
- Status TransferBufferToInfeed(se::StreamExecutor* executor, int64 size,
- const void* source) override;
-
Status WriteSingleTupleIndexTable(
se::Stream* stream,
tensorflow::gtl::ArraySlice<se::DeviceMemoryBase> elements,
diff --git a/tensorflow/compiler/xla/service/gpu/BUILD b/tensorflow/compiler/xla/service/gpu/BUILD
index 9fca3a51c8..72aff197fc 100644
--- a/tensorflow/compiler/xla/service/gpu/BUILD
+++ b/tensorflow/compiler/xla/service/gpu/BUILD
@@ -36,6 +36,7 @@ cc_library(
hdrs = ["gpu_constants.h"],
deps = [
"//tensorflow/compiler/xla:types",
+ "//tensorflow/core:framework",
],
)
@@ -162,6 +163,7 @@ cc_library(
"//tensorflow/compiler/xla/service:elemental_ir_emitter",
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:name_uniquer",
+ "//tensorflow/compiler/xla/service/llvm_ir:dynamic_update_slice_util",
"//tensorflow/compiler/xla/service/llvm_ir:fused_ir_emitter",
"//tensorflow/compiler/xla/service/llvm_ir:ir_array",
"//tensorflow/compiler/xla/service/llvm_ir:kernel_support_library",
@@ -169,7 +171,7 @@ cc_library(
"//tensorflow/compiler/xla/service/llvm_ir:llvm_loop",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
"//tensorflow/compiler/xla/service/llvm_ir:loop_emitter",
- "//tensorflow/compiler/xla/service/llvm_ir:ops",
+ "//tensorflow/compiler/xla/service/llvm_ir:sort_util",
"//tensorflow/compiler/xla/service/llvm_ir:tuple_ops",
"//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
@@ -268,6 +270,7 @@ cc_library(
"memset_thunk.cc",
"outfeed_thunk.cc",
"sequential_thunk.cc",
+ "thunk.cc",
"thunk_schedule.cc",
"tuple_thunk.cc",
"while_thunk.cc",
@@ -542,6 +545,7 @@ cc_library(
":outfeed_manager",
"//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:shape_tree",
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
@@ -560,8 +564,8 @@ cc_library(
cc_library(
name = "gpu_compiler",
- srcs = ["gpu_compiler.cc"],
- hdrs = ["gpu_compiler.h"],
+ srcs = ["nvptx_compiler.cc"],
+ hdrs = ["nvptx_compiler.h"],
deps = [
":cudnn_convolution_algorithm_picker",
":cudnn_convolution_rewriter",
@@ -640,13 +644,20 @@ cc_library(
)
cc_library(
+ name = "xfeed_queue",
+ hdrs = ["xfeed_queue.h"],
+ deps = ["//tensorflow/core:lib"],
+)
+
+cc_library(
name = "infeed_manager",
srcs = ["infeed_manager.cc"],
hdrs = ["infeed_manager.h"],
deps = [
+ ":xfeed_queue",
+ "//tensorflow/compiler/xla:shape_tree",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
- "//tensorflow/core:lib",
"//tensorflow/core:stream_executor_no_cuda",
],
)
@@ -656,6 +667,7 @@ cc_library(
srcs = ["outfeed_manager.cc"],
hdrs = ["outfeed_manager.h"],
deps = [
+ ":xfeed_queue",
"//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:shape_tree",
"//tensorflow/compiler/xla:shape_util",
diff --git a/tensorflow/compiler/xla/service/gpu/buffer_allocations.cc b/tensorflow/compiler/xla/service/gpu/buffer_allocations.cc
index ab5149dcdb..b095d4cd73 100644
--- a/tensorflow/compiler/xla/service/gpu/buffer_allocations.cc
+++ b/tensorflow/compiler/xla/service/gpu/buffer_allocations.cc
@@ -49,12 +49,12 @@ StatusOr<std::unique_ptr<BufferAllocations>> BufferAllocations::Builder::Build(
if (registered_buffers_.count(i)) {
se::DeviceMemoryBase address = FindOrDie(registered_buffers_, i);
if (reinterpret_cast<uintptr_t>(address.opaque()) %
- kCudaMallocAlignBytes !=
+ kEntryParameterAlignBytes !=
0) {
return InternalError(
"Address of registered buffer %lld must be a multiple of %llx, but "
"was %p",
- i, kCudaMallocAlignBytes, address.opaque());
+ i, kEntryParameterAlignBytes, address.opaque());
}
buffer_allocations->SetBuffer(i, FindOrDie(registered_buffers_, i));
continue;
@@ -71,12 +71,12 @@ StatusOr<std::unique_ptr<BufferAllocations>> BufferAllocations::Builder::Build(
TF_ASSIGN_OR_RETURN(
buffer, memory_allocator->Allocate(device_ordinal, buffer_size));
if (reinterpret_cast<uintptr_t>(buffer.opaque()) %
- kCudaMallocAlignBytes !=
+ kXlaAllocatedBufferAlignBytes !=
0) {
return InternalError(
"Address returned by memory_allocator->Allocate must be a "
"multiple of %llx, but was %p",
- kCudaMallocAlignBytes, buffer.opaque());
+ kXlaAllocatedBufferAlignBytes, buffer.opaque());
}
// We do manual memory management within BufferAllocations. Be sure not
// to do a TF_RETURN_IF_ERROR between this line and the
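Note: both hunks above keep the same check and only swap the constant: caller-registered buffers must now be aligned to kEntryParameterAlignBytes and XLA-allocated buffers to kXlaAllocatedBufferAlignBytes (both defined in gpu_constants.cc later in this diff). The check itself reduces to:

    #include <cstdint>

    // An address satisfies the requirement iff it is a multiple of the alignment.
    bool IsAlignedTo(const void* ptr, std::uint64_t align_bytes) {
      return reinterpret_cast<std::uintptr_t>(ptr) % align_bytes == 0;
    }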
diff --git a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
index e594cec2f8..b97a627d9b 100644
--- a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
@@ -67,8 +67,8 @@ bool IsFPLiteralWithValue(const HloInstruction* operand, float value) {
GpuElementalIrEmitter::GpuElementalIrEmitter(
const HloModuleConfig& hlo_module_config, llvm::Module* module,
- llvm::IRBuilder<>* ir_builder, NestedComputer compute_nested)
- : ElementalIrEmitter(hlo_module_config, module, ir_builder),
+ llvm::IRBuilder<>* b, NestedComputer compute_nested)
+ : ElementalIrEmitter(hlo_module_config, module, b),
hlo_module_config_(hlo_module_config),
compute_nested_(std::move(compute_nested)) {}
@@ -92,8 +92,8 @@ StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLibdeviceMathCall(
cast_result_to_fp16 = true;
for (int64 i = 0; i < operands.size(); ++i) {
if (input_types[i] == F16) {
- converted_operands[i] = ir_builder_->CreateFPCast(
- converted_operands[i], ir_builder_->getFloatTy());
+ converted_operands[i] =
+ b_->CreateFPCast(converted_operands[i], b_->getFloatTy());
converted_input_types[i] = F32;
}
}
@@ -112,7 +112,7 @@ StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitLibdeviceMathCall(
converted_input_types, output_type)
.ValueOrDie();
if (cast_result_to_fp16) {
- result = ir_builder_->CreateFPCast(result, ir_builder_->getHalfTy());
+ result = b_->CreateFPCast(result, b_->getHalfTy());
}
return result;
}
@@ -215,7 +215,7 @@ StatusOr<llvm::Value*> GpuElementalIrEmitter::EmitPowerOp(
// LLVM's NVPTX backend knows how to transform 1/sqrt(A) into the NVPTX
// rsqrt.approx instruction.
TF_ASSIGN_OR_RETURN(auto* sqrt, make_sqrt());
- return ir_builder_->CreateFDiv(llvm::ConstantFP::get(llvm_ty, 1), sqrt);
+ return b_->CreateFDiv(llvm::ConstantFP::get(llvm_ty, 1), sqrt);
}
VLOG(10) << "emitting pow as regular call to pow(): " << op->ToString();
@@ -302,32 +302,31 @@ llvm::Value* GpuElementalIrEmitter::EmitDeviceFunctionCall(
// Declares the callee if it is not declared already.
llvm::Function* callee = llvm::cast<llvm::Function>(
- ir_builder_->GetInsertBlock()->getModule()->getOrInsertFunction(
+ b_->GetInsertBlock()->getModule()->getOrInsertFunction(
llvm_ir::AsStringRef(callee_name), callee_type));
for (auto attribute : attributes) {
callee->addFnAttr(attribute);
}
- return ir_builder_->CreateCall(callee, llvm_ir::AsArrayRef(operands));
+ return b_->CreateCall(callee, llvm_ir::AsArrayRef(operands));
}
llvm::Value* GpuElementalIrEmitter::EmitThreadId() const {
- llvm::Value* block_id = ir_builder_->CreateIntCast(
+ llvm::Value* block_id = b_->CreateIntCast(
llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x,
- {}, {}, ir_builder_),
- ir_builder_->getIntNTy(128), /*isSigned=*/true, "block.id");
- llvm::Value* thread_id_in_block = ir_builder_->CreateIntCast(
+ {}, {}, b_),
+ b_->getIntNTy(128), /*isSigned=*/true, "block.id");
+ llvm::Value* thread_id_in_block = b_->CreateIntCast(
llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x,
- {}, {}, ir_builder_),
- ir_builder_->getIntNTy(128), /*isSigned=*/true, "thread.id");
- llvm::Value* threads_per_block = ir_builder_->CreateIntCast(
+ {}, {}, b_),
+ b_->getIntNTy(128), /*isSigned=*/true, "thread.id");
+ llvm::Value* threads_per_block = b_->CreateIntCast(
llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x,
- {}, {}, ir_builder_),
- ir_builder_->getIntNTy(128), /*isSigned=*/true, "threads_per_block");
- return ir_builder_->CreateNSWAdd(
- ir_builder_->CreateNSWMul(block_id, threads_per_block),
- thread_id_in_block);
+ {}, {}, b_),
+ b_->getIntNTy(128), /*isSigned=*/true, "threads_per_block");
+ return b_->CreateNSWAdd(b_->CreateNSWMul(block_id, threads_per_block),
+ thread_id_in_block);
}
llvm_ir::ElementGenerator GpuElementalIrEmitter::MakeElementGenerator(
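Note: EmitThreadId above reads the NVVM special registers ctaid.x, ntid.x and tid.x, widens each to a 128-bit integer, and combines them with NSW arithmetic into a device-unique linear index. A scalar sketch of the formula:

    #include <cstdint>

    // global_id = block_id * threads_per_block + thread_id_in_block
    std::uint64_t GlobalThreadId(std::uint64_t block_id,
                                 std::uint64_t threads_per_block,
                                 std::uint64_t thread_id_in_block) {
      return block_id * threads_per_block + thread_id_in_block;
    }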
@@ -373,12 +372,12 @@ llvm_ir::ElementGenerator GpuElementalIrEmitter::MakeElementGenerator(
PrimitiveType operand_element_type = operand->shape().element_type();
llvm::Value* accum_ptr = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(operand_element_type, module_),
- "reduce_window_accum_ptr", ir_builder_);
+ "reduce_window_accum_ptr", b_);
{
TF_ASSIGN_OR_RETURN(llvm::Value * init_value,
operand_to_generator.at(hlo->operand(1))(
IrArray::Index(index.GetType())));
- ir_builder_->CreateStore(init_value, accum_ptr);
+ b_->CreateStore(init_value, accum_ptr);
}
llvm::Type* index_type = index.GetType();
@@ -386,7 +385,7 @@ llvm_ir::ElementGenerator GpuElementalIrEmitter::MakeElementGenerator(
return index.GetConstantWithIndexType(c);
};
- llvm_ir::ForLoopNest loops(IrName(hlo), ir_builder_, index_type);
+ llvm_ir::ForLoopNest loops(IrName(hlo), b_, index_type);
std::vector<int64> window_size;
for (const auto& dim : window.dimensions()) {
window_size.push_back(dim.size());
@@ -395,15 +394,15 @@ llvm_ir::ElementGenerator GpuElementalIrEmitter::MakeElementGenerator(
ShapeUtil::MakeShape(operand_element_type, window_size), "window");
CHECK_EQ(window_index.size(), index.size());
- SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), ir_builder_);
+ SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), b_);
IrArray::Index input_index(index_type, index.size());
- llvm::Value* in_bounds = ir_builder_->getInt1(true);
+ llvm::Value* in_bounds = b_->getInt1(true);
for (size_t i = 0; i < index.size(); ++i) {
- llvm::Value* stridden_index = ir_builder_->CreateNSWMul(
+ llvm::Value* stridden_index = b_->CreateNSWMul(
index[i], index_typed_const(window.dimensions(i).stride()));
- input_index[i] = ir_builder_->CreateNSWSub(
- ir_builder_->CreateNSWAdd(stridden_index, window_index[i]),
+ input_index[i] = b_->CreateNSWSub(
+ b_->CreateNSWAdd(stridden_index, window_index[i]),
index_typed_const(window.dimensions(i).padding_low()));
// We must check whether 0 ≤ input_index[i] < bound, as otherwise
@@ -411,16 +410,16 @@ llvm_ir::ElementGenerator GpuElementalIrEmitter::MakeElementGenerator(
// comparison is equivalent to the unsigned comparison
// input_index[i] < bound, as a negative value wraps to a large
// positive value.
- in_bounds = ir_builder_->CreateAnd(
+ in_bounds = b_->CreateAnd(
in_bounds,
- ir_builder_->CreateICmpULT(
+ b_->CreateICmpULT(
input_index[i],
index_typed_const(operand->shape().dimensions(i))));
}
llvm_ir::LlvmIfData if_data =
- llvm_ir::EmitIfThenElse(in_bounds, "in_bounds", ir_builder_);
- SetToFirstInsertPoint(if_data.true_block, ir_builder_);
+ llvm_ir::EmitIfThenElse(in_bounds, "in_bounds", b_);
+ SetToFirstInsertPoint(if_data.true_block, b_);
// We are not in pad, so do the computation.
TF_ASSIGN_OR_RETURN(llvm::Value * input_value,
@@ -428,26 +427,26 @@ llvm_ir::ElementGenerator GpuElementalIrEmitter::MakeElementGenerator(
TF_ASSIGN_OR_RETURN(
llvm::Value * accum_value,
compute_nested_(*hlo->to_apply(),
- {ir_builder_->CreateLoad(accum_ptr), input_value}));
- ir_builder_->CreateStore(accum_value, accum_ptr);
+ {b_->CreateLoad(accum_ptr), input_value}));
+ b_->CreateStore(accum_value, accum_ptr);
- SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), ir_builder_);
- return ir_builder_->CreateLoad(accum_ptr);
+ SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), b_);
+ return b_->CreateLoad(accum_ptr);
};
case HloOpcode::kReduce:
return [=, &operand_to_generator](
const IrArray::Index& output_index) -> StatusOr<llvm::Value*> {
const HloInstruction* operand = hlo->operand(0);
llvm::Value* accum_ptr =
- ir_builder()->CreateAlloca(llvm_ir::PrimitiveTypeToIrType(
+ b()->CreateAlloca(llvm_ir::PrimitiveTypeToIrType(
hlo->shape().element_type(), module_));
llvm::Type* index_type = output_index.GetType();
TF_ASSIGN_OR_RETURN(llvm::Value * init_value,
operand_to_generator.at(hlo->operand(1))(
IrArray::Index(index_type)));
- ir_builder()->CreateStore(init_value, accum_ptr);
+ b()->CreateStore(init_value, accum_ptr);
- llvm_ir::ForLoopNest loops(IrName(hlo), ir_builder_, index_type);
+ llvm_ir::ForLoopNest loops(IrName(hlo), b_, index_type);
IrArray::Index input_index = loops.AddLoopsForShapeOnDimensions(
operand->shape(), hlo->dimensions(), "reduction_dim");
if (!ShapeUtil::IsScalar(hlo->shape())) {
@@ -462,18 +461,17 @@ llvm_ir::ElementGenerator GpuElementalIrEmitter::MakeElementGenerator(
CHECK_EQ(output_index.size(), j);
}
- SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), ir_builder());
+ SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), b());
TF_ASSIGN_OR_RETURN(
llvm::Value * input_value,
operand_to_generator.at(hlo->operand(0))(input_index));
TF_ASSIGN_OR_RETURN(
llvm::Value * accum_value,
- compute_nested_(
- *hlo->to_apply(),
- {ir_builder()->CreateLoad(accum_ptr), input_value}));
- ir_builder()->CreateStore(accum_value, accum_ptr);
- SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), ir_builder());
- return ir_builder()->CreateLoad(accum_ptr);
+ compute_nested_(*hlo->to_apply(),
+ {b()->CreateLoad(accum_ptr), input_value}));
+ b()->CreateStore(accum_value, accum_ptr);
+ SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), b());
+ return b()->CreateLoad(accum_ptr);
};
default:
return ElementalIrEmitter::MakeElementGenerator(hlo,
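Note: the kReduce generator above stores the init value into a stack slot, runs a loop nest over the reduced dimensions, and folds each operand element into the accumulator through the nested user computation (compute_nested_). A scalar sketch for one output element, with hypothetical names:

    #include <functional>
    #include <vector>

    float ReduceOneOutput(const std::vector<float>& reduced_elements, float init,
                          const std::function<float(float, float)>& to_apply) {
      float accum = init;                     // stored to accum_ptr
      for (float input : reduced_elements) {  // ForLoopNest over reduction dims
        accum = to_apply(accum, input);       // nested computation
      }
      return accum;                           // loaded from accum_ptr at loop exit
    }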
diff --git a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.h b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.h
index 91f4d960aa..e3eacef133 100644
--- a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.h
+++ b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.h
@@ -43,7 +43,7 @@ class GpuElementalIrEmitter : public ElementalIrEmitter {
const HloComputation&, tensorflow::gtl::ArraySlice<llvm::Value*>)>;
GpuElementalIrEmitter(const HloModuleConfig& hlo_module_config,
- llvm::Module* module, llvm::IRBuilder<>* ir_builder,
+ llvm::Module* module, llvm::IRBuilder<>* b,
NestedComputer compute_nested);
llvm_ir::ElementGenerator MakeElementGenerator(
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_constants.cc b/tensorflow/compiler/xla/service/gpu/gpu_constants.cc
index aa360c7f73..e6ddea6d25 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_constants.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_constants.cc
@@ -14,12 +14,21 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/service/gpu/gpu_constants.h"
+#include "tensorflow/core/framework/allocator.h"
namespace xla {
namespace gpu {
-// http://docs.nvidia.com/cuda/cuda-c-programming-guide/#device-memory-accesses
-const int64 kCudaMallocAlignBytes = 256;
+// kEntryParameterAlignBytes is equal to EIGEN_MAX_ALIGN_BYTES, though including
+// Eigen headers here to get that symbol may not be a good idea.
+// EIGEN_MAX_ALIGN_BYTES may differ between CUDA-enabled builds vs CUDA-disabled
+// builds and we don't want the IR generated by XLA:GPU to depend on that.
+//
+// TODO(b/111767313): Consider raising EIGEN_MAX_ALIGN_BYTES if it helps.
+const int64 kEntryParameterAlignBytes = 16;
+
+const int64 kXlaAllocatedBufferAlignBytes =
+ tensorflow::Allocator::kAllocatorAlignment;
} // namespace gpu
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_constants.h b/tensorflow/compiler/xla/service/gpu/gpu_constants.h
index eb1ca4c6c9..925e6927b6 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_constants.h
+++ b/tensorflow/compiler/xla/service/gpu/gpu_constants.h
@@ -21,9 +21,12 @@ limitations under the License.
namespace xla {
namespace gpu {
-// Minimum alignment of cudaMalloc. We require that buffers created by our
-// DeviceMemoryAllocator, and all input/output buffers, have this alignment.
-extern const int64 kCudaMallocAlignBytes;
+// Minimum alignment for buffers passed as incoming arguments by TensorFlow.
+extern const int64 kEntryParameterAlignBytes;
+
+// Minimum alignment for buffers allocated by XLA: the temp buffers and the live
+// out (result) buffers.
+extern const int64 kXlaAllocatedBufferAlignBytes;
} // namespace gpu
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc
index 3c8018a030..79b3f1efec 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.cc
@@ -22,7 +22,7 @@ limitations under the License.
#include "llvm/IR/DataLayout.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
-#include "tensorflow/compiler/xla/service/gpu/gpu_compiler.h"
+#include "tensorflow/compiler/xla/service/gpu/nvptx_compiler.h"
#include "tensorflow/compiler/xla/service/gpu/outfeed_manager.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
@@ -36,15 +36,14 @@ limitations under the License.
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
namespace xla {
+namespace gpu {
// TODO(b/30467474) Once GPU infeed implementation settles, consider
// folding back the cpu and gpu infeed implementations into a generic
// one if possible.
-GpuTransferManager::GpuTransferManager()
- : GenericTransferManager(
- se::cuda::kCudaPlatformId,
- /*pointer_size=*/llvm::DataLayout(gpu::GpuCompiler::kDataLayout)
- .getPointerSize(0 /* default address space */)) {}
+GpuTransferManager::GpuTransferManager(se::Platform::Id id,
+ unsigned pointer_size)
+ : GenericTransferManager(id, pointer_size) {}
Status GpuTransferManager::TransferLiteralToInfeed(
se::StreamExecutor* executor, const LiteralSlice& literal) {
@@ -52,48 +51,28 @@ Status GpuTransferManager::TransferLiteralToInfeed(
VLOG(2) << "Transferring literal to infeed with shape: "
<< ShapeUtil::HumanString(shape);
- if (!ShapeUtil::IsTuple(shape)) {
- int64 size = GetByteSizeRequirement(shape);
- return TransferBufferToInfeed(executor, size, literal.untyped_data());
- }
-
// For a tuple, we transfer each of its elements to the device and
// enqueue the resulting destination device addresses with the
// infeed manager.
- std::vector<gpu::InfeedBuffer*> buffers;
- auto cleanup = tensorflow::gtl::MakeCleanup([buffers]() {
- for (gpu::InfeedBuffer* b : buffers) {
- b->Done();
- }
- });
+ ShapeTree<InfeedBuffer> buffer_tree(shape);
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
shape, [&](const Shape& literal_subshape, const ShapeIndex& index) {
if (ShapeUtil::IsArray(literal_subshape)) {
int64 tuple_element_size = GetByteSizeRequirement(literal_subshape);
TF_ASSIGN_OR_RETURN(
- gpu::InfeedBuffer * buffer,
+ *buffer_tree.mutable_element(index),
TransferBufferToInfeedInternal(executor, tuple_element_size,
literal.untyped_data(index)));
- buffers.push_back(buffer);
}
return Status::OK();
}));
- cleanup.release();
- return EnqueueBuffersToInfeed(executor, buffers);
-}
-
-Status GpuTransferManager::TransferBufferToInfeed(se::StreamExecutor* executor,
- int64 size,
- const void* source) {
- TF_ASSIGN_OR_RETURN(gpu::InfeedBuffer * buffer,
- TransferBufferToInfeedInternal(executor, size, source));
- return EnqueueBuffersToInfeed(executor, {buffer});
+ return EnqueueBuffersToInfeed(executor, std::move(buffer_tree));
}
Status GpuTransferManager::EnqueueBuffersToInfeed(
- se::StreamExecutor* executor, std::vector<gpu::InfeedBuffer*> buffers) {
+ se::StreamExecutor* executor, ShapeTree<InfeedBuffer> buffers) {
gpu::InfeedManager* infeed_manager = gpu::GetOrCreateInfeedManager();
se::Stream* stream = infeed_manager->GetStream(executor);
@@ -103,21 +82,18 @@ Status GpuTransferManager::EnqueueBuffersToInfeed(
// possible.
Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
- for (gpu::InfeedBuffer* b : buffers) {
- b->Done();
- }
return InternalError("Failed to complete data transfer on stream %p: %s",
stream, block_status.error_message().c_str());
}
- infeed_manager->EnqueueBuffers(buffers);
+ infeed_manager->EnqueueDestination(std::move(buffers));
VLOG(2) << "Infeed data transferred";
return Status::OK();
}
-StatusOr<gpu::InfeedBuffer*> GpuTransferManager::TransferBufferToInfeedInternal(
+StatusOr<InfeedBuffer> GpuTransferManager::TransferBufferToInfeedInternal(
se::StreamExecutor* executor, int64 size, const void* source) {
if (size > std::numeric_limits<int32>::max()) {
return InvalidArgument("Infeed shape is too large: needs %lld bytes", size);
@@ -133,12 +109,12 @@ StatusOr<gpu::InfeedBuffer*> GpuTransferManager::TransferBufferToInfeedInternal(
return InternalError("Failed to obtain a stream");
}
- gpu::InfeedBuffer* buffer = new gpu::InfeedBuffer(executor, size);
- stream->ThenMemcpy(buffer->device_memory(), source, size);
+ InfeedBuffer buffer(executor, size);
+ stream->ThenMemcpy(buffer.device_memory(), source, size);
VLOG(2) << "Queued infeed data on stream " << stream;
- return buffer;
+ return std::move(buffer);
}
static std::unique_ptr<Literal> ShapeTreeToLiteral(
@@ -191,22 +167,26 @@ Status GpuTransferManager::TransferLiteralFromOutfeed(
// Give the tree of buffers to the outfeed manager. The device will fill it
// while we're waiting for it below.
gpu::OutfeedManager* outfeed_manager = gpu::GetOrCreateOutfeedManager();
- outfeed_manager->EnqueueOutfeedDestination(&outfeed_buffers);
+ outfeed_manager->EnqueueDestination(&outfeed_buffers);
// Now turn the tree of buffers back into a literal.
*literal = std::move(*ShapeTreeToLiteral(&outfeed_buffers));
return Status::OK();
}
+} // namespace gpu
} // namespace xla
-static std::unique_ptr<xla::TransferManager> CreateGpuTransferManager() {
- return xla::MakeUnique<xla::GpuTransferManager>();
+static std::unique_ptr<xla::TransferManager> CreateNVPTXTransferManager() {
+ return xla::MakeUnique<xla::gpu::GpuTransferManager>(
+ /*id=*/stream_executor::cuda::kCudaPlatformId,
+ /*pointer_size=*/llvm::DataLayout(xla::gpu::NVPTXCompiler::kDataLayout)
+ .getPointerSize(0 /* default address space */));
}
static bool InitModule() {
xla::TransferManager::RegisterTransferManager(
- stream_executor::cuda::kCudaPlatformId, &CreateGpuTransferManager);
+ stream_executor::cuda::kCudaPlatformId, &CreateNVPTXTransferManager);
return true;
}
static bool module_initialized = InitModule();
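Note: the renamed factory above still uses the usual static-initializer registration idiom: a function that builds the transfer manager is registered against the CUDA platform id as soon as the object file is linked in. A minimal model of that idiom, with hypothetical registry types:

    #include <functional>
    #include <map>
    #include <memory>

    struct TransferManagerStub {};  // stand-in for xla::TransferManager
    using Factory = std::function<std::unique_ptr<TransferManagerStub>()>;

    std::map<int, Factory>& Registry() {
      static auto* registry = new std::map<int, Factory>();
      return *registry;
    }

    static bool InitModuleSketch() {
      // Keyed by platform id; the real code passes kCudaPlatformId and
      // CreateNVPTXTransferManager.
      Registry()[/*platform_id=*/0] = [] {
        return std::make_unique<TransferManagerStub>();
      };
      return true;
    }
    static bool registered = InitModuleSketch();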
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h
index 9dff1e5a50..dceeb9e2eb 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h
@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/generic_transfer_manager.h"
#include "tensorflow/compiler/xla/service/gpu/infeed_manager.h"
#include "tensorflow/compiler/xla/service/transfer_manager.h"
+#include "tensorflow/compiler/xla/shape_tree.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/macros.h"
@@ -28,18 +29,17 @@ limitations under the License.
#include "tensorflow/core/platform/types.h"
namespace xla {
+namespace gpu {
// An implementation of the XLA GenericTransferManager that
// handles GPU-specific infeed.
class GpuTransferManager : public GenericTransferManager {
public:
- GpuTransferManager();
+ GpuTransferManager(se::Platform::Id id, unsigned pointer_size);
~GpuTransferManager() override {}
Status TransferLiteralToInfeed(se::StreamExecutor* executor,
const LiteralSlice& literal) override;
- Status TransferBufferToInfeed(se::StreamExecutor* executor, int64 size,
- const void* source) override;
Status TransferLiteralFromOutfeed(se::StreamExecutor* executor,
const Shape& literal_shape,
Literal* literal) override;
@@ -47,17 +47,18 @@ class GpuTransferManager : public GenericTransferManager {
private:
// Initiates the infeed data transfers. InfeedBuffer->Done() must be
// called to clean up the memory allocated for InfeedBuffer.
- StatusOr<gpu::InfeedBuffer*> TransferBufferToInfeedInternal(
+ StatusOr<InfeedBuffer> TransferBufferToInfeedInternal(
se::StreamExecutor* executor, int64 size, const void* source);
// Enqueues infeed data buffers with the infeed manager after their
// transfer completes.
Status EnqueueBuffersToInfeed(se::StreamExecutor* executor,
- std::vector<gpu::InfeedBuffer*> buffers);
+ ShapeTree<InfeedBuffer> buffers);
TF_DISALLOW_COPY_AND_ASSIGN(GpuTransferManager);
};
+} // namespace gpu
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TRANSFER_MANAGER_H_
diff --git a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc
index d420863b85..1b6315ec03 100644
--- a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc
+++ b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.cc
@@ -39,7 +39,7 @@ void HloToIrBindings::EmitBasePointersForHlos(
// I/O HLOs are bound to the arguments of the current IR function. I.e.,
//
// void IrFunction(io_0, io_1, ..., io_{m-1}, temp_buffer_base) {
- llvm::Function* function = ir_builder_->GetInsertBlock()->getParent();
+ llvm::Function* function = b_->GetInsertBlock()->getParent();
CHECK_EQ(io_hlos.size() + 1, function->arg_size());
// An HLO can have duplicated operands. This data structure remembers which
@@ -79,8 +79,8 @@ void HloToIrBindings::EmitBasePointersForHlos(
const int64 offset = slice.offset();
CHECK_NE(nullptr, temp_buffer_base_);
// Emit IR for GetTupleElement instruction and bind to emitted value.
- llvm::Value* base_ptr = ir_builder_->CreateInBoundsGEP(
- temp_buffer_base_, ir_builder_->getInt64(offset));
+ llvm::Value* base_ptr =
+ b_->CreateInBoundsGEP(temp_buffer_base_, b_->getInt64(offset));
BindHloToIrValue(*non_io_hlo,
EmitGetTupleElement(non_io_hlo, base_ptr));
}
@@ -108,15 +108,14 @@ void HloToIrBindings::EmitBasePointersForHlos(
if (slice.allocation()->is_thread_local()) {
llvm::Type* pointee_type =
llvm_ir::ShapeToIrType(non_io_hlo->shape(), module_);
- BindHloToIrValue(*non_io_hlo,
- ir_builder_->CreateAlloca(pointee_type), index);
+ BindHloToIrValue(*non_io_hlo, b_->CreateAlloca(pointee_type),
+ index);
} else {
const int64 offset = slice.offset();
CHECK_NE(nullptr, temp_buffer_base_);
BindHloToIrValue(
*non_io_hlo,
- ir_builder_->CreateInBoundsGEP(temp_buffer_base_,
- ir_builder_->getInt64(offset)),
+ b_->CreateInBoundsGEP(temp_buffer_base_, b_->getInt64(offset)),
index);
}
});
@@ -129,11 +128,11 @@ llvm::Value* HloToIrBindings::EmitGetTupleElement(const HloInstruction* gte,
if (gte->operand(0)->opcode() != HloOpcode::kGetTupleElement) {
return llvm_ir::EmitGetTupleElement(
gte->shape(), gte->tuple_index(), /*alignment=*/1,
- GetTypedIrValue(*gte->operand(0), {}, base_ptr), ir_builder_, module_);
+ GetTypedIrValue(*gte->operand(0), {}, base_ptr), b_, module_);
}
return llvm_ir::EmitGetTupleElement(
gte->shape(), gte->tuple_index(), /*alignment=*/1,
- EmitGetTupleElement(gte->operand(0), base_ptr), ir_builder_, module_);
+ EmitGetTupleElement(gte->operand(0), base_ptr), b_, module_);
}
llvm::Value* HloToIrBindings::GetTypedIrValue(const HloInstruction& hlo,
@@ -145,11 +144,10 @@ llvm::Value* HloToIrBindings::GetTypedIrValue(const HloInstruction& hlo,
llvm::Value* typed_ir_value;
if (llvm::isa<llvm::GlobalVariable>(ir_value)) {
- typed_ir_value = llvm::ConstantExpr::getBitCast(
+ typed_ir_value = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
llvm::cast<llvm::GlobalVariable>(ir_value), dest_type);
} else {
- typed_ir_value =
- ir_builder_->CreateBitCast(ir_value, pointee_type->getPointerTo());
+ typed_ir_value = b_->CreateBitCast(ir_value, pointee_type->getPointerTo());
}
ir_value->setName(llvm_ir::AsStringRef(llvm_ir::IrName(&hlo, "raw")));
typed_ir_value->setName(llvm_ir::AsStringRef(llvm_ir::IrName(&hlo, "typed")));
diff --git a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h
index a86e6e78c6..eee40b0e91 100644
--- a/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h
+++ b/tensorflow/compiler/xla/service/gpu/hlo_to_ir_bindings.h
@@ -36,14 +36,13 @@ class HloToIrBindings {
public:
HloToIrBindings(const HloModule& module,
const BufferAssignment* buffer_assignment,
- llvm::IRBuilder<>* ir_builder, llvm::Module* llvm_module,
+ llvm::IRBuilder<>* b, llvm::Module* llvm_module,
bool is_nested)
: buffer_assignment_(buffer_assignment),
is_nested_(is_nested),
- ir_builder_(ir_builder),
+ b_(b),
module_(llvm_module),
- alias_analysis_(module, *buffer_assignment_,
- &ir_builder_->getContext()) {}
+ alias_analysis_(module, *buffer_assignment_, &b_->getContext()) {}
void EmitBasePointersForHlos(
tensorflow::gtl::ArraySlice<const HloInstruction*> io_hlos,
@@ -104,7 +103,7 @@ class HloToIrBindings {
const bool is_nested_;
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
llvm::Module* module_;
// Stores the underlying llvm::IrArray for each HloInstruction.
diff --git a/tensorflow/compiler/xla/service/gpu/infeed_manager.cc b/tensorflow/compiler/xla/service/gpu/infeed_manager.cc
index ae310beefa..c5f0cdf6cd 100644
--- a/tensorflow/compiler/xla/service/gpu/infeed_manager.cc
+++ b/tensorflow/compiler/xla/service/gpu/infeed_manager.cc
@@ -15,76 +15,13 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/infeed_manager.h"
-#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
-#include "tensorflow/core/platform/logging.h"
namespace xla {
namespace gpu {
-InfeedManager::InfeedManager() : host_to_device_executor_(nullptr) {}
-
-void InfeedManager::Reset() {
- tensorflow::mutex_lock l(mu_);
- CHECK(dequeued_buffer_.empty());
- for (auto buffer : enqueued_buffer_) {
- buffer->Done();
- }
- enqueued_buffer_.clear();
-}
-
-void InfeedManager::EnqueueBuffers(const std::vector<InfeedBuffer*>& buffers) {
- tensorflow::mutex_lock l(mu_);
- bool was_empty = enqueued_buffer_.empty();
- for (gpu::InfeedBuffer* b : buffers) {
- enqueued_buffer_.push_back(b);
- }
- if (was_empty) {
- // This has the potential to suffer from the notified thread
- // immediately trying and failing to acquire mu_, but seems
- // preferable to the alternative of notifying outside the lock
- // on every enqueue.
- cv_.notify_one();
- }
-}
-
-InfeedBuffer* InfeedManager::BlockingDequeueBuffer() {
- bool became_empty = false;
- InfeedBuffer* current_buffer;
- {
- tensorflow::mutex_lock l(mu_);
- while (enqueued_buffer_.empty()) {
- cv_.wait(l);
- }
- current_buffer = enqueued_buffer_.front();
- enqueued_buffer_.pop_front();
- dequeued_buffer_.insert(current_buffer);
- if (enqueued_buffer_.empty()) {
- became_empty = true;
- }
- }
- if (became_empty) {
- for (const auto& callback : on_empty_callbacks_) {
- callback();
- }
- }
- return current_buffer;
-}
-
-void InfeedManager::ReleaseBuffers(const std::vector<InfeedBuffer*>& buffers) {
- {
- tensorflow::mutex_lock l(mu_);
- for (gpu::InfeedBuffer* b : buffers) {
- CHECK(ContainsKey(dequeued_buffer_, b));
- dequeued_buffer_.erase(b);
- }
- }
- for (gpu::InfeedBuffer* b : buffers) {
- b->Done();
- }
-}
-
se::Stream* InfeedManager::GetStream(se::StreamExecutor* executor) {
+ tensorflow::mutex_lock l(host_to_device_stream_mu_);
if (host_to_device_executor_ == nullptr) {
host_to_device_executor_ = executor;
host_to_device_stream_ = MakeUnique<se::Stream>(executor);
@@ -100,10 +37,6 @@ se::Stream* InfeedManager::GetStream(se::StreamExecutor* executor) {
return host_to_device_stream_.get();
}
-void InfeedManager::RegisterOnEmptyCallback(std::function<void()> callback) {
- on_empty_callbacks_.push_back(std::move(callback));
-}
-
InfeedManager* GetOrCreateInfeedManager() {
static InfeedManager* manager = new InfeedManager;
return manager;
diff --git a/tensorflow/compiler/xla/service/gpu/infeed_manager.h b/tensorflow/compiler/xla/service/gpu/infeed_manager.h
index a3fc15cfe3..7e418882e0 100644
--- a/tensorflow/compiler/xla/service/gpu/infeed_manager.h
+++ b/tensorflow/compiler/xla/service/gpu/infeed_manager.h
@@ -20,12 +20,9 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_INFEED_MANAGER_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_INFEED_MANAGER_H_
-#include <deque>
-#include <vector>
-
+#include "tensorflow/compiler/xla/service/gpu/xfeed_queue.h"
+#include "tensorflow/compiler/xla/shape_tree.h"
#include "tensorflow/compiler/xla/types.h"
-#include "tensorflow/core/lib/gtl/flatset.h"
-#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
namespace xla {
@@ -47,90 +44,41 @@ namespace gpu {
// the client. The client manages the memory of the buffer.
class InfeedBuffer {
public:
+ InfeedBuffer() = default;
InfeedBuffer(se::StreamExecutor* executor, int64 length)
- : executor_(executor), length_(length) {
- device_memory_ = executor_->AllocateArray<uint8>(length);
- CHECK(!device_memory_.is_null());
+ : device_memory_(executor, executor->AllocateArray<uint8>(length)),
+ length_(length) {
+ CHECK(!device_memory_->is_null());
}
- ~InfeedBuffer() { executor_->Deallocate(&device_memory_); }
-
int64 length() const { return length_; }
- // Callback to signal that this buffer is consumed. This helps the
- // client to manage memory for the infeed buffers.
- void Done() { delete this; }
-
- se::DeviceMemoryBase* device_memory() { return &device_memory_; }
+ se::DeviceMemoryBase* device_memory() { return device_memory_.ptr(); }
private:
- se::StreamExecutor* executor_; // Not owned.
- const int64 length_;
- se::DeviceMemoryBase device_memory_;
+ se::ScopedDeviceMemory<uint8> device_memory_;
+ int64 length_;
};
// Client-side class used to enqueue infeed buffers.
-class InfeedManager {
+class InfeedManager : public XfeedQueue<ShapeTree<InfeedBuffer>> {
public:
- InfeedManager();
-
- // Calls the completion callback for any enqueued buffers that have
- // not been dequeued by the runtime, and empties the infeed
- // queue. Reset may not be called while a runtime computation is
- // processing a dequeued buffer. The only safe way to ensure this
- // condition is to call Reset when no computation is taking place.
- void Reset();
-
- // Adds a set of buffers to the infeed queue atomically. buffer->Done
- // will be called when the buffer will no longer be accessed by the
- // InfeedManager, either as a result of a call to Reset or because the
- // runtime has dequeued and used the buffer.
- void EnqueueBuffers(const std::vector<InfeedBuffer*>& buffers);
-
- // Blocks until the infeed queue is non-empty, then returns the
- // buffer at the head of the queue. Adds the current buffer to the
- // to-be released set.
- InfeedBuffer* BlockingDequeueBuffer();
-
- // Releases a set of buffers from the to-be released set.
- void ReleaseBuffers(const std::vector<InfeedBuffer*>& buffers);
-
// Returns a cached stream associated with an executor. Allocates a
// new stream on the first invocation. On subsequent invocations, if
// the cached executor is not the same as the requested executor,
// returns null.
se::Stream* GetStream(se::StreamExecutor* executor);
- // Registers a callback that will be called when 'enqueued_buffer_' becomes
- // empty.
- void RegisterOnEmptyCallback(std::function<void()> callback);
-
private:
- // TODO(b/30467474): Revisit if this mutex becomes a point of
- // contention.
- tensorflow::mutex mu_;
-
- // Condition variable that is signaled every time a buffer is
- // enqueued to an empty queue.
- tensorflow::condition_variable cv_;
-
- // InfeedBuffer* queue contents are not owned, but buffer->Done must
- // be called when the buffer is no longer needed by the runtime.
- std::deque<InfeedBuffer*> enqueued_buffer_;
-
- // Buffers that are dequeued and currently being processed by the
- // runtime. Not owned.
- tensorflow::gtl::FlatSet<const InfeedBuffer*> dequeued_buffer_;
+ // Mutex for serializing the creation of host_to_device_stream_.
+ tensorflow::mutex host_to_device_stream_mu_;
// Cached host to device stream for queuing infeed data.
- std::unique_ptr<se::Stream> host_to_device_stream_;
+ std::unique_ptr<se::Stream> host_to_device_stream_
+ GUARDED_BY(host_to_device_stream_mu_);
// Executor that the host_to_device_stream belongs to. Not owned.
- se::StreamExecutor* host_to_device_executor_;
-
- // List of callbacks which will be called when 'enqueued_buffer_' becomes
- // empty.
- std::vector<std::function<void()>> on_empty_callbacks_;
+ se::StreamExecutor* host_to_device_executor_ = nullptr;
};
// Singleton creator-or-accessor: Returns the GPU infeed manager.
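The slimmed-down InfeedManager keeps only a lazily created host-to-device stream, guarded by host_to_device_stream_mu_. A minimal sketch of that caching pattern follows; Executor and Stream here are simplified stand-ins, not the real StreamExecutor types.

  #include <memory>
  #include <mutex>

  struct Executor {};
  struct Stream {
    explicit Stream(Executor* e) : executor(e) {}
    Executor* executor;
  };

  class StreamCache {
   public:
    // Creates the stream on first use, returns the cached one on a match, and
    // returns nullptr if a different executor is requested later.
    Stream* GetStream(Executor* executor) {
      std::lock_guard<std::mutex> lock(mu_);
      if (executor_ == nullptr) {
        executor_ = executor;
        stream_ = std::make_unique<Stream>(executor);
      } else if (executor_ != executor) {
        return nullptr;
      }
      return stream_.get();
    }

   private:
    std::mutex mu_;
    Executor* executor_ = nullptr;    // Not owned.
    std::unique_ptr<Stream> stream_;  // Guarded by mu_.
  };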
diff --git a/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc b/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
index 62915febb1..fee6d2af3b 100644
--- a/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/infeed_thunk.cc
@@ -30,51 +30,68 @@ InfeedThunk::InfeedThunk(
Status InfeedThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
se::Stream* stream,
HloExecutionProfiler* profiler) {
- VLOG(2) << "Infeeding to GPU ";
+ VLOG(2) << "Infeeding to GPU: " << hlo_instruction()->ToString();
auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
- // First copy the infeed data which is element 0 of the infeed instruction's
- // two-tuple output (the other element is a token).
- se::DeviceMemoryBase data_address =
- buffer_allocations.GetDeviceAddress(infeed_slices_.element({0}));
- InfeedManager* infeed_manager = GetOrCreateInfeedManager();
- std::vector<InfeedBuffer*> infeed_buffers;
- const Shape& data_shape =
- ShapeUtil::GetTupleElementShape(hlo_instruction()->shape(), 0);
- if (ShapeUtil::IsTuple(data_shape)) {
- CHECK(!ShapeUtil::IsNestedTuple(data_shape));
- // Transfer the tuple elements first.
+ ShapeTree<InfeedBuffer> infeed_buffers =
+ GetOrCreateInfeedManager()->BlockingGetNextDestination();
+
+ {
+ // The infeed buffer has an extra outer tuple with a token. Adjust the index
+ // accordingly.
+ ShapeIndex index = {0};
+ std::function<void(std::vector<void*>*)> copy_tuple_contents =
+ [&](std::vector<void*>* tuple_element_addresses) {
+ const Shape& shape = ShapeUtil::GetSubshape(infeed_buffers.shape(),
+ ShapeIndexView(index, 1));
+ // For the leaf buffers of the tuple, copy the elements directly.
+ if (ShapeUtil::IsArray(shape)) {
+ const BufferAllocation::Slice& tuple_element_buffer =
+ infeed_slices_.element(index);
+ se::DeviceMemoryBase tuple_element_address =
+ buffer_allocations.GetDeviceAddress(tuple_element_buffer);
+
+ InfeedBuffer* buffer =
+ infeed_buffers.mutable_element(ShapeIndexView(index, 1));
+ stream->ThenMemcpy(&tuple_element_address,
+ *(buffer->device_memory()), buffer->length());
+ tuple_element_addresses->push_back(tuple_element_address.opaque());
+ return;
+ }
+
+ const int64 tuple_element_count = ShapeUtil::TupleElementCount(shape);
+ index.push_back(0);
+ std::vector<void*> inner_tuple_element_addresses;
+ for (int64 i = 0; i < tuple_element_count; ++i) {
+ index.back() = i;
+ copy_tuple_contents(&inner_tuple_element_addresses);
+ }
+ index.pop_back();
+
+ // Create a buffer of pointers for non-leaf buffers.
+ CHECK_EQ(tuple_element_count, inner_tuple_element_addresses.size());
+ auto host_size = inner_tuple_element_addresses.size() * sizeof(void*);
+ se::DeviceMemoryBase tuple_address =
+ buffer_allocations.GetDeviceAddress(
+ infeed_slices_.element(index));
+ stream->ThenMemcpy(&tuple_address,
+ inner_tuple_element_addresses.data(), host_size);
+ tuple_element_addresses->push_back(tuple_address.opaque());
+ };
+
std::vector<void*> tuple_element_addresses;
- for (int i = 0; i < ShapeUtil::TupleElementCount(data_shape); ++i) {
- const BufferAllocation::Slice& tuple_element_buffer =
- infeed_slices_.element({0, i});
- se::DeviceMemoryBase tuple_element_address =
- buffer_allocations.GetDeviceAddress(tuple_element_buffer);
-
- InfeedBuffer* buffer = infeed_manager->BlockingDequeueBuffer();
- infeed_buffers.push_back(buffer);
- stream->ThenMemcpy(&tuple_element_address, *(buffer->device_memory()),
- buffer->length());
- tuple_element_addresses.push_back(tuple_element_address.opaque());
- }
- // Transfer the tuple outer buffer.
- auto host_size = tuple_element_addresses.size() * sizeof(void*);
- stream->ThenMemcpy(&data_address, tuple_element_addresses.data(),
- host_size);
- } else {
- InfeedBuffer* buffer = infeed_manager->BlockingDequeueBuffer();
- infeed_buffers.push_back(buffer);
- stream->ThenMemcpy(&data_address, *(buffer->device_memory()),
- buffer->length());
+ copy_tuple_contents(&tuple_element_addresses);
+ CHECK_EQ(1, tuple_element_addresses.size());
}
// Construct top-level tuple of infeed containing the data and the token. Use
// a nullptr for the token; it should never be dereferenced.
- std::vector<void*> infeed_addresses = {data_address.opaque(), nullptr};
+ se::DeviceMemoryBase data_address =
+ buffer_allocations.GetDeviceAddress(infeed_slices_.element({0}));
+ void* infeed_addresses[] = {data_address.opaque(), nullptr};
se::DeviceMemoryBase top_level_address =
buffer_allocations.GetDeviceAddress(infeed_slices_.element({}));
- stream->ThenMemcpy(&top_level_address, infeed_addresses.data(),
- 2 * sizeof(void*));
+ stream->ThenMemcpy(&top_level_address, infeed_addresses, 2 * sizeof(void*));
Status block_status = stream->BlockHostUntilDone();
if (!block_status.ok()) {
@@ -82,8 +99,6 @@ Status InfeedThunk::ExecuteOnStream(const BufferAllocations& buffer_allocations,
stream, block_status.error_message().c_str());
}
- infeed_manager->ReleaseBuffers(infeed_buffers);
-
VLOG(2) << "Infeeding to GPU complete";
return Status::OK();
}
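The rewritten ExecuteOnStream walks the destination ShapeTree recursively: copy_tuple_contents memcpys array leaves directly and, for each enclosing tuple, writes a small buffer holding its children's device pointers. The standalone sketch below models only that traversal order with toy types (Node and leaf_payload are illustrative, not XLA types) and performs no device copies.

  #include <cstdio>
  #include <memory>
  #include <vector>

  struct Node {
    bool is_leaf = false;
    int leaf_payload = 0;  // Stands in for a leaf device buffer.
    std::vector<std::unique_ptr<Node>> children;
  };

  // Leaves contribute their own address; inner nodes first visit all children,
  // then contribute the address of a freshly built table of child pointers.
  void CopyTupleContents(const Node& node,
                         std::vector<std::vector<const void*>>& tables,
                         std::vector<const void*>* addresses) {
    if (node.is_leaf) {
      addresses->push_back(&node.leaf_payload);  // "Memcpy the leaf buffer."
      return;
    }
    std::vector<const void*> child_addresses;
    for (const auto& child : node.children) {
      CopyTupleContents(*child, tables, &child_addresses);
    }
    tables.push_back(std::move(child_addresses));  // "Buffer of pointers."
    addresses->push_back(tables.back().data());
  }

  int main() {
    auto leaf = [] {
      auto n = std::make_unique<Node>();
      n->is_leaf = true;
      return n;
    };
    Node root;
    auto inner = std::make_unique<Node>();
    inner->children.push_back(leaf());
    inner->children.push_back(leaf());
    root.children.push_back(std::move(inner));
    root.children.push_back(leaf());

    std::vector<std::vector<const void*>> tables;
    std::vector<const void*> top;
    CopyTupleContents(root, tables, &top);
    // As in the thunk, the root call yields exactly one top-level address.
    std::printf("top-level entries: %zu, pointer tables built: %zu\n",
                top.size(), tables.size());
  }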
diff --git a/tensorflow/compiler/xla/service/gpu/instruction_fusion.cc b/tensorflow/compiler/xla/service/gpu/instruction_fusion.cc
index 64ed3d748f..8abae43a5a 100644
--- a/tensorflow/compiler/xla/service/gpu/instruction_fusion.cc
+++ b/tensorflow/compiler/xla/service/gpu/instruction_fusion.cc
@@ -183,8 +183,53 @@ bool GpuInstructionFusion::ShouldFuse(HloInstruction* consumer,
return true;
}
- return IsFusile(*producer) && IsFusile(*consumer) &&
- InstructionFusion::ShouldFuse(consumer, operand_index);
+ if (!IsFusile(*producer) || !IsFusile(*consumer) ||
+ !InstructionFusion::ShouldFuse(consumer, operand_index)) {
+ return false;
+ }
+
+ // Limit the maximum number of operands to a fusion.
+ //
+ // There's a limit to how many parameters we can pass to a CUDA kernel, but
+ // exactly what that limit is remains hazy, as it depends on (among other things)
+ // how much GPU constant memory is in use for other purposes.
+ //
+ // Moreover, we don't even know at this point how many arguments the CUDA
+ // kernel for this fusion node will have: It depends on buffer assignment,
+ // where we will decide which of the fusion's operands live in XLA's big temp
+ // buffer versus in other allocations.
+ //
+ // As a heuristic, we simply cap the number of fusion operands at
+ // kMaxOperandsPerFusion. This puts an upper bound on the number of
+ // parameters to the kernel, working around the correctness problem.
+ //
+ // This limit is also often good for performance. In a fusion with many
+ // operands, each GPU thread likely has to do a lot of work, and so possibly
+ // uses a lot of registers, thus limiting occupancy.
+ //
+ // We put this check last because it's expensive to compute.
+
+ // The new fusion will have no more operands than
+ // producer_operands + consumer_operands - 1
+ // (minus one because we're fusing the producer->consumer edge). This fact
+ // may be enough to let us avoid having to compute the true total number of
+ // operands, taking into account the fact that producer and consumer may share
+ // operands.
+ if (producer->operand_count() + consumer->operand_count() - 1 >
+ kMaxOperandsPerFusion) {
+ tensorflow::gtl::FlatSet<const HloInstruction*> producer_operands(
+ producer->operands().begin(), producer->operands().end());
+ int64 new_num_operands =
+ producer->operand_count() +
+ c_count_if(consumer->operands(), [&](const HloInstruction* operand) {
+ return operand != producer && !producer_operands.count(operand);
+ });
+ if (new_num_operands > kMaxOperandsPerFusion) {
+ return false;
+ }
+ }
+
+ return true;
}
bool GpuInstructionFusion::ShouldFuseIntoMultiOutput(HloInstruction* consumer,
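The operand-count cap above first applies the cheap p + c - 1 bound and only then counts the exact union of producer and consumer operands. The same check in miniature, using strings as stand-ins for HloInstruction pointers:

  #include <string>
  #include <unordered_set>
  #include <vector>

  constexpr int kMaxOperandsPerFusion = 64;

  bool WouldExceedOperandLimit(const std::vector<std::string>& producer_operands,
                               const std::vector<std::string>& consumer_operands,
                               const std::string& producer) {
    // Cheap bound: the fused node has at most p + c - 1 operands, because the
    // fused producer->consumer edge disappears.
    int upper_bound = static_cast<int>(producer_operands.size()) +
                      static_cast<int>(consumer_operands.size()) - 1;
    if (upper_bound <= kMaxOperandsPerFusion) return false;

    // Exact count: producer operands plus every consumer operand that is
    // neither the producer itself nor already a producer operand.
    std::unordered_set<std::string> producer_set(producer_operands.begin(),
                                                 producer_operands.end());
    int total = static_cast<int>(producer_operands.size());
    for (const std::string& op : consumer_operands) {
      if (op != producer && !producer_set.count(op)) ++total;
    }
    return total > kMaxOperandsPerFusion;
  }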
diff --git a/tensorflow/compiler/xla/service/gpu/instruction_fusion.h b/tensorflow/compiler/xla/service/gpu/instruction_fusion.h
index f629d9ff2c..5ee1c004b6 100644
--- a/tensorflow/compiler/xla/service/gpu/instruction_fusion.h
+++ b/tensorflow/compiler/xla/service/gpu/instruction_fusion.h
@@ -36,6 +36,10 @@ class GpuInstructionFusion : public InstructionFusion {
HloInstruction::FusionKind ChooseKind(
const HloInstruction* producer, const HloInstruction* consumer) override;
+
+ // Maximum number of operands allowed on a single fusion node. Exposed
+ // publicly mainly for tests.
+ static constexpr int64 kMaxOperandsPerFusion = 64;
};
} // namespace gpu
diff --git a/tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc b/tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc
index 98ba162cd9..229eb23f12 100644
--- a/tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc
+++ b/tensorflow/compiler/xla/service/gpu/instruction_fusion_test.cc
@@ -606,5 +606,35 @@ TEST_F(InstructionFusionTest, FuseScalarConstant) {
op::Parameter()));
}
+// Check that we limit the number of operands to fusions we create.
+TEST_F(InstructionFusionTest, AvoidsLargeFusion) {
+ constexpr int64 kNumParams = 200;
+ ASSERT_GT(kNumParams, GpuInstructionFusion::kMaxOperandsPerFusion);
+
+ // Compute p0 + p1 + ... + pN.
+ HloComputation::Builder b(TestName());
+ Shape shape = ShapeUtil::MakeShape(F32, {10, 100});
+ auto param0 =
+ b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p"));
+ auto sum = param0;
+ for (int64 i = 1; i < kNumParams; ++i) {
+ auto param =
+ b.AddInstruction(HloInstruction::CreateParameter(i, shape, "p"));
+ sum = b.AddInstruction(
+ HloInstruction::CreateBinary(shape, HloOpcode::kAdd, sum, param));
+ }
+ auto module = CreateNewModule();
+ auto computation = module->AddEntryComputation(b.Build());
+ EXPECT_TRUE(GpuInstructionFusion(/*may_duplicate=*/true)
+ .Run(module.get())
+ .ValueOrDie());
+ SCOPED_TRACE(module->ToString());
+ for (const HloInstruction* instr : computation->instructions()) {
+ EXPECT_LE(instr->operand_count(),
+ GpuInstructionFusion::kMaxOperandsPerFusion)
+ << instr->ToString();
+ }
+}
+
} // namespace gpu
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc
index 388aa35d7d..2799baab41 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.cc
@@ -242,15 +242,17 @@ llvm::Value* EmitPrintf(tensorflow::StringPiece fmt,
arguments_ptr});
}
-llvm::Value* EmitShuffleDown(llvm::Value* value, llvm::Value* offset,
- llvm::IRBuilder<>* builder) {
+llvm::Value* EmitFullWarpShuffleDown(llvm::Value* value, llvm::Value* offset,
+ llvm::IRBuilder<>* builder) {
int bit_width = value->getType()->getPrimitiveSizeInBits();
+ llvm::Value* all_warps_mask = builder->getInt32(-1);
// Special case for efficiency
if (value->getType()->isFloatTy() && bit_width == 32) {
return llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::nvvm_shfl_down_f32,
- {value, offset, builder->getInt32(kWarpSize - 1)}, {}, builder);
+ llvm::Intrinsic::nvvm_shfl_sync_down_f32,
+ {all_warps_mask, value, offset, builder->getInt32(kWarpSize - 1)}, {},
+ builder);
}
// We must split values wider than 32 bits as the "shfl" instruction operates
@@ -264,10 +266,11 @@ llvm::Value* EmitShuffleDown(llvm::Value* value, llvm::Value* offset,
for (int i = 0; i < num_segments; ++i) {
x = builder->CreateInsertElement(
x,
- llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_shfl_down_i32,
- {builder->CreateExtractElement(x, i),
- offset, builder->getInt32(kWarpSize - 1)},
- {}, builder),
+ llvm_ir::EmitCallToIntrinsic(
+ llvm::Intrinsic::nvvm_shfl_sync_down_i32,
+ {all_warps_mask, builder->CreateExtractElement(x, i), offset,
+ builder->getInt32(kWarpSize - 1)},
+ {}, builder),
i);
}
return builder->CreateBitCast(
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h
index 59455f389e..9bb4c42b15 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h
+++ b/tensorflow/compiler/xla/service/gpu/ir_emission_utils.h
@@ -125,13 +125,17 @@ llvm::Value* EmitPrintf(tensorflow::StringPiece fmt,
llvm::IRBuilder<>* builder);
// Emits code to shuffle data between threads of a warp. This has the same
-// semantics as the PTX "shfl.down" instruction [0] but works for values of any
-// size. The last operand of the emitted "shfl" is `kWarpSize - 1`.
+// semantics as the PTX "shfl.sync.down" instruction but works for values that
+// aren't 32 bits in size. The last operand of the emitted "shfl" is
+// `kWarpSize - 1`.
//
-// [0]
-// http://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-shfl
-llvm::Value* EmitShuffleDown(llvm::Value* value, llvm::Value* offset,
- llvm::IRBuilder<>* builder);
+// This function emits a "full-warp" shuffle, which all threads of a warp
+// participate in. *Do not use this function from a divergent context:* You
+// can't correctly do so on both Volta and earlier GPUs.
+//
+// https://docs.nvidia.com/cuda/parallel-thread-execution/#data-movement-and-conversion-instructions-shfl-sync
+llvm::Value* EmitFullWarpShuffleDown(llvm::Value* value, llvm::Value* offset,
+ llvm::IRBuilder<>* builder);
} // namespace gpu
} // namespace xla
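For values wider than 32 bits, EmitFullWarpShuffleDown splits the value into 32-bit segments, shuffles each with the sync variant under an all-lanes mask, and reassembles the result. The host-side sketch below models only the split/reassemble step; ShuffleSegment is a placeholder for the per-segment shuffle (which on a GPU exchanges data across lanes), not a real API.

  #include <cstdint>
  #include <cstdio>

  uint32_t ShuffleSegment(uint32_t segment, unsigned /*offset*/) {
    return segment;  // Placeholder: the real exchange happens across warp lanes.
  }

  uint64_t ShuffleDown64(uint64_t value, unsigned offset) {
    uint32_t lo = static_cast<uint32_t>(value);
    uint32_t hi = static_cast<uint32_t>(value >> 32);
    lo = ShuffleSegment(lo, offset);  // One 32-bit shuffle per segment.
    hi = ShuffleSegment(hi, offset);
    return (static_cast<uint64_t>(hi) << 32) | lo;
  }

  int main() {
    std::printf("%llx\n", static_cast<unsigned long long>(
                              ShuffleDown64(0x1122334455667788ull, 1)));
  }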
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
index fe83d017f4..f95541cba4 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter.cc
@@ -57,12 +57,12 @@ IrEmitter::IrEmitter(const HloModuleConfig& hlo_module_config,
IrEmitterContext* ir_emitter_context, bool is_nested)
: ir_emitter_context_(ir_emitter_context),
module_(ir_emitter_context->llvm_module()),
- ir_builder_(module_->getContext()),
+ b_(module_->getContext()),
bindings_(ir_emitter_context->hlo_module(),
- &ir_emitter_context->buffer_assignment(), &ir_builder_, module_,
+ &ir_emitter_context->buffer_assignment(), &b_, module_,
is_nested),
hlo_module_config_(hlo_module_config) {
- ir_builder_.setFastMathFlags(llvm_ir::GetFastMathFlags(
+ b_.setFastMathFlags(llvm_ir::GetFastMathFlags(
/*fast_math_enabled=*/hlo_module_config.debug_options()
.xla_enable_fast_math()));
}
@@ -71,12 +71,11 @@ Status IrEmitter::DefaultAction(HloInstruction* hlo) {
ElementalIrEmitter::HloToElementGeneratorMap operand_to_generator;
for (const HloInstruction* operand : hlo->operands()) {
operand_to_generator[operand] = [=](const llvm_ir::IrArray::Index& index) {
- return GetIrArray(*operand, *hlo)
- .EmitReadArrayElement(index, &ir_builder_);
+ return GetIrArray(*operand, *hlo).EmitReadArrayElement(index, &b_);
};
}
return EmitTargetElementLoop(
- *hlo, GpuElementalIrEmitter(hlo_module_config_, module_, &ir_builder_,
+ *hlo, GpuElementalIrEmitter(hlo_module_config_, module_, &b_,
GetNestedComputer())
.MakeElementGenerator(hlo, operand_to_generator));
}
@@ -119,15 +118,10 @@ Status IrEmitter::HandleGetTupleElement(HloInstruction* get_tuple_element) {
get_tuple_element->shape(), get_tuple_element->tuple_index(),
// TODO(b/26344050): tighten the alignment here
// based on the real element type.
- /*alignment=*/1, GetBasePointer(*operand), &ir_builder_, module_));
+ /*alignment=*/1, GetBasePointer(*operand), &b_, module_));
return Status::OK();
}
-Status IrEmitter::HandleSort(HloInstruction*) {
- // TODO(b/26783907): Implement sort on GPU.
- return Unimplemented("sort");
-}
-
Status IrEmitter::HandleSend(HloInstruction*) {
return Unimplemented("Send is not implemented on GPU");
}
@@ -149,8 +143,7 @@ Status IrEmitter::HandleTuple(HloInstruction* tuple) {
for (const HloInstruction* operand : tuple->operands()) {
base_ptrs.push_back(GetBasePointer(*operand));
}
- llvm_ir::EmitTuple(GetIrArray(*tuple, *tuple), base_ptrs, &ir_builder_,
- module_);
+ llvm_ir::EmitTuple(GetIrArray(*tuple, *tuple), base_ptrs, &b_, module_);
return Status::OK();
}
@@ -171,7 +164,7 @@ Status IrEmitter::EmitCallToNestedComputation(
std::vector<llvm::Value*> arguments(operands.begin(), operands.end());
arguments.push_back(output);
arguments.push_back(bindings_.GetTempBufferBase());
- ir_builder_.CreateCall(emitted_function, arguments);
+ b_.CreateCall(emitted_function, arguments);
return Status::OK();
}
@@ -193,21 +186,20 @@ bool IrEmitter::MaybeEmitDirectAtomicOperation(
computation.root_instruction()->shape().element_type();
bool is_atomic_integral = element_type == S32 || element_type == U32 ||
element_type == S64 || element_type == U64;
- llvm::Value* source = ir_builder_.CreateLoad(source_address, "source");
+ llvm::Value* source = b_.CreateLoad(source_address, "source");
if (root_opcode == HloOpcode::kAdd) {
// NVPTX supports atomicAdd on F32 and integer types.
if (element_type == F32) {
// F32 + F32
llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_atomic_load_add_f32,
{output_address, source},
- {output_address->getType()}, &ir_builder_);
+ {output_address->getType()}, &b_);
return true;
}
if (is_atomic_integral) {
// integral + integral
- ir_builder_.CreateAtomicRMW(llvm::AtomicRMWInst::Add, output_address,
- source,
- llvm::AtomicOrdering::SequentiallyConsistent);
+ b_.CreateAtomicRMW(llvm::AtomicRMWInst::Add, output_address, source,
+ llvm::AtomicOrdering::SequentiallyConsistent);
return true;
}
}
@@ -218,8 +210,8 @@ bool IrEmitter::MaybeEmitDirectAtomicOperation(
auto opcode = primitive_util::IsSignedIntegralType(element_type)
? llvm::AtomicRMWInst::Max
: llvm::AtomicRMWInst::UMax;
- ir_builder_.CreateAtomicRMW(opcode, output_address, source,
- llvm::AtomicOrdering::SequentiallyConsistent);
+ b_.CreateAtomicRMW(opcode, output_address, source,
+ llvm::AtomicOrdering::SequentiallyConsistent);
return true;
}
@@ -228,8 +220,8 @@ bool IrEmitter::MaybeEmitDirectAtomicOperation(
auto opcode = primitive_util::IsSignedIntegralType(element_type)
? llvm::AtomicRMWInst::Min
: llvm::AtomicRMWInst::UMin;
- ir_builder_.CreateAtomicRMW(opcode, output_address, source,
- llvm::AtomicOrdering::SequentiallyConsistent);
+ b_.CreateAtomicRMW(opcode, output_address, source,
+ llvm::AtomicOrdering::SequentiallyConsistent);
return true;
}
@@ -301,20 +293,20 @@ Status IrEmitter::EmitAtomicOperationUsingCAS(const HloComputation& computation,
llvm::Type* element_address_type = element_type->getPointerTo();
int atomic_size = (element_size < 32) ? 32 : element_size;
- llvm::Type* atomic_type = ir_builder_.getIntNTy(atomic_size);
+ llvm::Type* atomic_type = b_.getIntNTy(atomic_size);
llvm::Type* atomic_address_type =
atomic_type->getPointerTo(output_address_type->getPointerAddressSpace());
// cas_old_output_address and cas_new_output_address point to the scratch
// memory where we store the old and new values for the repeated atomicCAS
// operations.
- llvm::Value* cas_old_output_address = ir_builder_.CreateAlloca(
+ llvm::Value* cas_old_output_address = b_.CreateAlloca(
atomic_type, /*ArraySize=*/nullptr, "cas_old_output_address");
- llvm::Value* cas_new_output_address = ir_builder_.CreateAlloca(
+ llvm::Value* cas_new_output_address = b_.CreateAlloca(
atomic_type, /*ArraySize=*/nullptr, "cas_new_output_address");
// Emit preparation code to the preheader.
- llvm::BasicBlock* loop_preheader_bb = ir_builder_.GetInsertBlock();
+ llvm::BasicBlock* loop_preheader_bb = b_.GetInsertBlock();
llvm::Value* atomic_memory_address;
// binop_output_address points to the scratch memory that stores the
@@ -325,77 +317,71 @@ Status IrEmitter::EmitAtomicOperationUsingCAS(const HloComputation& computation,
CHECK_EQ((element_size % sizeof(char)), 0);
llvm::Type* address_int_type =
module_->getDataLayout().getIntPtrType(output_address_type);
- atomic_memory_address =
- ir_builder_.CreatePtrToInt(output_address, address_int_type);
+ atomic_memory_address = b_.CreatePtrToInt(output_address, address_int_type);
llvm::Value* mask = llvm::ConstantInt::get(address_int_type, 3);
- llvm::Value* offset = ir_builder_.CreateAnd(atomic_memory_address, mask);
+ llvm::Value* offset = b_.CreateAnd(atomic_memory_address, mask);
mask = llvm::ConstantInt::get(address_int_type, -4);
- atomic_memory_address = ir_builder_.CreateAnd(atomic_memory_address, mask);
+ atomic_memory_address = b_.CreateAnd(atomic_memory_address, mask);
atomic_memory_address =
- ir_builder_.CreateIntToPtr(atomic_memory_address, atomic_address_type);
- binop_output_address = ir_builder_.CreateAdd(
- ir_builder_.CreatePtrToInt(cas_new_output_address, address_int_type),
- offset);
+ b_.CreateIntToPtr(atomic_memory_address, atomic_address_type);
+ binop_output_address = b_.CreateAdd(
+ b_.CreatePtrToInt(cas_new_output_address, address_int_type), offset);
binop_output_address =
- ir_builder_.CreateIntToPtr(binop_output_address, element_address_type);
+ b_.CreateIntToPtr(binop_output_address, element_address_type);
} else {
atomic_memory_address =
- ir_builder_.CreateBitCast(output_address, atomic_address_type);
+ b_.CreateBitCast(output_address, atomic_address_type);
binop_output_address =
- ir_builder_.CreateBitCast(cas_new_output_address, element_address_type);
+ b_.CreateBitCast(cas_new_output_address, element_address_type);
}
// Use the value from the memory that atomicCAS operates on to initialize
// cas_old_output.
llvm::Value* cas_old_output =
- ir_builder_.CreateLoad(atomic_memory_address, "cas_old_output");
- ir_builder_.CreateStore(cas_old_output, cas_old_output_address);
+ b_.CreateLoad(atomic_memory_address, "cas_old_output");
+ b_.CreateStore(cas_old_output, cas_old_output_address);
llvm::BasicBlock* loop_exit_bb = loop_preheader_bb->splitBasicBlock(
- ir_builder_.GetInsertPoint(), "atomic_op_loop_exit");
- llvm::BasicBlock* loop_body_bb =
- llvm::BasicBlock::Create(ir_builder_.getContext(), "atomic_op_loop_body",
- ir_builder_.GetInsertBlock()->getParent());
- ir_builder_.SetInsertPoint(loop_body_bb);
+ b_.GetInsertPoint(), "atomic_op_loop_exit");
+ llvm::BasicBlock* loop_body_bb = llvm::BasicBlock::Create(
+ b_.getContext(), "atomic_op_loop_body", b_.GetInsertBlock()->getParent());
+ b_.SetInsertPoint(loop_body_bb);
// Change preheader's successor from loop_exit_bb to loop_body_bb.
loop_preheader_bb->getTerminator()->setSuccessor(0, loop_body_bb);
// Emit the body of the loop that repeatedly invokes atomicCAS.
//
// Use cas_old_output to initialize cas_new_output.
- cas_old_output =
- ir_builder_.CreateLoad(cas_old_output_address, "cas_old_output");
- ir_builder_.CreateStore(cas_old_output, cas_new_output_address);
+ cas_old_output = b_.CreateLoad(cas_old_output_address, "cas_old_output");
+ b_.CreateStore(cas_old_output, cas_new_output_address);
// Emits code to calculate new_output = operation(old_output, source);
TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
computation, {binop_output_address, source_address},
binop_output_address));
llvm::Value* cas_new_output =
- ir_builder_.CreateLoad(cas_new_output_address, "cas_new_output");
+ b_.CreateLoad(cas_new_output_address, "cas_new_output");
// Emit code to perform the atomicCAS operation
// (cas_old_output, success) = atomicCAS(memory_address, cas_old_output,
// cas_new_output);
- llvm::Value* ret_value = ir_builder_.CreateAtomicCmpXchg(
+ llvm::Value* ret_value = b_.CreateAtomicCmpXchg(
atomic_memory_address, cas_old_output, cas_new_output,
llvm::AtomicOrdering::SequentiallyConsistent,
llvm::AtomicOrdering::SequentiallyConsistent);
// Extract the memory value returned from atomicCAS and store it as
// cas_old_output.
- ir_builder_.CreateStore(
- ir_builder_.CreateExtractValue(ret_value, 0, "cas_old_output"),
- cas_old_output_address);
+ b_.CreateStore(b_.CreateExtractValue(ret_value, 0, "cas_old_output"),
+ cas_old_output_address);
// Extract the success bit returned from atomicCAS and generate a
// conditional branch on the success bit.
- ir_builder_.CreateCondBr(
- ir_builder_.CreateExtractValue(ret_value, 1, "success"), loop_exit_bb,
- loop_body_bb);
+ b_.CreateCondBr(b_.CreateExtractValue(ret_value, 1, "success"), loop_exit_bb,
+ loop_body_bb);
// Set the insertion point to the exit basic block so that the caller of
// this method can continue emitting code to the right place.
- SetToFirstInsertPoint(loop_exit_bb, &ir_builder_);
+ SetToFirstInsertPoint(loop_exit_bb, &b_);
return Status::OK();
}
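The IR emitted here is the classic compare-and-swap retry loop: load the old value, compute the new value with the nested computation, attempt atomicCAS, and retry on failure. The same shape in plain C++ with std::atomic (a sketch of the pattern only; the generated code additionally widens sub-32-bit elements and operates on the enclosing aligned 32-bit word):

  #include <atomic>
  #include <cstdio>

  template <typename T, typename BinaryOp>
  void AtomicApply(std::atomic<T>* target, T source, BinaryOp op) {
    T old_value = target->load();
    T new_value = op(old_value, source);  // new = operation(old, source)
    // On failure, compare_exchange_weak reloads old_value with the current
    // contents, just as the emitted loop stores the atomicCAS result back into
    // cas_old_output before retrying.
    while (!target->compare_exchange_weak(old_value, new_value)) {
      new_value = op(old_value, source);
    }
  }

  int main() {
    std::atomic<float> acc{1.0f};
    AtomicApply(&acc, 2.5f, [](float a, float b) { return a > b ? a : b; });
    std::printf("%f\n", acc.load());  // Prints 2.500000.
  }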
@@ -438,32 +424,32 @@ Status IrEmitter::HandleTupleSelect(HloInstruction* tuple_select) {
llvm_ir::EmitTupleSelect(GetIrArray(*tuple_select, *tuple_select),
GetIrArray(*pred, *tuple_select),
GetBasePointer(*on_true), GetBasePointer(*on_false),
- &ir_builder_, module_);
+ &b_, module_);
return Status::OK();
}
namespace {
-llvm::Value* Real(llvm::Value* x, llvm::IRBuilder<>* ir_builder) {
- return ir_builder->CreateExtractValue(x, {0});
-}
-
-llvm::Value* Imag(llvm::Value* x, llvm::IRBuilder<>* ir_builder) {
- return ir_builder->CreateExtractValue(x, {1});
-}
-
-std::pair<llvm::Value*, llvm::Value*> MultiplyComplex(
- llvm::Value* lhs_value, llvm::Value* rhs_value,
- llvm::IRBuilder<>* ir_builder) {
- llvm::Value* lhs_real = Real(lhs_value, ir_builder);
- llvm::Value* lhs_imag = Imag(lhs_value, ir_builder);
- llvm::Value* rhs_real = Real(rhs_value, ir_builder);
- llvm::Value* rhs_imag = Imag(rhs_value, ir_builder);
- llvm::Value* real_result1 = ir_builder->CreateFMul(lhs_real, rhs_real);
- llvm::Value* real_result2 = ir_builder->CreateFMul(lhs_imag, rhs_imag);
- llvm::Value* real_result = ir_builder->CreateFSub(real_result1, real_result2);
- llvm::Value* imag_result1 = ir_builder->CreateFMul(lhs_real, rhs_imag);
- llvm::Value* imag_result2 = ir_builder->CreateFMul(lhs_imag, rhs_real);
- llvm::Value* imag_result = ir_builder->CreateFAdd(imag_result1, imag_result2);
+llvm::Value* Real(llvm::Value* x, llvm::IRBuilder<>* b) {
+ return b->CreateExtractValue(x, {0});
+}
+
+llvm::Value* Imag(llvm::Value* x, llvm::IRBuilder<>* b) {
+ return b->CreateExtractValue(x, {1});
+}
+
+std::pair<llvm::Value*, llvm::Value*> MultiplyComplex(llvm::Value* lhs_value,
+ llvm::Value* rhs_value,
+ llvm::IRBuilder<>* b) {
+ llvm::Value* lhs_real = Real(lhs_value, b);
+ llvm::Value* lhs_imag = Imag(lhs_value, b);
+ llvm::Value* rhs_real = Real(rhs_value, b);
+ llvm::Value* rhs_imag = Imag(rhs_value, b);
+ llvm::Value* real_result1 = b->CreateFMul(lhs_real, rhs_real);
+ llvm::Value* real_result2 = b->CreateFMul(lhs_imag, rhs_imag);
+ llvm::Value* real_result = b->CreateFSub(real_result1, real_result2);
+ llvm::Value* imag_result1 = b->CreateFMul(lhs_real, rhs_imag);
+ llvm::Value* imag_result2 = b->CreateFMul(lhs_imag, rhs_real);
+ llvm::Value* imag_result = b->CreateFAdd(imag_result1, imag_result2);
return {real_result, imag_result};
}
} // namespace
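For reference, the renamed Real/Imag/MultiplyComplex helpers implement ordinary complex multiplication over the {real, imag} pair:

  (a + b*i) * (c + d*i) = (a*c - b*d) + (a*d + b*c)*i

real_result collects the two FMul/FSub terms and imag_result the two FMul/FAdd terms.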
@@ -479,25 +465,24 @@ Status IrEmitter::HandleDot(HloInstruction* dot) {
const Shape& rhs_shape = rhs_instruction->shape();
// TODO(b/110211620): Convert to use i32 index_type when it is possible.
- llvm::Type* index_type = ir_builder_.getInt64Ty();
+ llvm::Type* index_type = b_.getInt64Ty();
llvm_ir::IrArray::Index element_index(index_type);
if (ShapeUtil::IsScalar(lhs_shape) && ShapeUtil::IsScalar(rhs_shape)) {
// If the operands are scalar, don't emit any loops.
llvm::Value* lhs_value =
- lhs_array.EmitReadArrayElement(/*index=*/element_index, &ir_builder_);
+ lhs_array.EmitReadArrayElement(/*index=*/element_index, &b_);
llvm::Value* rhs_value =
- rhs_array.EmitReadArrayElement(/*index=*/element_index, &ir_builder_);
+ rhs_array.EmitReadArrayElement(/*index=*/element_index, &b_);
llvm::Value* result;
if (ShapeUtil::ElementIsComplex(lhs_shape)) {
- auto value = MultiplyComplex(lhs_value, rhs_value, &ir_builder_);
+ auto value = MultiplyComplex(lhs_value, rhs_value, &b_);
result = llvm::ConstantAggregateZero::get(lhs_array.GetElementLlvmType());
- result = ir_builder_.CreateInsertValue(result, value.first, {0});
- result = ir_builder_.CreateInsertValue(result, value.second, {1});
+ result = b_.CreateInsertValue(result, value.first, {0});
+ result = b_.CreateInsertValue(result, value.second, {1});
} else {
- result = ir_builder_.CreateFMul(lhs_value, rhs_value);
+ result = b_.CreateFMul(lhs_value, rhs_value);
}
- target_array.EmitWriteArrayElement(/*index=*/element_index, result,
- &ir_builder_);
+ target_array.EmitWriteArrayElement(/*index=*/element_index, result, &b_);
return Status::OK();
}
@@ -524,11 +509,11 @@ Status IrEmitter::HandleDot(HloInstruction* dot) {
// Create loop nests which loop through the LHS operand dimensions and the RHS
// operand dimensions. The reduction dimension of the LHS and RHS are handled
// in a separate innermost loop which performs the sum of products.
- llvm_ir::ForLoopNest loop_nest(IrName(dot), &ir_builder_);
- llvm_ir::IrArray::Index lhs_index = EmitOperandArrayLoopNest(
- lhs_array, lhs_reduction_dimension, "lhs", &loop_nest);
- llvm_ir::IrArray::Index rhs_index = EmitOperandArrayLoopNest(
- rhs_array, rhs_reduction_dimension, "rhs", &loop_nest);
+ llvm_ir::ForLoopNest loop_nest(IrName(dot), &b_);
+ llvm_ir::IrArray::Index lhs_index = loop_nest.EmitOperandArrayLoopNest(
+ lhs_array, /*dimension_to_skip=*/lhs_reduction_dimension, "lhs");
+ llvm_ir::IrArray::Index rhs_index = loop_nest.EmitOperandArrayLoopNest(
+ rhs_array, /*dimension_to_skip=*/rhs_reduction_dimension, "rhs");
// Create the reduction loop which does the sum of products reduction.
std::unique_ptr<llvm_ir::ForLoop> reduction_loop = loop_nest.AddLoop(
@@ -548,7 +533,7 @@ Status IrEmitter::HandleDot(HloInstruction* dot) {
llvm::Value* accum_address = llvm_ir::EmitAllocaAtFunctionEntry(
accum_type, // The pointee type of the alloca instruction.
"accum_address", // The name of the alloca instruction.
- &ir_builder_);
+ &b_);
// Initialize the accumulator in the preheader to zero.
new llvm::StoreInst(
@@ -562,27 +547,25 @@ Status IrEmitter::HandleDot(HloInstruction* dot) {
// updated_accum = accum + lhs_element * rhs_element
// *accum_address = updated_accum
TF_RET_CHECK(!reduction_loop->GetBodyBasicBlock()->empty());
- ir_builder_.SetInsertPoint(
+ b_.SetInsertPoint(
&*reduction_loop->GetBodyBasicBlock()->getFirstInsertionPt());
- llvm::Value* lhs_element =
- lhs_array.EmitReadArrayElement(lhs_index, &ir_builder_);
- llvm::Value* rhs_element =
- rhs_array.EmitReadArrayElement(rhs_index, &ir_builder_);
- llvm::Value* accum = ir_builder_.CreateLoad(accum_address);
+ llvm::Value* lhs_element = lhs_array.EmitReadArrayElement(lhs_index, &b_);
+ llvm::Value* rhs_element = rhs_array.EmitReadArrayElement(rhs_index, &b_);
+ llvm::Value* accum = b_.CreateLoad(accum_address);
llvm::Value* updated_accum;
if (ShapeUtil::ElementIsComplex(lhs_shape)) {
- auto value = MultiplyComplex(lhs_element, rhs_element, &ir_builder_);
- llvm::Value* accum_real = Real(accum, &ir_builder_);
- llvm::Value* real_sum = ir_builder_.CreateFAdd(accum_real, value.first);
- updated_accum = ir_builder_.CreateInsertValue(accum, real_sum, {0});
- llvm::Value* accum_imag = Imag(accum, &ir_builder_);
- llvm::Value* imag_sum = ir_builder_.CreateFAdd(accum_imag, value.second);
- updated_accum = ir_builder_.CreateInsertValue(updated_accum, imag_sum, {1});
+ auto value = MultiplyComplex(lhs_element, rhs_element, &b_);
+ llvm::Value* accum_real = Real(accum, &b_);
+ llvm::Value* real_sum = b_.CreateFAdd(accum_real, value.first);
+ updated_accum = b_.CreateInsertValue(accum, real_sum, {0});
+ llvm::Value* accum_imag = Imag(accum, &b_);
+ llvm::Value* imag_sum = b_.CreateFAdd(accum_imag, value.second);
+ updated_accum = b_.CreateInsertValue(updated_accum, imag_sum, {1});
} else {
- llvm::Value* product = ir_builder_.CreateFMul(lhs_element, rhs_element);
- updated_accum = ir_builder_.CreateFAdd(accum, product);
+ llvm::Value* product = b_.CreateFMul(lhs_element, rhs_element);
+ updated_accum = b_.CreateFAdd(accum, product);
}
- ir_builder_.CreateStore(updated_accum, accum_address);
+ b_.CreateStore(updated_accum, accum_address);
// After the reduction loop exits, store the accumulator into the target
// address. The index into the target address is the concatenation of the rhs
@@ -599,16 +582,15 @@ Status IrEmitter::HandleDot(HloInstruction* dot) {
target_index.push_back(rhs_index[dimension]);
}
}
- SetToFirstInsertPoint(reduction_loop->GetExitBasicBlock(), &ir_builder_);
+ SetToFirstInsertPoint(reduction_loop->GetExitBasicBlock(), &b_);
target_array.EmitWriteArrayElement(
target_index,
- ir_builder_.CreateLoad(
- accum_address), // The value written to the target array.
- &ir_builder_);
+ b_.CreateLoad(accum_address), // The value written to the target array.
+ &b_);
// Set the IR builder insert point to the exit basic block of the outermost
// loop. This ensures later instructions are inserted after this loop nest.
- ir_builder_.SetInsertPoint(loop_nest.GetOuterLoopExitBasicBlock());
+ b_.SetInsertPoint(loop_nest.GetOuterLoopExitBasicBlock());
return Status::OK();
}
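The loop nest emitted by HandleDot iterates the non-contracted lhs and rhs dimensions and accumulates a sum of products over the shared reduction dimension. Written as a naive host-side computation for 2-D operands contracting lhs dimension 1 with rhs dimension 0 (a sketch of the math, not of the IR emission; assumes non-empty operands):

  #include <vector>

  std::vector<std::vector<float>> NaiveDot(
      const std::vector<std::vector<float>>& lhs,
      const std::vector<std::vector<float>>& rhs) {
    size_t m = lhs.size(), k = lhs[0].size(), n = rhs[0].size();
    std::vector<std::vector<float>> out(m, std::vector<float>(n, 0.0f));
    for (size_t i = 0; i < m; ++i) {      // loops from the lhs loop nest
      for (size_t j = 0; j < n; ++j) {    // loops from the rhs loop nest
        float accum = 0.0f;               // accum_address, zero-initialized
        for (size_t r = 0; r < k; ++r) {  // the inner reduction loop
          accum += lhs[i][r] * rhs[r][j]; // updated_accum = accum + l * r
        }
        out[i][j] = accum;                // stored after the reduction loop exits
      }
    }
    return out;
  }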
@@ -650,11 +632,10 @@ Status IrEmitter::HandleReduce(HloInstruction* reduce) {
[=](const llvm_ir::IrArray::Index& index) -> StatusOr<llvm::Value*> {
// Initialize an accumulator with init_value.
llvm::AllocaInst* accumulator_addr =
- ir_builder_.CreateAlloca(llvm_ir::PrimitiveTypeToIrType(
+ b_.CreateAlloca(llvm_ir::PrimitiveTypeToIrType(
reduce->shape().element_type(), module_));
- ir_builder_.CreateStore(
- ir_builder_.CreateLoad(GetBasePointer(*init_value)),
- accumulator_addr);
+ b_.CreateStore(b_.CreateLoad(GetBasePointer(*init_value)),
+ accumulator_addr);
// The enclosing loops go over all the target elements. Now we have to
// compute the actual target element. For this, we build a new loop nest
@@ -662,12 +643,12 @@ Status IrEmitter::HandleReduce(HloInstruction* reduce) {
// AddLoopsForShapeOnDimensions will return an Index where induction
// Value*s are placed for each dimension in dimensions, and all the rest
// are nullptrs.
- llvm_ir::ForLoopNest loops(IrName(reduce, "inner"), &ir_builder_);
+ llvm_ir::ForLoopNest loops(IrName(reduce, "inner"), &b_);
const llvm_ir::IrArray::Index reduced_dims_index =
loops.AddLoopsForShapeOnDimensions(arg->shape(), dimensions,
"reduction_dim");
- SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &ir_builder_);
+ SetToFirstInsertPoint(loops.GetInnerLoopBodyBasicBlock(), &b_);
// Build a full index for the input argument, using reduced_dims_index
// as the base. In reduced_dims_index only the reduction dimensions are
@@ -686,13 +667,12 @@ Status IrEmitter::HandleReduce(HloInstruction* reduce) {
// Apply the reduction function to the loaded value.
llvm::Value* input_address =
- GetIrArray(*arg, *reduce)
- .EmitArrayElementAddress(input_index, &ir_builder_);
+ GetIrArray(*arg, *reduce).EmitArrayElementAddress(input_index, &b_);
TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
*function, {accumulator_addr, input_address}, accumulator_addr));
- SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &ir_builder_);
- return ir_builder_.CreateLoad(accumulator_addr);
+ SetToFirstInsertPoint(loops.GetOuterLoopExitBasicBlock(), &b_);
+ return b_.CreateLoad(accumulator_addr);
});
}
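HandleReduce initializes an accumulator from init_value, then loops over the reduced dimensions and applies the reduction computation to each input element. A host-side model of those semantics for a 2-D input reduced over dimension 1 (illustrative only):

  #include <functional>
  #include <vector>

  std::vector<float> Reduce2DOverDim1(
      const std::vector<std::vector<float>>& input, float init_value,
      const std::function<float(float, float)>& reducer) {
    std::vector<float> output;
    output.reserve(input.size());
    for (const auto& row : input) {
      float accumulator = init_value;                 // accumulator_addr
      for (float element : row) {                     // the "reduction_dim" loop
        accumulator = reducer(accumulator, element);  // nested computation call
      }
      output.push_back(accumulator);  // loaded after the inner loops exit
    }
    return output;
  }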
@@ -705,8 +685,8 @@ Status IrEmitter::HandleFusion(HloInstruction* fusion) {
for (HloInstruction* operand : fusion->operands()) {
parameter_arrays.push_back(GetIrArray(*operand, *fusion));
}
- GpuElementalIrEmitter elemental_emitter(hlo_module_config_, module_,
- &ir_builder_, GetNestedComputer());
+ GpuElementalIrEmitter elemental_emitter(hlo_module_config_, module_, &b_,
+ GetNestedComputer());
FusedIrEmitter fused_emitter(parameter_arrays, &elemental_emitter);
TF_RETURN_IF_ERROR(fusion->fused_expression_root()->Accept(&fused_emitter));
@@ -740,17 +720,16 @@ Status IrEmitter::HandleRng(HloInstruction* random) {
ElementalIrEmitter::HloToElementGeneratorMap operand_to_generator;
for (const HloInstruction* operand : random->operands()) {
operand_to_generator[operand] = [=](const llvm_ir::IrArray::Index& index) {
- return GetIrArray(*operand, *random)
- .EmitReadArrayElement(index, &ir_builder_);
+ return GetIrArray(*operand, *random).EmitReadArrayElement(index, &b_);
};
}
// Emits a single-threaded loop because the loop body generated by the element
// generator for Rng can't be parallelized (b/32333178).
return llvm_ir::LoopEmitter(
- GpuElementalIrEmitter(hlo_module_config_, module_, &ir_builder_,
+ GpuElementalIrEmitter(hlo_module_config_, module_, &b_,
GetNestedComputer())
.MakeElementGenerator(random, operand_to_generator),
- GetIrArray(*random, *random), &ir_builder_)
+ GetIrArray(*random, *random), &b_)
.EmitLoop(IrName(random));
}
@@ -777,34 +756,9 @@ Status IrEmitter::HandleBatchNormGrad(HloInstruction*) {
"to a cudnn CustomCall using CudnnBatchNormRewriter.");
}
-llvm_ir::IrArray::Index IrEmitter::EmitOperandArrayLoopNest(
- const llvm_ir::IrArray& operand_array, int64 reduction_dimension,
- tensorflow::StringPiece name_suffix, llvm_ir::ForLoopNest* loop_nest) {
- // Prepares the dimension list we will use to emit the loop nest. Outermost
- // loops are added first. Add loops in major-to-minor order, and skip the
- // reduction dimension.
- std::vector<int64> dimensions;
- const Shape& shape = operand_array.GetShape();
- for (int i = 0; i < LayoutUtil::MinorToMajor(shape).size(); ++i) {
- int64 dimension = LayoutUtil::Major(shape.layout(), i);
- if (dimension != reduction_dimension) {
- dimensions.push_back(dimension);
- }
- }
-
- // Create loop nest with one for-loop for each dimension of the
- // output.
- llvm_ir::IrArray::Index index =
- loop_nest->AddLoopsForShapeOnDimensions(shape, dimensions, name_suffix);
- // Verify every dimension except the reduction dimension was set in the index.
- for (size_t dimension = 0; dimension < index.size(); ++dimension) {
- if (dimension == reduction_dimension) {
- DCHECK_EQ(nullptr, index[dimension]);
- } else {
- DCHECK_NE(nullptr, index[dimension]);
- }
- }
- return index;
+Status IrEmitter::HandleIota(HloInstruction*) {
+ // TODO(b/64798317): implement iota on GPU.
+ return Unimplemented("Iota is not implemented on GPU.");
}
StatusOr<llvm::Value*> IrEmitter::ComputeNestedElement(
@@ -813,16 +767,16 @@ StatusOr<llvm::Value*> IrEmitter::ComputeNestedElement(
llvm::Value* return_buffer = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(
computation.root_instruction()->shape().element_type(), module_),
- "return_buffer", &ir_builder_);
+ "return_buffer", &b_);
std::vector<llvm::Value*> parameter_buffers;
for (llvm::Value* parameter_element : parameter_elements) {
parameter_buffers.push_back(llvm_ir::EmitAllocaAtFunctionEntry(
- parameter_element->getType(), "parameter_buffer", &ir_builder_));
- ir_builder_.CreateStore(parameter_element, parameter_buffers.back());
+ parameter_element->getType(), "parameter_buffer", &b_));
+ b_.CreateStore(parameter_element, parameter_buffers.back());
}
TF_RETURN_IF_ERROR(EmitCallToNestedComputation(computation, parameter_buffers,
return_buffer));
- return ir_builder_.CreateLoad(return_buffer);
+ return b_.CreateLoad(return_buffer);
}
} // namespace gpu
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter.h b/tensorflow/compiler/xla/service/gpu/ir_emitter.h
index d2dd335f10..e89967a378 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter.h
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter.h
@@ -79,7 +79,6 @@ class IrEmitter : public DfsHloVisitorWithDefault {
Status HandleCrossReplicaSum(HloInstruction* crs) override;
Status HandleInfeed(HloInstruction* infeed) override;
Status HandleOutfeed(HloInstruction* outfeed) override;
- Status HandleSort(HloInstruction* sort) override;
Status HandleSend(HloInstruction* send) override;
Status HandleSendDone(HloInstruction* send_done) override;
Status HandleRecv(HloInstruction* recv) override;
@@ -96,6 +95,7 @@ class IrEmitter : public DfsHloVisitorWithDefault {
Status HandleBatchNormInference(HloInstruction* batch_norm) override;
Status HandleBatchNormTraining(HloInstruction* batch_norm) override;
Status HandleBatchNormGrad(HloInstruction* batch_norm) override;
+ Status HandleIota(HloInstruction* iota) override;
Status FinishVisit(HloInstruction* root) override { return Status::OK(); }
@@ -162,7 +162,7 @@ class IrEmitter : public DfsHloVisitorWithDefault {
// The following fields track the IR emission state. According to LLVM memory
// management rules, their memory is owned by the module.
- llvm::IRBuilder<> ir_builder_;
+ llvm::IRBuilder<> b_;
// Mapping from HLO to its underlying LLVM value.
HloToIrBindings bindings_;
@@ -171,17 +171,6 @@ class IrEmitter : public DfsHloVisitorWithDefault {
const HloModuleConfig& hlo_module_config_;
private:
- // Emits a series of nested loops for iterating over an operand array in the
- // dot operation. Loops are constructed in major to minor dimension layout
- // order. No loop is emitted for the given reduction_dimension. The function
- // returns an IrArray index for the given operand_array containing the indvars
- // of the loops. All dimensions of the index are filled except for the
- // reduction dimension. name_suffix is the string to append to the names of
- // LLVM constructs (eg, basic blocks) constructed by this method.
- llvm_ir::IrArray::Index EmitOperandArrayLoopNest(
- const llvm_ir::IrArray& operand_array, int64 reduction_dimension,
- tensorflow::StringPiece name_suffix, llvm_ir::ForLoopNest* loop_nest);
-
// A helper method for EmitAtomicOperationForNestedComputation. Certain
// computations, such as floating-point addition and integer maximization, can
// be simply implemented using an LLVM atomic instruction. If "computation" is
@@ -198,6 +187,13 @@ class IrEmitter : public DfsHloVisitorWithDefault {
llvm::Value* output_address,
llvm::Value* source_address);
+ // A helper method for HandleSort(). It adds the inner comparison loop where
+ // we compare elements pointed to by 'keys_index' and 'compare_keys_index'.
+ void EmitCompareLoop(int64 dimension_to_sort,
+ const llvm_ir::IrArray::Index& keys_index,
+ const llvm_ir::IrArray::Index& compare_keys_index,
+ const llvm_ir::IrArray& keys_array);
+
StatusOr<llvm::Value*> ComputeNestedElement(
const HloComputation& computation,
tensorflow::gtl::ArraySlice<llvm::Value*> parameter_elements);
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_nested.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter_nested.cc
index c9574c87a3..5c827e5f9c 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter_nested.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_nested.cc
@@ -70,10 +70,10 @@ llvm::Function* IrEmitterNested::EmitBasePointersForNestedComputation(
argument_dereferenceable_bytes.push_back(root_size);
}
// The base pointer of the memory block for all pre-allocated temp buffers.
- argument_types.push_back(ir_builder_.getInt8PtrTy());
+ argument_types.push_back(b_.getInt8PtrTy());
llvm::FunctionType* function_type =
- llvm::FunctionType::get(ir_builder_.getVoidTy(), argument_types, false);
+ llvm::FunctionType::get(b_.getVoidTy(), argument_types, false);
llvm::Function* function = llvm::Function::Create(
function_type, // The function type.
llvm::GlobalValue::InternalLinkage, // The linkage type.
@@ -96,8 +96,7 @@ llvm::Function* IrEmitterNested::EmitBasePointersForNestedComputation(
llvm::BasicBlock::Create(function->getContext(), "entry", function);
// Emit a "return void" at entry_bb's end, and set the insert point before
// that return instruction.
- ir_builder_.SetInsertPoint(
- llvm::ReturnInst::Create(function->getContext(), entry_bb));
+ b_.SetInsertPoint(llvm::ReturnInst::Create(function->getContext(), entry_bb));
std::vector<const HloInstruction*> non_io_hlos;
for (const auto* hlo : nested_computation.instructions()) {
@@ -127,20 +126,17 @@ Status IrEmitterNested::EmitTargetElementLoop(
target_arrays.push_back(GetIrArray(hlo, hlo, {i}));
}
TF_RETURN_IF_ERROR(
- llvm_ir::LoopEmitter(element_generator, target_arrays, &ir_builder_)
- .EmitLoop());
+ llvm_ir::LoopEmitter(element_generator, target_arrays, &b_).EmitLoop());
std::vector<llvm::Value*> tuple_operand_ptrs;
tuple_operand_ptrs.reserve(num_elems);
for (const llvm_ir::IrArray& array : target_arrays) {
tuple_operand_ptrs.push_back(array.GetBasePointer());
}
- llvm_ir::EmitTuple(GetIrArray(hlo, hlo), tuple_operand_ptrs, &ir_builder_,
- module_);
+ llvm_ir::EmitTuple(GetIrArray(hlo, hlo), tuple_operand_ptrs, &b_, module_);
return Status::OK();
}
- return llvm_ir::LoopEmitter(element_generator, GetIrArray(hlo, hlo),
- &ir_builder_)
+ return llvm_ir::LoopEmitter(element_generator, GetIrArray(hlo, hlo), &b_)
.EmitLoop();
}
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
index 673ba530df..1f31a7f36b 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
@@ -59,10 +59,11 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h"
#include "tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
-#include "tensorflow/compiler/xla/service/llvm_ir/ops.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/sort_util.h"
#include "tensorflow/compiler/xla/service/llvm_ir/tuple_ops.h"
#include "tensorflow/compiler/xla/service/name_uniquer.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -71,6 +72,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/window_util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/core/bits.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/logging.h"
@@ -213,7 +215,7 @@ llvm::Function* IrEmitterUnnested::BuildKernelPrototype(
llvm::LLVMContext& context = module->getContext();
llvm::FunctionType* kernel_type = llvm::FunctionType::get(
/*Result=*/llvm::Type::getVoidTy(context),
- std::vector<llvm::Type*>(args.size(), ir_builder_.getInt8PtrTy()),
+ std::vector<llvm::Type*>(args.size(), b_.getInt8PtrTy()),
/*isVarArg=*/false);
llvm::Function* kernel =
llvm::Function::Create(kernel_type, llvm::GlobalValue::ExternalLinkage,
@@ -230,7 +232,9 @@ llvm::Function* IrEmitterUnnested::BuildKernelPrototype(
kernel->addDereferenceableAttr(arg_no + 1, alloc->size());
kernel->addParamAttr(
arg_no, llvm::Attribute::get(context, llvm::Attribute::Alignment,
- kCudaMallocAlignBytes));
+ alloc->is_entry_computation_parameter()
+ ? kEntryParameterAlignBytes
+ : kXlaAllocatedBufferAlignBytes));
if (alloc->IsPreallocatedTempBuffer()) {
fn_arg->setName("temp_buf");
@@ -249,7 +253,7 @@ llvm::Function* IrEmitterUnnested::BuildKernelPrototype(
nvvm_annotations_node->addOperand(llvm::MDNode::get(
context, {llvm::ConstantAsMetadata::get(kernel),
llvm::MDString::get(context, "kernel"),
- llvm::ConstantAsMetadata::get(ir_builder_.getInt32(1))}));
+ llvm::ConstantAsMetadata::get(b_.getInt32(1))}));
// Update the insert point to the entry basic block.
llvm::BasicBlock* entry_bb =
@@ -257,7 +261,7 @@ llvm::Function* IrEmitterUnnested::BuildKernelPrototype(
// Emit a "return void" at entry_bb's end, and set the insert point before
// that return instruction.
- ir_builder_.SetInsertPoint(llvm::ReturnInst::Create(context, entry_bb));
+ b_.SetInsertPoint(llvm::ReturnInst::Create(context, entry_bb));
return kernel;
}
@@ -295,7 +299,7 @@ int ComputeMaxUnrollFactor(const HloInstruction* hlo) {
// range of i32.
// Otherwise, the return type is i64.
llvm::Type* GetIndexTypeForKernel(const HloInstruction* hlo, int64 launch_size,
- llvm::IRBuilder<>* ir_builder) {
+ llvm::IRBuilder<>* b) {
// Find the unnested hlo instruction for which the kernel is generated.
const HloInstruction* unnested_hlo = hlo;
const HloComputation* computation = hlo->parent();
@@ -316,7 +320,7 @@ llvm::Type* GetIndexTypeForKernel(const HloInstruction* hlo, int64 launch_size,
return in_range;
};
- llvm::Type* i64_ty = ir_builder->getInt64Ty();
+ llvm::Type* i64_ty = b->getInt64Ty();
// Check launch dimension
if (!IsInt32(launch_size)) {
return i64_ty;
@@ -345,7 +349,7 @@ llvm::Type* GetIndexTypeForKernel(const HloInstruction* hlo, int64 launch_size,
}
}
- return ir_builder->getInt32Ty();
+ return b->getInt32Ty();
}
} // namespace
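GetIndexTypeForKernel prefers a 32-bit index type when the launch bound and the relevant shape extents all fit in int32, falling back to int64 otherwise. The policy in miniature (a sketch, not the XLA helper):

  #include <cstdint>
  #include <limits>
  #include <vector>

  bool FitsInInt32(int64_t v) {
    return v >= std::numeric_limits<int32_t>::min() &&
           v <= std::numeric_limits<int32_t>::max();
  }

  bool UseInt32Index(int64_t launch_size, const std::vector<int64_t>& extents) {
    if (!FitsInInt32(launch_size)) return false;  // Check the launch dimension.
    for (int64_t extent : extents) {              // Then the shapes involved.
      if (!FitsInInt32(extent)) return false;
    }
    return true;
  }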
@@ -600,8 +604,8 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) {
parameter_arrays.push_back(GetIrArray(*operand, *fusion));
}
GpuElementalIrEmitter elemental_emitter(
- hlo_module_config_, ir_emitter_context_->llvm_module(),
- &ir_builder_, GetNestedComputer());
+ hlo_module_config_, ir_emitter_context_->llvm_module(), &b_,
+ GetNestedComputer());
FusedIrEmitter fused_emitter(parameter_arrays, &elemental_emitter);
TF_RETURN_IF_ERROR(root->Accept(&fused_emitter));
@@ -674,7 +678,7 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) {
}
GpuElementalIrEmitter elemental_emitter(hlo_module_config_,
ir_emitter_context_->llvm_module(),
- &ir_builder_, GetNestedComputer());
+ &b_, GetNestedComputer());
// Shape of the dynamic-update-slice's "update" operand.
Shape update_shape = root->operand(1)->shape();
@@ -692,7 +696,7 @@ Status IrEmitterUnnested::HandleFusion(HloInstruction* fusion) {
return llvm_ir::EmitParallelFusedDynamicUpdateSliceInPlace(
fusion, operand_arrays, output_array, &elemental_emitter,
- launch_dimensions, &ir_builder_);
+ launch_dimensions, &b_);
}
if (ImplementedAsGemm(*fusion)) {
@@ -740,11 +744,11 @@ Status IrEmitterUnnested::EmitExtraOutputsForReduce(
const HloInstruction* output = reduce->parent()->FusionInstruction();
llvm::Value* extra_output_address =
GetIrArray(*output, *output, extra_output_gens[i].second)
- .EmitArrayElementAddress(index, &ir_builder_,
+ .EmitArrayElementAddress(index, &b_,
"extra_output_element_address");
TF_ASSIGN_OR_RETURN(llvm::Value* const extra_output_ir_value,
extra_output_gens[i].first(index));
- ir_builder_.CreateStore(extra_output_ir_value, extra_output_address);
+ b_.CreateStore(extra_output_ir_value, extra_output_address);
}
return Status::OK();
}
@@ -774,8 +778,8 @@ Status IrEmitterUnnested::EmitReductionToScalar(
LaunchDimensions launch_dimensions = CalculateLaunchDimensions(
tiled_input_shape, ir_emitter_context_->device_description());
- llvm::Type* index_ty = GetIndexTypeForKernel(
- reduce, launch_dimensions.launch_bound(), &ir_builder_);
+ llvm::Type* index_ty =
+ GetIndexTypeForKernel(reduce, launch_dimensions.launch_bound(), &b_);
auto index_typed_constant = [&](uint64 c) -> llvm::Constant* {
return llvm::ConstantInt::get(index_ty, c);
@@ -825,52 +829,51 @@ Status IrEmitterUnnested::EmitReductionToScalar(
llvm_ir::PrimitiveTypeToIrType(input_shape.element_type(), module_);
std::vector<llvm::Value*> partial_reduction_result_addresses;
for (int i = 0; i != num_reduces; ++i) {
- llvm::Value* partial_reduction_result_address = ir_builder_.CreateAlloca(
- element_ir_type, /*ArraySize=*/nullptr,
- "partial_reduction_result." + llvm::Twine(i));
+ llvm::Value* partial_reduction_result_address =
+ b_.CreateAlloca(element_ir_type, /*ArraySize=*/nullptr,
+ "partial_reduction_result." + llvm::Twine(i));
TF_ASSIGN_OR_RETURN(llvm::Value* const init_ir_value,
init_value_gens[i](IrArray::Index(index_ty)));
- ir_builder_.CreateStore(init_ir_value, partial_reduction_result_address);
+ b_.CreateStore(init_ir_value, partial_reduction_result_address);
partial_reduction_result_addresses.push_back(
partial_reduction_result_address);
}
llvm::Value* x_in_tiles = tile_index[0];
- x_in_tiles = ir_builder_.CreateZExtOrTrunc(x_in_tiles, index_ty);
+ x_in_tiles = b_.CreateZExtOrTrunc(x_in_tiles, index_ty);
// Emit an inner for-loop that reduces the elements in the tile.
auto emit_tile_element_loop = [=](bool tile_in_bounds) -> Status {
std::unique_ptr<llvm_ir::ForLoop> tile_element_loop =
- llvm_ir::ForLoop::EmitForLoop("element_id_in_tile",
- index_typed_constant(0),
- index_typed_constant(kTileSize),
- index_typed_constant(1), &ir_builder_);
+ llvm_ir::ForLoop::EmitForLoop(
+ "element_id_in_tile", index_typed_constant(0),
+ index_typed_constant(kTileSize), index_typed_constant(1), &b_);
// Emit the body of the partial reduction loop.
llvm_ir::SetToFirstInsertPoint(tile_element_loop->GetBodyBasicBlock(),
- &ir_builder_);
- llvm::Value* x = ir_builder_.CreateNSWAdd(
- ir_builder_.CreateNSWMul(x_in_tiles, index_typed_constant(kTileSize)),
+ &b_);
+ llvm::Value* x = b_.CreateNSWAdd(
+ b_.CreateNSWMul(x_in_tiles, index_typed_constant(kTileSize)),
tile_element_loop->GetIndVarValue());
// Unless we know the tile is entirely in bounds, we have to emit a
// x-in-bounds check before reading from the input.
if (!tile_in_bounds) {
llvm_ir::LlvmIfData if_data = llvm_ir::EmitIfThenElse(
- ir_builder_.CreateICmpULT(x, index_typed_constant(num_elems)),
- "x_in_bounds", &ir_builder_);
+ b_.CreateICmpULT(x, index_typed_constant(num_elems)), "x_in_bounds",
+ &b_);
// Emit code that reads the input element and accumulates it to
// the partial reduction result.
- llvm_ir::SetToFirstInsertPoint(if_data.true_block, &ir_builder_);
+ llvm_ir::SetToFirstInsertPoint(if_data.true_block, &b_);
}
IrArray::Index input_index(
- /*linear=*/x, input_shape, &ir_builder_);
- llvm::Value* input_address = ir_builder_.CreateAlloca(element_ir_type);
+ /*linear=*/x, input_shape, &b_);
+ llvm::Value* input_address = b_.CreateAlloca(element_ir_type);
for (int i = 0; i != num_reduces; ++i) {
TF_ASSIGN_OR_RETURN(llvm::Value* const input_ir_value,
input_gens[i](input_index));
- ir_builder_.CreateStore(input_ir_value, input_address);
+ b_.CreateStore(input_ir_value, input_address);
TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
*reducers[i],
{partial_reduction_result_addresses[i], input_address},
@@ -881,49 +884,48 @@ Status IrEmitterUnnested::EmitReductionToScalar(
// x_end = kTileSize + x_in_tiles * kTileSize, i.e., the location that's
// immediately beyond the tile.
- llvm::Value* x_end = ir_builder_.CreateNSWAdd(
+ llvm::Value* x_end = b_.CreateNSWAdd(
index_typed_constant(kTileSize),
- ir_builder_.CreateNSWMul(x_in_tiles, index_typed_constant(kTileSize)));
+ b_.CreateNSWMul(x_in_tiles, index_typed_constant(kTileSize)));
// The tile is entirely in bound if all_threads_in_bounds or
// x_end <= num_elems.
- llvm::Value* tile_in_bounds = ir_builder_.CreateOr(
- ir_builder_.CreateICmpULE(x_end, index_typed_constant(num_elems)),
- ir_builder_.getInt1(all_threads_in_bounds));
+ llvm::Value* tile_in_bounds =
+ b_.CreateOr(b_.CreateICmpULE(x_end, index_typed_constant(num_elems)),
+ b_.getInt1(all_threads_in_bounds));
llvm_ir::LlvmIfData if_tile_in_bounds_data =
- llvm_ir::EmitIfThenElse(tile_in_bounds, "tile_in_bounds", &ir_builder_);
- llvm_ir::SetToFirstInsertPoint(if_tile_in_bounds_data.true_block,
- &ir_builder_);
+ llvm_ir::EmitIfThenElse(tile_in_bounds, "tile_in_bounds", &b_);
+ llvm_ir::SetToFirstInsertPoint(if_tile_in_bounds_data.true_block, &b_);
TF_RETURN_IF_ERROR(emit_tile_element_loop(/*tile_in_bounds=*/true));
- llvm_ir::SetToFirstInsertPoint(if_tile_in_bounds_data.false_block,
- &ir_builder_);
+ llvm_ir::SetToFirstInsertPoint(if_tile_in_bounds_data.false_block, &b_);
TF_RETURN_IF_ERROR(emit_tile_element_loop(/*tile_in_bounds=*/false));
// After the if-then-else statement on tile_in_bounds, emit calls to
// shfl_down that accumulate the partial reduction results of all threads
// from the warp.
- llvm_ir::SetToFirstInsertPoint(if_tile_in_bounds_data.after_block,
- &ir_builder_);
+ llvm_ir::SetToFirstInsertPoint(if_tile_in_bounds_data.after_block, &b_);
int bit_width = llvm_ir::GetSizeInBits(element_ir_type);
// bitcast cannot be applied to aggregate types (even packed ones), so we
// instead bitcast addresses of load/store to intN* of the same bit-width.
llvm::Type* shuffle_ir_type = element_ir_type->isStructTy()
- ? ir_builder_.getIntNTy(bit_width)
+ ? b_.getIntNTy(bit_width)
: element_ir_type;
for (int shuffle_distance = kWarpSize / 2; shuffle_distance >= 1;
shuffle_distance /= 2) {
- llvm::Value* result_from_other_lane = ir_builder_.CreateAlloca(
- element_ir_type, nullptr, "result_from_other_lane");
+ llvm::Value* result_from_other_lane =
+ b_.CreateAlloca(element_ir_type, nullptr, "result_from_other_lane");
for (int i = 0; i != num_reduces; ++i) {
- llvm::Value* partial_reduction_result = ir_builder_.CreateLoad(
- ir_builder_.CreateBitCast(partial_reduction_result_addresses[i],
- shuffle_ir_type->getPointerTo()),
+ llvm::Value* partial_reduction_result = b_.CreateLoad(
+ b_.CreateBitCast(partial_reduction_result_addresses[i],
+ shuffle_ir_type->getPointerTo()),
"partial_reduction_result");
- ir_builder_.CreateStore(
- EmitShuffleDown(partial_reduction_result,
- ir_builder_.getInt32(shuffle_distance),
- &ir_builder_),
- ir_builder_.CreateBitCast(result_from_other_lane,
- shuffle_ir_type->getPointerTo()));
+ CHECK_EQ(launch_dimensions.threads_per_block() % kWarpSize, 0)
+ << "Requires block size to be a multiple of the warp size, otherwise "
+ "we will read undefined elements.";
+ b_.CreateStore(
+ EmitFullWarpShuffleDown(partial_reduction_result,
+ b_.getInt32(shuffle_distance), &b_),
+ b_.CreateBitCast(result_from_other_lane,
+ shuffle_ir_type->getPointerTo()));
TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
*reducers[i],
{partial_reduction_result_addresses[i], result_from_other_lane},
@@ -937,24 +939,23 @@ Status IrEmitterUnnested::EmitReductionToScalar(
// Emit an atomic operation that accumulates the partial reduction result of
// lane 0 (which holds the partially accumulated result for its warp) to the
// output element.
- llvm::Value* lane_id = ir_builder_.CreateURem(
- x_in_tiles, index_typed_constant(kWarpSize), "lane_id");
+ llvm::Value* lane_id =
+ b_.CreateURem(x_in_tiles, index_typed_constant(kWarpSize), "lane_id");
llvm_ir::LlvmIfData if_lane_id_is_zero_data = llvm_ir::EmitIfThenElse(
- ir_builder_.CreateICmpEQ(lane_id, index_typed_constant(0)),
- "lane_id_is_zero", &ir_builder_);
- llvm_ir::SetToFirstInsertPoint(if_lane_id_is_zero_data.true_block,
- &ir_builder_);
+ b_.CreateICmpEQ(lane_id, index_typed_constant(0)), "lane_id_is_zero",
+ &b_);
+ llvm_ir::SetToFirstInsertPoint(if_lane_id_is_zero_data.true_block, &b_);
for (int i = 0; i != num_reduces; ++i) {
llvm::Value* output_address =
GetIrArray(*output, *output, reduce_output_shapes[i])
.EmitArrayElementAddress(
IrArray::Index(
- /*linear=*/ir_builder_.getInt64(0),
+ /*linear=*/b_.getInt64(0),
ShapeUtil::GetSubshape(output->shape(),
reduce_output_shapes[i]),
- &ir_builder_),
- &ir_builder_, "output_element_address");
+ &b_),
+ &b_, "output_element_address");
TF_RETURN_IF_ERROR(EmitAtomicOperationForNestedComputation(
*reducers[i], output_address, partial_reduction_result_addresses[i]));
}
@@ -968,7 +969,7 @@ Status IrEmitterUnnested::EmitReductionToScalar(
static_cast<SequentialThunk*>(LastThunk())->thunks().back().get(),
ir_emitter_context_->llvm_module());
return ParallelLoopEmitter(loop_body_emitter, tiled_input_shape,
- launch_dimensions, &ir_builder_)
+ launch_dimensions, &b_)
.EmitLoop(IrName(reduce), index_ty);
}
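The shfl_down sequence emitted above is the usual log2(kWarpSize) in-warp tree reduction. A host-side sketch of the same control flow, where reducer and shuffle_down stand in for the nested reduction computation and for the full-warp shuffle helper (EmitFullWarpShuffleDown) that the patch calls:

constexpr int kWarpSize = 32;

// After log2(kWarpSize) rounds, lane 0 holds the reduction over the whole
// warp; the other lanes only hold partial sums, which is why the emitted code
// lets lane 0 alone perform the atomic update of the output element.
template <typename T, typename Reducer, typename ShuffleDown>
T WarpReduce(T partial, Reducer reducer, ShuffleDown shuffle_down) {
  for (int distance = kWarpSize / 2; distance >= 1; distance /= 2) {
    partial = reducer(partial, shuffle_down(partial, distance));
  }
  return partial;
}

This is also why the new CHECK_EQ insists that the block size is a multiple of kWarpSize: a partial warp would shuffle in values from lanes that never initialized their partial results.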
@@ -981,8 +982,8 @@ Status IrEmitterUnnested::EmitColumnReduction(
tensorflow::gtl::ArraySlice<
std::pair<llvm_ir::ElementGenerator, ShapeIndex>>
extra_output_gens) {
- // Divide the input matrix into tiles of size Kx1. For example, when the
- // input matrix is 4x4 and K=2, the tiled matrix looks like
+ // Divide the input matrix into tiles of size KxL. For example, when the
+ // input matrix is 4x4, K=2, and L=1, the tiled matrix looks like
//
// 0123
// 0123
@@ -994,46 +995,64 @@ Status IrEmitterUnnested::EmitColumnReduction(
//
// We choose 128 as the tile size based on empirical evidence. It's big enough
// to reduce the amount of atomic adds in the end, maximizing the memory
- // bandwidth.
- constexpr int64 kTileSize = 128;
+ // bandwidth. A tile width of 2 allows for high memory bandwidth utilization
+ // on 16b input data.
+ constexpr int64 kTileHeight = 128;
+ constexpr int64 kTileWidth = 2;
- // If the height is not a multiple of the tile size, we pad the bottom of the
+ // If the height is not a multiple of kTileHeight, we pad the bottom of the
// input matrix.
- const int64 height_in_tiles = CeilOfRatio(height, kTileSize);
- Shape tiled_input_shape = ShapeUtil::MakeShapeWithLayout(
- reduce->shape().element_type(), {height_in_tiles, width}, {1, 0});
+ const int64 height_in_tiles = CeilOfRatio(height, kTileHeight);
+ // If width is not a multiple of kTileWidth, the rightmost thread will process
+ // fewer input elements.
+ const int64 width_in_tiles = CeilOfRatio(width, kTileWidth);
+ Shape tiled_input_shape =
+ ShapeUtil::MakeShapeWithLayout(reduce->shape().element_type(),
+ {height_in_tiles, width_in_tiles}, {1, 0});
LaunchDimensions launch_dimensions = CalculateLaunchDimensions(
tiled_input_shape, ir_emitter_context_->device_description());
// TODO(b/110211620): Convert to use i32 index_type when it is possible.
- llvm::Type* index_ty = ir_builder_.getInt64Ty();
+ llvm::Type* index_ty = b_.getInt64Ty();
auto index_typed_constant = [&](uint64 c) -> llvm::Constant* {
return llvm::ConstantInt::get(index_ty, c);
};
// for (linear_index = threadIdx.x + blockIdx.x * blockDim.x;
- // linear_index < height_in_tiles * width;
+ // linear_index < height_in_tiles * width_in_tiles;
// linear_index += blockDim.x * gridDim.x) {
- // y_in_tiles = linear_index / width;
- // x = linear_index % width;
+ // y_in_tiles = linear_index / width_in_tiles;
+ // x_in_tiles = linear_index % width_in_tiles;
//
- // partial_result = init_value;
- // if (height % kTileSize == 0 ||
- // y_in_tiles * kTileSize + kTileSize <= height) {
- // for (element_id_in_tile : range(kTileSize)) {
- // y = y_in_tiles * kTileSize + element_id_in_tile;
- // partial_result = Reducer(partial_result, input[y][x]);
+ // partial_results[kTileWidth] = init_values;
+ // tile_in_y_bounds = height % kTileHeight == 0 ||
+ // y_in_tiles * kTileHeight + kTileHeight <= height;
+ // tile_in_x_bounds = width % kTileWidth == 0 ||
+ // x_in_tiles * kTileWidth + kTileWidth <= width;
+ // // The implementation handles y and x bound checks separately.
+ // if (tile_in_y_bounds && tile_in_x_bounds) {
+ // for (y_offset : range(kTileHeight)) {
+ // y = y_in_tiles * kTileHeight + y_offset;
+ // for (x_offset : range(kTileWidth)) {
+ // x = x_in_tiles * kTileWidth + x_offset;
+ //         partial_results[x_offset] = Reducer(partial_results[x_offset], input[y][x]);
+ // }
// }
// } else {
- // for (element_id_in_tile : range(kTileSize)) {
- // y = y_in_tiles * kTileSize + element_id_in_tile;
- // if (y < height) {
- // partial_result = Reducer(partial_result, input[y][x]);
+ // for (y_offset : range(kTileHeight)) {
+ // y = y_in_tiles * kTileHeight + y_offset;
+ //       for (x_offset : range(kTileWidth)) {
+ // x = x_in_tiles * kTileWidth + x_offset;
+ // if (y < height && x < width) {
+ //           partial_results[x_offset] = Reducer(partial_results[x_offset], input[y][x]);
+ // }
// }
// }
// }
- // AtomicReducer(&output[x], partial_result);
+ // for (x_offset : range(kTileWidth)) {
+ //     AtomicReducer(&output[x_in_tiles * kTileWidth + x_offset],
+ //                   partial_results[x_offset]);
+ // }
// }
auto loop_body_emitter = [=](const IrArray::Index& tile_index) -> Status {
const int num_reduces = reducers.size();
@@ -1042,51 +1061,65 @@ Status IrEmitterUnnested::EmitColumnReduction(
llvm_ir::PrimitiveTypeToIrType(input_shape.element_type(), module_);
std::vector<llvm::Value*> partial_reduction_result_addresses;
for (int i = 0; i != num_reduces; ++i) {
- llvm::Value* partial_reduction_result_address = ir_builder_.CreateAlloca(
- element_ir_type, /*ArraySize=*/nullptr,
- "partial_reduction_result." + llvm::Twine(i));
- TF_ASSIGN_OR_RETURN(llvm::Value* const init_ir_value,
- init_value_gens[i](IrArray::Index(index_ty)));
- ir_builder_.CreateStore(init_ir_value, partial_reduction_result_address);
- partial_reduction_result_addresses.push_back(
- partial_reduction_result_address);
+ for (int x_offset = 0; x_offset < kTileWidth; ++x_offset) {
+ llvm::Value* partial_reduction_result_address =
+ b_.CreateAlloca(element_ir_type, /*ArraySize=*/nullptr,
+ "partial_reduction_result." +
+ llvm::Twine(i * kTileWidth + x_offset));
+ TF_ASSIGN_OR_RETURN(llvm::Value* const init_ir_value,
+ init_value_gens[i](IrArray::Index(index_ty)));
+ b_.CreateStore(init_ir_value, partial_reduction_result_address);
+ partial_reduction_result_addresses.push_back(
+ partial_reduction_result_address);
+ }
}
// Emit an inner for-loop that partially reduces the elements in the given
// tile.
llvm::Value* y_in_tiles = tile_index[0];
- llvm::Value* x = tile_index[1];
+ llvm::Value* x_in_tiles = tile_index[1];
- y_in_tiles = ir_builder_.CreateZExtOrTrunc(y_in_tiles, index_ty);
- x = ir_builder_.CreateZExtOrTrunc(x, index_ty);
+ y_in_tiles = b_.CreateZExtOrTrunc(y_in_tiles, index_ty);
+ x_in_tiles = b_.CreateZExtOrTrunc(x_in_tiles, index_ty);
- auto emit_tile_element_loop = [=](bool tile_in_bounds) -> Status {
+ auto emit_tile_element_loop = [=](bool tile_in_y_bounds,
+ bool tile_in_x_bounds) -> Status {
std::unique_ptr<llvm_ir::ForLoop> tile_element_loop =
- llvm_ir::ForLoop::EmitForLoop("element_id_in_tile",
- index_typed_constant(0),
- index_typed_constant(kTileSize),
- index_typed_constant(1), &ir_builder_);
+ llvm_ir::ForLoop::EmitForLoop(
+ "element_id_in_tile", index_typed_constant(0),
+ index_typed_constant(kTileHeight), index_typed_constant(1), &b_);
// Emit the body of the partial reduction loop.
llvm_ir::SetToFirstInsertPoint(tile_element_loop->GetBodyBasicBlock(),
- &ir_builder_);
- llvm::Value* y = ir_builder_.CreateNSWAdd(
- ir_builder_.CreateNSWMul(y_in_tiles, index_typed_constant(kTileSize)),
+ &b_);
+ llvm::Value* y = b_.CreateNSWAdd(
+ b_.CreateNSWMul(y_in_tiles, index_typed_constant(kTileHeight)),
tile_element_loop->GetIndVarValue());
- // Unless we know the tile is entirely in bounds, we have to emit a
- // y-in-bounds check before reading from the input.
- if (!tile_in_bounds) {
+ // Unless we know that y is in bounds, we have to emit a check before
+ // reading from the input.
+ if (!tile_in_y_bounds) {
llvm_ir::LlvmIfData if_data = llvm_ir::EmitIfThenElse(
- ir_builder_.CreateICmpULT(y, index_typed_constant(height)),
- "y_in_bounds", &ir_builder_);
+ b_.CreateICmpULT(y, index_typed_constant(height)), "y_in_bounds",
+ &b_);
// Emit code that reads the input element and accumulates it to
// the partial reduction result.
- llvm_ir::SetToFirstInsertPoint(if_data.true_block, &ir_builder_);
+ llvm_ir::SetToFirstInsertPoint(if_data.true_block, &b_);
}
- llvm::Value* input_address = ir_builder_.CreateAlloca(element_ir_type);
- {
+ for (int x_offset = 0; x_offset < kTileWidth; ++x_offset) {
+ llvm::Value* x = b_.CreateNSWAdd(
+ b_.CreateNSWMul(x_in_tiles, index_typed_constant(kTileWidth)),
+ index_typed_constant(x_offset));
+ // Unless we know that x is in bounds, we have to emit a check before
+ // reading from the input.
+ if (!tile_in_x_bounds) {
+ llvm_ir::LlvmIfData if_data = llvm_ir::EmitIfThenElse(
+ b_.CreateICmpULT(x, index_typed_constant(width)), "x_in_bounds",
+ &b_);
+ llvm_ir::SetToFirstInsertPoint(if_data.true_block, &b_);
+ }
+ llvm::Value* input_address = b_.CreateAlloca(element_ir_type);
// {y,x} is an index to input_matrix_shape [height,width]. We need to
// convert that to an index to input_shape (the shape of the operand of
// "reduce"). This conversion is composed of a transposition from
@@ -1103,65 +1136,94 @@ Status IrEmitterUnnested::EmitColumnReduction(
ShapeUtil::MakeShapeWithDescendingLayout(input_shape.element_type(),
{height, width});
const IrArray::Index input_matrix_index({y, x}, input_matrix_shape,
- &ir_builder_);
+ &b_);
const IrArray::Index input_index =
input_matrix_index
.SourceIndexOfReshape(input_matrix_shape,
- normalized_input_shape, &ir_builder_)
+ normalized_input_shape, &b_)
.SourceIndexOfTranspose(normalized_input_shape, input_shape,
- transpose_dimension_mapping,
- &ir_builder_);
+ transpose_dimension_mapping, &b_);
for (int i = 0; i != num_reduces; ++i) {
TF_ASSIGN_OR_RETURN(llvm::Value* const input_ir_value,
input_gens[i](input_index));
- ir_builder_.CreateStore(input_ir_value, input_address);
+ b_.CreateStore(input_ir_value, input_address);
TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
*reducers[i],
- {partial_reduction_result_addresses[i], input_address},
- partial_reduction_result_addresses[i]));
+ {partial_reduction_result_addresses[i * kTileWidth + x_offset],
+ input_address},
+ partial_reduction_result_addresses[i * kTileWidth + x_offset]));
+ TF_RETURN_IF_ERROR(EmitExtraOutputsForReduce(reduce, input_index,
+ extra_output_gens));
}
- return EmitExtraOutputsForReduce(reduce, input_index,
- extra_output_gens);
}
+ return Status::OK();
};
- // y_end = kTileSize + y_in_tiles * kTileSize, i.e., the y location that's
- // immediately beyond the tile.
- llvm::Value* y_end = ir_builder_.CreateNSWAdd(
- index_typed_constant(kTileSize),
- ir_builder_.CreateNSWMul(y_in_tiles, index_typed_constant(kTileSize)));
- llvm::Value* tile_in_bounds = ir_builder_.CreateOr(
- ir_builder_.CreateICmpULE(y_end, index_typed_constant(height)),
- ir_builder_.getInt1(height % kTileSize == 0));
- // The tile is entirely in bound if "height" is a multiple of kTileSize or
+ // y_end = kTileHeight + y_in_tiles * kTileHeight, i.e., the y location
+ // that's immediately beyond the tile.
+ llvm::Value* y_end = b_.CreateNSWAdd(
+ index_typed_constant(kTileHeight),
+ b_.CreateNSWMul(y_in_tiles, index_typed_constant(kTileHeight)));
+ // x_end = kTileWidth + x_in_tiles * kTileWidth, i.e., the x location
+ // that's immediately beyond the tile.
+ llvm::Value* x_end = b_.CreateNSWAdd(
+ index_typed_constant(kTileWidth),
+ b_.CreateNSWMul(x_in_tiles, index_typed_constant(kTileWidth)));
+ llvm::Value* tile_in_y_bounds =
+ b_.CreateOr(b_.CreateICmpULE(y_end, index_typed_constant(height)),
+ b_.getInt1(height % kTileHeight == 0));
+ llvm::Value* tile_in_x_bounds =
+ b_.CreateOr(b_.CreateICmpULE(x_end, index_typed_constant(width)),
+ b_.getInt1(width % kTileWidth == 0));
+ // The tile is in y bounds if "height" is a multiple of kTileHeight or
// y_end <= height.
- llvm_ir::LlvmIfData if_tile_in_bounds_data =
- llvm_ir::EmitIfThenElse(tile_in_bounds, "tile_in_bounds", &ir_builder_);
- llvm_ir::SetToFirstInsertPoint(if_tile_in_bounds_data.true_block,
- &ir_builder_);
- TF_RETURN_IF_ERROR(emit_tile_element_loop(/*tile_in_bounds=*/true));
- llvm_ir::SetToFirstInsertPoint(if_tile_in_bounds_data.false_block,
- &ir_builder_);
- TF_RETURN_IF_ERROR(emit_tile_element_loop(/*tile_in_bounds=*/false));
-
- // After the if-then-else statement on tile_in_bounds, emit atomic
- // operations to accumulate the partial reduction result to the output
- // element.
- llvm_ir::SetToFirstInsertPoint(if_tile_in_bounds_data.after_block,
- &ir_builder_);
+ llvm_ir::LlvmIfData if_tile_in_y_bounds_data =
+ llvm_ir::EmitIfThenElse(tile_in_y_bounds, "tile_in_y_bounds", &b_);
+ llvm_ir::SetToFirstInsertPoint(if_tile_in_y_bounds_data.true_block, &b_);
+ // The tile is in x bounds if "width" is a multiple of kTileWidth or
+ // x_end <= width.
+ llvm_ir::LlvmIfData if_tile_in_x_bounds_data =
+ llvm_ir::EmitIfThenElse(tile_in_x_bounds, "tile_in_x_bounds", &b_);
+ llvm_ir::SetToFirstInsertPoint(if_tile_in_x_bounds_data.true_block, &b_);
+ TF_RETURN_IF_ERROR(emit_tile_element_loop(/*tile_in_y_bounds=*/true,
+ /*tile_in_x_bounds=*/true));
+ llvm_ir::SetToFirstInsertPoint(if_tile_in_x_bounds_data.false_block, &b_);
+ TF_RETURN_IF_ERROR(emit_tile_element_loop(/*tile_in_y_bounds=*/true,
+ /*tile_in_x_bounds=*/false));
+ llvm_ir::SetToFirstInsertPoint(if_tile_in_y_bounds_data.false_block, &b_);
+ if_tile_in_x_bounds_data =
+ llvm_ir::EmitIfThenElse(tile_in_x_bounds, "tile_in_x_bounds", &b_);
+ llvm_ir::SetToFirstInsertPoint(if_tile_in_x_bounds_data.true_block, &b_);
+ TF_RETURN_IF_ERROR(emit_tile_element_loop(/*tile_in_y_bounds=*/false,
+ /*tile_in_x_bounds=*/true));
+ llvm_ir::SetToFirstInsertPoint(if_tile_in_x_bounds_data.false_block, &b_);
+ TF_RETURN_IF_ERROR(emit_tile_element_loop(/*tile_in_y_bounds=*/false,
+ /*tile_in_x_bounds=*/false));
+
+ // After the nested if-then-else statement on tile_in_y_bounds and
+ // tile_in_x_bounds, emit atomic operations to accumulate the partial
+ // reduction result to the output element.
+ llvm_ir::SetToFirstInsertPoint(if_tile_in_y_bounds_data.after_block, &b_);
const HloInstruction* output =
reduce->IsFused() ? reduce->parent()->FusionInstruction() : reduce;
for (int i = 0; i != num_reduces; ++i) {
- llvm::Value* output_address =
- GetIrArray(*output, *output, reduce_output_shapes[i])
- .EmitArrayElementAddress(
- IrArray::Index(x,
- ShapeUtil::GetSubshape(
- output->shape(), reduce_output_shapes[i]),
- &ir_builder_),
- &ir_builder_, "output_element_address");
- TF_RETURN_IF_ERROR(EmitAtomicOperationForNestedComputation(
- *reducers[i], output_address, partial_reduction_result_addresses[i]));
+ for (int x_offset = 0; x_offset < kTileWidth; ++x_offset) {
+ llvm::Value* x = b_.CreateNSWAdd(
+ b_.CreateNSWMul(x_in_tiles, index_typed_constant(kTileWidth)),
+ index_typed_constant(x_offset));
+ llvm::Value* output_address =
+ GetIrArray(*output, *output, reduce_output_shapes[i])
+ .EmitArrayElementAddress(
+ IrArray::Index(
+ x,
+ ShapeUtil::GetSubshape(output->shape(),
+ reduce_output_shapes[i]),
+ &b_),
+ &b_, "output_element_address");
+ TF_RETURN_IF_ERROR(EmitAtomicOperationForNestedComputation(
+ *reducers[i], output_address,
+ partial_reduction_result_addresses[i * kTileWidth + x_offset]));
+ }
}
return Status::OK();
};
@@ -1173,7 +1235,7 @@ Status IrEmitterUnnested::EmitColumnReduction(
static_cast<SequentialThunk*>(LastThunk())->thunks().back().get(),
ir_emitter_context_->llvm_module());
return ParallelLoopEmitter(loop_body_emitter, tiled_input_shape,
- launch_dimensions, &ir_builder_)
+ launch_dimensions, &b_)
.EmitLoop(IrName(reduce), index_ty);
}
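As a cross-check of the pseudocode comment above, a sequential reference of the kTileHeight x kTileWidth column-reduction tiling; reduce stands in for the nested reduction computation, and the final per-column accumulation plays the role of the AtomicReducer step (no atomics are needed on the host):

#include <cstdint>
#include <vector>

// Reference semantics only; names and sizes are illustrative.
template <typename T, typename Reducer>
std::vector<T> TiledColumnReduce(const std::vector<std::vector<T>>& input,
                                 T init, Reducer reduce, int64_t tile_height,
                                 int64_t tile_width) {
  const int64_t height = input.size();
  const int64_t width = height > 0 ? input[0].size() : 0;
  std::vector<T> output(width, init);
  for (int64_t y0 = 0; y0 < height; y0 += tile_height) {
    for (int64_t x0 = 0; x0 < width; x0 += tile_width) {
      // One GPU thread's work: tile_width running partial results.
      std::vector<T> partial(tile_width, init);
      for (int64_t dy = 0; dy < tile_height && y0 + dy < height; ++dy) {
        for (int64_t dx = 0; dx < tile_width && x0 + dx < width; ++dx) {
          partial[dx] = reduce(partial[dx], input[y0 + dy][x0 + dx]);
        }
      }
      // Models the AtomicReducer(&output[x], partial_results[x_offset]) step.
      for (int64_t dx = 0; dx < tile_width && x0 + dx < width; ++dx) {
        output[x0 + dx] = reduce(output[x0 + dx], partial[dx]);
      }
    }
  }
  return output;
}

With a tile width of 2, each thread touches two adjacent columns per row, so a pair of 16-bit elements can be fetched together, which is presumably the bandwidth effect the comment above alludes to.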
@@ -1323,8 +1385,8 @@ Status IrEmitterUnnested::EmitRowReduction(
{depth / z_tile_size, height, width_in_tiles}, {2, 1, 0});
LaunchDimensions launch_dimensions = CalculateLaunchDimensions(
tiled_input_shape, ir_emitter_context_->device_description());
- llvm::Type* index_ty = GetIndexTypeForKernel(
- reduce, launch_dimensions.launch_bound(), &ir_builder_);
+ llvm::Type* index_ty =
+ GetIndexTypeForKernel(reduce, launch_dimensions.launch_bound(), &b_);
auto index_typed_constant = [&](uint64 c) -> llvm::Constant* {
return llvm::ConstantInt::get(index_ty, c);
@@ -1336,12 +1398,12 @@ Status IrEmitterUnnested::EmitRowReduction(
input_shape.element_type(), ir_emitter_context_->llvm_module());
std::vector<llvm::Value*> partial_reduction_result_addresses;
for (int i = 0; i != num_reduces; ++i) {
- llvm::Value* partial_reduction_result_address = ir_builder_.CreateAlloca(
- element_ir_type, /*ArraySize=*/nullptr,
- "partial_reduction_result." + llvm::Twine(i));
+ llvm::Value* partial_reduction_result_address =
+ b_.CreateAlloca(element_ir_type, /*ArraySize=*/nullptr,
+ "partial_reduction_result." + llvm::Twine(i));
TF_ASSIGN_OR_RETURN(llvm::Value* const init_ir_value,
init_value_gens[i](IrArray::Index(index_ty)));
- ir_builder_.CreateStore(init_ir_value, partial_reduction_result_address);
+ b_.CreateStore(init_ir_value, partial_reduction_result_address);
partial_reduction_result_addresses.push_back(
partial_reduction_result_address);
}
@@ -1350,25 +1412,25 @@ Status IrEmitterUnnested::EmitRowReduction(
llvm::Value* y = tile_index[1];
llvm::Value* x_tile = tile_index[2];
- x_tile = ir_builder_.CreateZExtOrTrunc(x_tile, index_ty);
+ x_tile = b_.CreateZExtOrTrunc(x_tile, index_ty);
- llvm::Value* warp_id = ir_builder_.CreateUDiv(
- x_tile, index_typed_constant(kWarpSize), "warp_id");
- llvm::Value* lane_id = ir_builder_.CreateURem(
- x_tile, index_typed_constant(kWarpSize), "lane_id");
+ llvm::Value* warp_id =
+ b_.CreateUDiv(x_tile, index_typed_constant(kWarpSize), "warp_id");
+ llvm::Value* lane_id =
+ b_.CreateURem(x_tile, index_typed_constant(kWarpSize), "lane_id");
// The x-location of the last element in this z-x-tile.
// last_x = lane_id + warpSize * (x_tile_size - 1 + warp_id * x_tile_size);
- llvm::Value* last_x = ir_builder_.CreateNSWAdd(
- lane_id, ir_builder_.CreateNSWMul(
- index_typed_constant(kWarpSize),
- ir_builder_.CreateNSWAdd(
- index_typed_constant(x_tile_size - 1),
- ir_builder_.CreateNSWMul(
- warp_id, index_typed_constant(x_tile_size)))));
+ llvm::Value* last_x = b_.CreateNSWAdd(
+ lane_id,
+ b_.CreateNSWMul(
+ index_typed_constant(kWarpSize),
+ b_.CreateNSWAdd(
+ index_typed_constant(x_tile_size - 1),
+ b_.CreateNSWMul(warp_id, index_typed_constant(x_tile_size)))));
KernelSupportLibrary ksl(
- &ir_builder_,
+ &b_,
/*unroll_mode=*/xla::llvm_ir::UnrollMode::kFullyUnroll,
/*prevent_vectorization=*/false);
@@ -1377,9 +1439,9 @@ Status IrEmitterUnnested::EmitRowReduction(
auto emit_z_x_tile_element_loop = [&](bool x_tile_in_bounds,
int64 x_tile_loop_bound) -> Status {
auto emit_z_tile_element_loop = [&](llvm::Value* z_indvar) -> Status {
- llvm::Value* z = ir_builder_.CreateNSWAdd(
- z_indvar, ir_builder_.CreateNSWMul(
- index_typed_constant(z_tile_size), z_tile));
+ llvm::Value* z = b_.CreateNSWAdd(
+ z_indvar,
+ b_.CreateNSWMul(index_typed_constant(z_tile_size), z_tile));
TF_RETURN_IF_ERROR(ksl.For(
"x_tile",
/*start=*/index_typed_constant(0),
@@ -1387,12 +1449,12 @@ Status IrEmitterUnnested::EmitRowReduction(
/*step=*/1, [&](llvm::Value* x_indvar) -> Status {
// x = lane_id +
// warpSize * (element_id_in_x_tile + warp_id * x_tile_size);
- llvm::Value* x = ir_builder_.CreateNSWAdd(
+ llvm::Value* x = b_.CreateNSWAdd(
lane_id,
- ir_builder_.CreateNSWMul(
+ b_.CreateNSWMul(
index_typed_constant(kWarpSize),
- ir_builder_.CreateNSWAdd(
- x_indvar, ir_builder_.CreateNSWMul(
+ b_.CreateNSWAdd(
+ x_indvar, b_.CreateNSWMul(
warp_id, llvm::ConstantInt::get(
index_ty, x_tile_size)))));
@@ -1400,18 +1462,17 @@ Status IrEmitterUnnested::EmitRowReduction(
// emit a x-in-bounds check before reading from the input.
if (!x_tile_in_bounds) {
llvm_ir::LlvmIfData if_x_in_bounds_data =
- llvm_ir::EmitIfThenElse(ir_builder_.CreateICmpULT(
- x, index_typed_constant(width)),
- "x_in_bounds", &ir_builder_);
- // Points ir_builder_ to the then-block.
+ llvm_ir::EmitIfThenElse(
+ b_.CreateICmpULT(x, index_typed_constant(width)),
+ "x_in_bounds", &b_);
+ // Points b_ to the then-block.
llvm_ir::SetToFirstInsertPoint(if_x_in_bounds_data.true_block,
- &ir_builder_);
+ &b_);
}
// Emit code that reads the input element and accumulates it
// to the partial reduction result.
- llvm::Value* input_address =
- ir_builder_.CreateAlloca(element_ir_type);
+ llvm::Value* input_address = b_.CreateAlloca(element_ir_type);
{
// {z,y,x} is an index to input_3d_tensor_shape
// [depth,height,width]. We need to convert that to an index
@@ -1430,20 +1491,19 @@ Status IrEmitterUnnested::EmitRowReduction(
ShapeUtil::MakeShapeWithDescendingLayout(
input_shape.element_type(), {depth, height, width});
const IrArray::Index input_3d_tensor_index(
- {z, y, x}, input_3d_tensor_shape, &ir_builder_);
+ {z, y, x}, input_3d_tensor_shape, &b_);
const IrArray::Index input_index =
input_3d_tensor_index
.SourceIndexOfReshape(input_3d_tensor_shape,
- normalized_input_shape,
- &ir_builder_)
+ normalized_input_shape, &b_)
.SourceIndexOfTranspose(
normalized_input_shape, input_shape,
- transpose_dimension_mapping, &ir_builder_);
+ transpose_dimension_mapping, &b_);
for (int i = 0; i != num_reduces; ++i) {
TF_ASSIGN_OR_RETURN(llvm::Value* const input_ir_value,
input_gens[i](input_index));
- ir_builder_.CreateStore(input_ir_value, input_address);
+ b_.CreateStore(input_ir_value, input_address);
TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
*reducers[i],
{partial_reduction_result_addresses[i], input_address},
@@ -1462,9 +1522,9 @@ Status IrEmitterUnnested::EmitRowReduction(
/*step=*/1, emit_z_tile_element_loop);
};
- llvm::Value* tile_in_bounds = ir_builder_.CreateOr(
- ir_builder_.getInt1(width % (x_tile_size * kWarpSize) == 0),
- ir_builder_.CreateICmpULT(last_x, index_typed_constant(width)));
+ llvm::Value* tile_in_bounds =
+ b_.CreateOr(b_.getInt1(width % (x_tile_size * kWarpSize) == 0),
+ b_.CreateICmpULT(last_x, index_typed_constant(width)));
TF_RETURN_IF_ERROR(
ksl.If(tile_in_bounds,
@@ -1487,23 +1547,25 @@ Status IrEmitterUnnested::EmitRowReduction(
// bitcast cannot be applied to aggregate types (even packed ones), so we
// instead bitcast addresses of load/store to intN* of the same bit-width.
llvm::Type* shuffle_ir_type = element_ir_type->isStructTy()
- ? ir_builder_.getIntNTy(bit_width)
+ ? b_.getIntNTy(bit_width)
: element_ir_type;
for (int shuffle_distance = 16; shuffle_distance >= 1;
shuffle_distance /= 2) {
- llvm::Value* result_from_other_lane = ir_builder_.CreateAlloca(
- element_ir_type, nullptr, "result_from_other_lane");
+ llvm::Value* result_from_other_lane =
+ b_.CreateAlloca(element_ir_type, nullptr, "result_from_other_lane");
for (int i = 0; i != num_reduces; ++i) {
- llvm::Value* partial_reduction_result = ir_builder_.CreateLoad(
- ir_builder_.CreateBitCast(partial_reduction_result_addresses[i],
- shuffle_ir_type->getPointerTo()),
+ llvm::Value* partial_reduction_result = b_.CreateLoad(
+ b_.CreateBitCast(partial_reduction_result_addresses[i],
+ shuffle_ir_type->getPointerTo()),
"partial_reduction_result");
- ir_builder_.CreateStore(
- EmitShuffleDown(partial_reduction_result,
- ir_builder_.getInt32(shuffle_distance),
- &ir_builder_),
- ir_builder_.CreateBitCast(result_from_other_lane,
- shuffle_ir_type->getPointerTo()));
+ CHECK_EQ(launch_dimensions.threads_per_block() % kWarpSize, 0)
+ << "Requires block size to be a multiple of the warp size, otherwise "
+ "we will read undefined elements.";
+ b_.CreateStore(
+ EmitFullWarpShuffleDown(partial_reduction_result,
+ b_.getInt32(shuffle_distance), &b_),
+ b_.CreateBitCast(result_from_other_lane,
+ shuffle_ir_type->getPointerTo()));
TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
*reducers[i],
{partial_reduction_result_addresses[i], result_from_other_lane},
@@ -1518,10 +1580,9 @@ Status IrEmitterUnnested::EmitRowReduction(
// lane 0 (which holds the partially accumulated result for its warp) to the
// output element.
llvm_ir::LlvmIfData if_lane_id_is_zero_data = llvm_ir::EmitIfThenElse(
- ir_builder_.CreateICmpEQ(lane_id, index_typed_constant(0)),
- "lane_id_is_zero", &ir_builder_);
- llvm_ir::SetToFirstInsertPoint(if_lane_id_is_zero_data.true_block,
- &ir_builder_);
+ b_.CreateICmpEQ(lane_id, index_typed_constant(0)), "lane_id_is_zero",
+ &b_);
+ llvm_ir::SetToFirstInsertPoint(if_lane_id_is_zero_data.true_block, &b_);
for (int i = 0; i != num_reduces; ++i) {
llvm::Value* output_address =
GetIrArray(*output, *output, reduce_output_shapes[i])
@@ -1529,8 +1590,8 @@ Status IrEmitterUnnested::EmitRowReduction(
IrArray::Index(y,
ShapeUtil::GetSubshape(
output->shape(), reduce_output_shapes[i]),
- &ir_builder_),
- &ir_builder_, "output_element_address");
+ &b_),
+ &b_, "output_element_address");
// We don't need to emit atomic operations if there is only one tile of
// results. 'depth' is the z dimension, 'width' is the x dimension.
if (z_tile_size >= depth && x_tile_size >= width) {
@@ -1554,7 +1615,7 @@ Status IrEmitterUnnested::EmitRowReduction(
static_cast<SequentialThunk*>(LastThunk())->thunks().back().get(),
ir_emitter_context_->llvm_module());
return ParallelLoopEmitter(loop_body_emitter, tiled_input_shape,
- launch_dimensions, &ir_builder_)
+ launch_dimensions, &b_)
.EmitLoop(IrName(reduce), index_ty);
}
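The x and last_x arithmetic above encodes how a warp's lanes map onto the columns of the row being reduced: a warp owns x_tile_size consecutive groups of kWarpSize columns, and within each group lane i reads column i. A small sketch of the two formulas (the function names are illustrative, not symbols from the patch):

#include <cstdint>

constexpr int64_t kWarpSize = 32;

// Column read by a lane on iteration element_id_in_x_tile, and the last
// column its warp will ever read; both mirror the comments in the hunk above.
int64_t RowReductionX(int64_t lane_id, int64_t warp_id,
                      int64_t element_id_in_x_tile, int64_t x_tile_size) {
  return lane_id +
         kWarpSize * (element_id_in_x_tile + warp_id * x_tile_size);
}

int64_t RowReductionLastX(int64_t lane_id, int64_t warp_id,
                          int64_t x_tile_size) {
  return RowReductionX(lane_id, warp_id, x_tile_size - 1, x_tile_size);
}

Because consecutive lanes differ only in lane_id, every iteration of the inner x loop reads kWarpSize consecutive elements, i.e. a coalesced access.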
@@ -1680,12 +1741,11 @@ Status IrEmitterUnnested::HandleReduce(HloInstruction* reduce) {
return EmitReductionToVector(
reduce, input->shape(), {[&](const IrArray::Index& index) {
- return GetIrArray(*input, *reduce)
- .EmitReadArrayElement(index, &ir_builder_);
+ return GetIrArray(*input, *reduce).EmitReadArrayElement(index, &b_);
}},
{[&](const IrArray::Index& index) {
return GetIrArray(*init_value, *reduce)
- .EmitReadArrayElement(index, &ir_builder_);
+ .EmitReadArrayElement(index, &b_);
}},
dimensions_to_reduce, {reducer}, {{}}, {});
}
@@ -1698,8 +1758,9 @@ Status IrEmitterUnnested::HandleReduce(HloInstruction* reduce) {
Status IrEmitterUnnested::HandleTuple(HloInstruction* tuple) {
bool all_tuple_elements_have_buffer =
c_all_of(tuple->operands(), [&](HloInstruction* tuple_element) {
- return ir_emitter_context_->buffer_assignment().HasTopLevelAllocation(
- tuple_element);
+ return ir_emitter_context_->buffer_assignment()
+ .GetUniqueTopLevelSlice(tuple_element)
+ .ok();
});
// Tuples (especially tuples that are the final result of a computation) can
// be so huge that if we were to emit a kernel that took each tuple element as
@@ -1760,7 +1821,7 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
LaunchDimensions launch_dimensions = CalculateLaunchDimensions(
source->shape(), ir_emitter_context_->device_description());
llvm::Type* index_type = GetIndexTypeForKernel(
- select_and_scatter, launch_dimensions.launch_bound(), &ir_builder_);
+ select_and_scatter, launch_dimensions.launch_bound(), &b_);
auto index_typed_constant = [&](uint64 c) -> llvm::Constant* {
return llvm::ConstantInt::get(index_type, c);
};
@@ -1791,19 +1852,18 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
llvm::Value* selected_value_address = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(operand_element_type,
ir_emitter_context_->llvm_module()),
- "selected_value_address", &ir_builder_);
+ "selected_value_address", &b_);
llvm::Value* selected_index_address =
llvm_ir::EmitAllocaAtFunctionEntryWithCount(
index_type, index_typed_constant(rank), "selected_index_address",
- &ir_builder_);
+ &b_);
llvm::Value* initialized_flag_address = llvm_ir::EmitAllocaAtFunctionEntry(
- ir_builder_.getInt1Ty(), "initialized_flag_address", &ir_builder_);
- ir_builder_.CreateStore(ir_builder_.getInt1(false),
- initialized_flag_address);
+ b_.getInt1Ty(), "initialized_flag_address", &b_);
+ b_.CreateStore(b_.getInt1(false), initialized_flag_address);
// Create the inner loop to iterate over the window.
- llvm_ir::ForLoopNest window_loops(IrName(select_and_scatter, "inner"),
- &ir_builder_, index_type);
+ llvm_ir::ForLoopNest window_loops(IrName(select_and_scatter, "inner"), &b_,
+ index_type);
std::vector<int64> window_size;
for (const auto& dim : window.dimensions()) {
window_size.push_back(dim.size());
@@ -1812,84 +1872,79 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
const IrArray::Index window_index = window_loops.AddLoopsForShape(
ShapeUtil::MakeShape(operand_element_type, window_size), "window");
llvm_ir::SetToFirstInsertPoint(window_loops.GetInnerLoopBodyBasicBlock(),
- &ir_builder_);
+ &b_);
// Compute the operand index to visit and evaluate the condition whether the
// operand index is within the bounds. The unsigned comparison includes
// checking whether the operand index >= 0.
IrArray::Index operand_index(index_type, source_index.size());
- llvm::Value* in_bounds_condition = ir_builder_.getInt1(true);
+ llvm::Value* in_bounds_condition = b_.getInt1(true);
for (int64 i = 0; i < rank; ++i) {
- llvm::Value* strided_index = ir_builder_.CreateNSWMul(
+ llvm::Value* strided_index = b_.CreateNSWMul(
source_index[i], index_typed_constant(window.dimensions(i).stride()));
- operand_index[i] = ir_builder_.CreateNSWSub(
- ir_builder_.CreateNSWAdd(strided_index, window_index[i]),
+ operand_index[i] = b_.CreateNSWSub(
+ b_.CreateNSWAdd(strided_index, window_index[i]),
index_typed_constant(window.dimensions(i).padding_low()));
- llvm::Value* index_condition = ir_builder_.CreateICmpULT(
+ llvm::Value* index_condition = b_.CreateICmpULT(
operand_index[i],
index_typed_constant(ShapeUtil::GetDimension(operand->shape(), i)));
- in_bounds_condition =
- ir_builder_.CreateAnd(in_bounds_condition, index_condition);
+ in_bounds_condition = b_.CreateAnd(in_bounds_condition, index_condition);
}
CHECK(in_bounds_condition != nullptr);
// Only need to do something if the operand index is within the bounds.
// First check if the initialized_flag is set.
llvm_ir::LlvmIfData if_in_bounds =
- llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", &ir_builder_);
- llvm_ir::SetToFirstInsertPoint(if_in_bounds.true_block, &ir_builder_);
+ llvm_ir::EmitIfThenElse(in_bounds_condition, "in-bounds", &b_);
+ llvm_ir::SetToFirstInsertPoint(if_in_bounds.true_block, &b_);
llvm_ir::LlvmIfData if_initialized = llvm_ir::EmitIfThenElse(
- ir_builder_.CreateLoad(initialized_flag_address), "initialized",
- &ir_builder_);
+ b_.CreateLoad(initialized_flag_address), "initialized", &b_);
// If the initialized_flag is false, initialize the selected value and index
// with the currently visiting operand.
- llvm_ir::SetToFirstInsertPoint(if_initialized.false_block, &ir_builder_);
+ llvm_ir::SetToFirstInsertPoint(if_initialized.false_block, &b_);
const auto save_operand_index = [&](const IrArray::Index& operand_index) {
for (int64 i = 0; i < rank; ++i) {
llvm::Value* selected_index_address_slot =
- ir_builder_.CreateInBoundsGEP(selected_index_address,
- {ir_builder_.getInt32(i)});
- ir_builder_.CreateStore(operand_index[i], selected_index_address_slot);
+ b_.CreateInBoundsGEP(selected_index_address, {b_.getInt32(i)});
+ b_.CreateStore(operand_index[i], selected_index_address_slot);
}
};
IrArray operand_array = GetIrArray(*operand, *select_and_scatter);
llvm::Value* operand_data =
- operand_array.EmitReadArrayElement(operand_index, &ir_builder_);
- ir_builder_.CreateStore(operand_data, selected_value_address);
+ operand_array.EmitReadArrayElement(operand_index, &b_);
+ b_.CreateStore(operand_data, selected_value_address);
save_operand_index(operand_index);
- ir_builder_.CreateStore(ir_builder_.getInt1(true),
- initialized_flag_address);
+ b_.CreateStore(b_.getInt1(true), initialized_flag_address);
// If the initialized_flag is true, call the `select` function to
// potentially update the selected value and index with the currently
// visiting operand.
- llvm_ir::SetToFirstInsertPoint(if_initialized.true_block, &ir_builder_);
+ llvm_ir::SetToFirstInsertPoint(if_initialized.true_block, &b_);
const Shape output_shape = ShapeUtil::MakeShape(PRED, {});
llvm::Value* operand_address =
- operand_array.EmitArrayElementAddress(operand_index, &ir_builder_);
+ operand_array.EmitArrayElementAddress(operand_index, &b_);
llvm::Value* select_return_buffer = llvm_ir::EmitAllocaAtFunctionEntry(
llvm_ir::PrimitiveTypeToIrType(PRED,
ir_emitter_context_->llvm_module()),
- "select_return_buffer", &ir_builder_);
+ "select_return_buffer", &b_);
TF_RETURN_IF_ERROR(EmitCallToNestedComputation(
*select_and_scatter->select(),
{selected_value_address, operand_address}, select_return_buffer));
- llvm::Value* result = ir_builder_.CreateLoad(select_return_buffer);
+ llvm::Value* result = b_.CreateLoad(select_return_buffer);
// If the 'select' function returns false, update the selected value and the
// index to the currently visiting operand.
- llvm::Value* cond = ir_builder_.CreateICmpNE(
+ llvm::Value* cond = b_.CreateICmpNE(
result,
llvm::ConstantInt::get(llvm_ir::PrimitiveTypeToIrType(
PRED, ir_emitter_context_->llvm_module()),
0),
"boolean_predicate");
llvm_ir::LlvmIfData if_select_lhs =
- llvm_ir::EmitIfThenElse(cond, "if-select-lhs", &ir_builder_);
- llvm_ir::SetToFirstInsertPoint(if_select_lhs.false_block, &ir_builder_);
- ir_builder_.CreateStore(ir_builder_.CreateLoad(operand_address),
- selected_value_address);
+ llvm_ir::EmitIfThenElse(cond, "if-select-lhs", &b_);
+ llvm_ir::SetToFirstInsertPoint(if_select_lhs.false_block, &b_);
+ b_.CreateStore(b_.CreateLoad(operand_address), selected_value_address);
save_operand_index(operand_index);
// After iterating over the window elements, scatter the source element to
@@ -1897,20 +1952,19 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
// location is computed by calling the `scatter` function with the source
// value and the current output value.
llvm_ir::SetToFirstInsertPoint(window_loops.GetOuterLoopExitBasicBlock(),
- &ir_builder_);
+ &b_);
IrArray::Index selected_index(operand_index.GetType());
for (int64 i = 0; i < rank; ++i) {
- llvm::Value* selected_index_address_slot = ir_builder_.CreateInBoundsGEP(
- selected_index_address, {ir_builder_.getInt32(i)});
- selected_index.push_back(
- ir_builder_.CreateLoad(selected_index_address_slot));
+ llvm::Value* selected_index_address_slot =
+ b_.CreateInBoundsGEP(selected_index_address, {b_.getInt32(i)});
+ selected_index.push_back(b_.CreateLoad(selected_index_address_slot));
}
llvm::Value* source_value_address =
GetIrArray(*source, *select_and_scatter)
- .EmitArrayElementAddress(source_index, &ir_builder_);
+ .EmitArrayElementAddress(source_index, &b_);
llvm::Value* output_value_address =
GetIrArray(*select_and_scatter, *select_and_scatter)
- .EmitArrayElementAddress(selected_index, &ir_builder_);
+ .EmitArrayElementAddress(selected_index, &b_);
return EmitAtomicOperationForNestedComputation(
*select_and_scatter->scatter(), output_value_address,
source_value_address);
@@ -1925,7 +1979,7 @@ Status IrEmitterUnnested::HandleSelectAndScatter(
static_cast<SequentialThunk*>(LastThunk())->thunks().back().get(),
ir_emitter_context_->llvm_module());
return ParallelLoopEmitter(loop_body_emitter, source->shape(),
- launch_dimensions, &ir_builder_)
+ launch_dimensions, &b_)
.EmitLoop(IrName(select_and_scatter), index_type);
}
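One detail worth spelling out from the loop above: the operand index derived from the source index, the stride, the window offset, and the low padding can go negative, and the single unsigned ICmpULT catches both that case and the index-past-the-bound case at once. A sketch of the mapping (the helper name is illustrative, not from the patch):

#include <cstdint>

// Maps a (source, window) position to an operand index and reports whether it
// lies inside [0, bound); one unsigned compare covers both ends of the range.
bool WindowedOperandIndexInBounds(int64_t source_index, int64_t window_index,
                                  int64_t stride, int64_t padding_low,
                                  int64_t bound, int64_t* operand_index) {
  *operand_index = source_index * stride + window_index - padding_low;
  return static_cast<uint64_t>(*operand_index) < static_cast<uint64_t>(bound);
}

The per-dimension results are then ANDed together into in_bounds_condition, exactly as the emitted IR does above.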
@@ -1964,6 +2018,75 @@ Status IrEmitterUnnested::HandleSelect(HloInstruction* select) {
return IrEmitter::HandleSelect(select);
}
+Status IrEmitterUnnested::HandleSort(HloInstruction* sort) {
+ std::vector<std::unique_ptr<Thunk>> thunks;
+ auto values = sort->operand_count() > 1 ? sort->operand(1) : nullptr;
+ if (values != nullptr) {
+ // TODO(b/26783907): Also sort the values by their corresponding key.
+ return Unimplemented("Key/Value Sort is not implemented on GPU");
+ }
+
+ // First copy the operand to the output, so that we can sort in-place.
+ // TODO(b/26783907): Share buffer of output and operand when it is possible.
+ if (sort->operand(0)->IsConstant()) {
+ thunks.push_back(MakeUnique<HostToDeviceCopyThunk>(
+ /*source_address=*/sort->operand(0)->literal().untyped_data(),
+ /*destination_buffer=*/GetAllocationSlice(*sort),
+ /*mem_size=*/ShapeUtil::ByteSizeOf(sort->shape()), sort));
+ } else {
+ thunks.push_back(MakeUnique<DeviceToDeviceCopyThunk>(
+ /*source_address=*/GetAllocationSlice(*sort->operand(0)),
+ /*destination_buffer=*/GetAllocationSlice(*sort),
+ /*mem_size=*/ShapeUtil::ByteSizeOf(sort->shape()), sort));
+ }
+
+ int64 dimension_to_sort = sort->dimensions(0);
+ int64 dimension_to_sort_bound = sort->shape().dimensions(dimension_to_sort);
+ int64 num_stages = tensorflow::Log2Ceiling(dimension_to_sort_bound);
+ auto index_type = b_.getInt64Ty();
+
+ // Naive C++ code for the outer loops:
+ //
+ // for (int64 stage = 0; stage < Log2Ceiling(dimension_to_sort_bound);
+ // ++stage) {
+ // int64 first_xor_mask = (1LL << (stage + 1)) - 1;
+ // SortInPlace(first_xor_mask);
+ // for (int64 mask = stage - 1; mask >= 0; --mask) {
+ // int64 later_xor_mask = 1LL << mask;
+ // SortInPlace(later_xor_mask);
+ // }
+ // }
+ //
+ // This follows the algorithm described on Wikipedia:
+ // https://en.wikipedia.org/wiki/Bitonic_sorter
+
+ for (int64 stage = 0; stage < num_stages; ++stage) {
+ for (int64 mask = stage; mask >= 0; --mask) {
+ thunks.push_back(
+ BuildKernelThunk(sort, /*implements_whole_instruction=*/false));
+ LaunchDimensions launch_dimensions = CalculateLaunchDimensions(
+ sort->shape(), ir_emitter_context_->device_description());
+ UpdateLaunchDimensions(launch_dimensions, thunks.back().get(),
+ ir_emitter_context_->llvm_module());
+
+ llvm::Value* xor_mask;
+ if (mask == stage) {
+ xor_mask = llvm::ConstantInt::get(index_type, (1LL << (stage + 1)) - 1);
+ } else {
+ xor_mask = llvm::ConstantInt::get(index_type, 1LL << mask);
+ }
+
+ TF_RETURN_IF_ERROR(llvm_ir::EmitSortInPlace(
+ dimension_to_sort, GetIrArray(*sort, *sort), IrName(sort), xor_mask,
+ &b_, &launch_dimensions));
+ }
+ }
+
+ thunk_sequence_->emplace_back(
+ MakeUnique<SequentialThunk>(std::move(thunks), sort));
+ return Status::OK();
+}
+
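To make the stage/mask schedule concrete, here is a host-side reference that uses the same xor-mask sequence as the loop above; each mask pairs element i with element i ^ xor_mask for a compare-and-swap, which is essentially the per-element work EmitSortInPlace emits for the GPU. The snippet is a sketch over plain ints, not the patch's code:

#include <algorithm>
#include <cstdint>
#include <vector>

// Reference bitonic sort using the stage/mask schedule from the hunk above.
// The j < n guard skips partners past the end, which is equivalent to padding
// the array to a power of two with +infinity.
void BitonicSortReference(std::vector<int>* data) {
  const int64_t n = static_cast<int64_t>(data->size());
  int64_t num_stages = 0;  // Log2Ceiling(n)
  while ((int64_t{1} << num_stages) < n) ++num_stages;
  for (int64_t stage = 0; stage < num_stages; ++stage) {
    for (int64_t mask = stage; mask >= 0; --mask) {
      const int64_t xor_mask = (mask == stage)
                                   ? (int64_t{1} << (stage + 1)) - 1
                                   : int64_t{1} << mask;
      for (int64_t i = 0; i < n; ++i) {
        const int64_t j = i ^ xor_mask;
        if (j > i && j < n && (*data)[i] > (*data)[j]) {
          std::swap((*data)[i], (*data)[j]);
        }
      }
    }
  }
}

The first substage's mask of (1 << (stage + 1)) - 1 pairs each element with its mirror inside a 2^(stage+1)-element block, which is what lets a single ascending comparison direction be used throughout instead of the alternating directions of the textbook bitonic network.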
Status IrEmitterUnnested::HandleTupleSelect(HloInstruction* tuple_select) {
thunk_sequence_->push_back(
BuildKernelThunk(tuple_select, /*implements_whole_instruction=*/true));
@@ -2215,18 +2338,16 @@ std::unique_ptr<KernelThunk> IrEmitterUnnested::BuildKernelThunk(
<< " is found in slice " << slice.ToString() << " at GTE index "
<< gte_index.ToString();
- llvm::Value* loc =
- ir_builder_.CreateInBoundsGEP(kernel_args.at(slice.allocation()),
- {ir_builder_.getInt64(slice.offset())});
+ llvm::Value* loc = b_.CreateInBoundsGEP(kernel_args.at(slice.allocation()),
+ {b_.getInt64(slice.offset())});
// If gte_index is nonempty, we have to dereference `loc` to get to the
// value we're ultimately interested in.
llvm::Type* int8_double_pointer =
- llvm::PointerType::get(ir_builder_.getInt8PtrTy(), /*AddressSpace=*/0);
+ llvm::PointerType::get(b_.getInt8PtrTy(), /*AddressSpace=*/0);
for (int64 idx : gte_index) {
- loc = ir_builder_.CreateBitCast(loc, int8_double_pointer);
- loc = ir_builder_.CreateLoad(
- ir_builder_.CreateInBoundsGEP(loc, {ir_builder_.getInt64(idx)}));
+ loc = b_.CreateBitCast(loc, int8_double_pointer);
+ loc = b_.CreateLoad(b_.CreateInBoundsGEP(loc, {b_.getInt64(idx)}));
}
bindings_.BindHloToIrValue(*instr, loc, index);
@@ -2238,7 +2359,7 @@ std::unique_ptr<KernelThunk> IrEmitterUnnested::BuildKernelThunk(
bindings_.SetTempBufferBase(kernel_args.at(*temp_buffer));
} else {
bindings_.SetTempBufferBase(
- llvm::ConstantPointerNull::get(ir_builder_.getInt8PtrTy()));
+ llvm::ConstantPointerNull::get(b_.getInt8PtrTy()));
}
return MakeUnique<KernelThunk>(buffers, llvm_ir::AsString(kernel->getName()),
@@ -2485,10 +2606,9 @@ StatusOr<std::unique_ptr<Thunk>> IrEmitterUnnested::BuildInitializerThunk(
TF_RETURN_IF_ERROR(ParallelLoopEmitter(
[=](const IrArray::Index& index) {
return GetIrArray(*init_value, *hlo)
- .EmitReadArrayElement(index, &ir_builder_);
+ .EmitReadArrayElement(index, &b_);
},
- GetIrArray(*hlo, *hlo, index), launch_dimensions,
- &ir_builder_)
+ GetIrArray(*hlo, *hlo, index), launch_dimensions, &b_)
.EmitLoop(IrName(hlo)));
// Clean up state left behind by emitting the loop above. (This is normally
@@ -2672,10 +2792,10 @@ Status IrEmitterUnnested::EmitTargetElementLoopInThunk(
ir_emitter_context_->llvm_module());
if (!hlo.IsMultiOutputFusion()) {
return ParallelLoopEmitter(element_generator, GetIrArray(hlo, hlo),
- launch_dimensions, &ir_builder_, unroll_factor)
- .EmitLoop(IrName(&hlo),
- GetIndexTypeForKernel(&hlo, launch_dimensions.launch_bound(),
- &ir_builder_));
+ launch_dimensions, &b_, unroll_factor)
+ .EmitLoop(
+ IrName(&hlo),
+ GetIndexTypeForKernel(&hlo, launch_dimensions.launch_bound(), &b_));
}
// For multioutput fusion, we need to emit each operand and the root.
@@ -2685,25 +2805,24 @@ Status IrEmitterUnnested::EmitTargetElementLoopInThunk(
}
TF_RETURN_IF_ERROR(
ParallelLoopEmitter(element_generator, output_arrays, launch_dimensions,
- &ir_builder_, unroll_factor)
+ &b_, unroll_factor)
.EmitLoop(IrName(&hlo),
GetIndexTypeForKernel(
- &hlo, launch_dimensions.launch_bound(), &ir_builder_)));
+ &hlo, launch_dimensions.launch_bound(), &b_)));
std::vector<llvm::Value*> tuple_operand_ptrs;
for (int64 i = 0; i < output_arrays.size(); ++i) {
tuple_operand_ptrs.push_back(output_arrays[i].GetBasePointer());
}
- ir_builder_.SetInsertPoint(ir_builder_.GetInsertBlock()->getTerminator());
- llvm_ir::EmitTuple(GetIrArray(hlo, hlo), tuple_operand_ptrs, &ir_builder_,
- module_);
+ b_.SetInsertPoint(b_.GetInsertBlock()->getTerminator());
+ llvm_ir::EmitTuple(GetIrArray(hlo, hlo), tuple_operand_ptrs, &b_, module_);
return Status::OK();
}
Status IrEmitterUnnested::EmitTargetElementLoop(
const HloInstruction& hlo,
const llvm_ir::ElementGenerator& element_generator) {
- CHECK(Thunk::Kind::kKernel == LastThunk()->kind());
+ CHECK_EQ(Thunk::Kind::kKernel, LastThunk()->kind());
return EmitTargetElementLoopInThunk(hlo, element_generator,
static_cast<KernelThunk*>(LastThunk()));
}
@@ -2747,14 +2866,14 @@ int IrEmitterUnnested::ConstructOutputReducedShapeAndCastOutputIrArrayToShape(
output_reduced_shapes->push_back(ShapeUtil::MakeShapeWithDescendingLayout(
ShapeUtil::GetSubshape(hlo.shape(), {i}).element_type(),
reduced_output_dims));
- output_in_reduced_shape_arrays->push_back(output_arrays[i].CastToShape(
- (*output_reduced_shapes)[i], &ir_builder_));
+ output_in_reduced_shape_arrays->push_back(
+ output_arrays[i].CastToShape((*output_reduced_shapes)[i], &b_));
}
} else {
output_reduced_shapes->push_back(ShapeUtil::MakeShapeWithDescendingLayout(
hlo.shape().element_type(), reduced_output_dims));
- output_in_reduced_shape_arrays->push_back(output_arrays[0].CastToShape(
- (*output_reduced_shapes)[0], &ir_builder_));
+ output_in_reduced_shape_arrays->push_back(
+ output_arrays[0].CastToShape((*output_reduced_shapes)[0], &b_));
}
return num_outputs;
}
@@ -2778,8 +2897,8 @@ int IrEmitterUnnested::ConstructInputReducedShapeAndCastInputIrArrayToShape(
param_reduced_shapes->push_back(ShapeUtil::MakeShapeWithDescendingLayout(
param->shape().element_type(),
Permute({0, 2, 1}, reduced_output_dims)));
- param_in_reduced_shape_arrays->push_back(param_arrays[id].CastToShape(
- (*param_reduced_shapes)[id], &ir_builder_));
+ param_in_reduced_shape_arrays->push_back(
+ param_arrays[id].CastToShape((*param_reduced_shapes)[id], &b_));
}
return num_params;
}
@@ -2928,7 +3047,7 @@ LaunchDimensions IrEmitterUnnested::EmitHlo021Tile(
kTileSize);
const int kNVPTXSharedMemoryAddrSpace = 3;
auto* tile_base_ptr = new llvm::GlobalVariable(
- *ir_builder_.GetInsertBlock()->getParent()->getParent(), tile_type,
+ *b_.GetInsertBlock()->getParent()->getParent(), tile_type,
/*isConstant=*/false, llvm::GlobalValue::PrivateLinkage,
llvm::UndefValue::get(tile_type),
llvm_ir::AsStringRef(IrName(hlo, StrCat("tile", id))), nullptr,
@@ -2952,8 +3071,8 @@ LaunchDimensions IrEmitterUnnested::EmitHlo021Tile(
c_accumulate(output_dims_in_tiles, 1, std::multiplies<int64>());
LaunchDimensions launch_dimensions(num_tiles, kThreadsPerTile);
- llvm::Type* index_ty = GetIndexTypeForKernel(
- hlo, launch_dimensions.launch_bound(), &ir_builder_);
+ llvm::Type* index_ty =
+ GetIndexTypeForKernel(hlo, launch_dimensions.launch_bound(), &b_);
auto index_typed_constant = [&](uint64 c) -> llvm::Constant* {
return llvm::ConstantInt::get(index_ty, c);
};
@@ -2981,23 +3100,23 @@ LaunchDimensions IrEmitterUnnested::EmitHlo021Tile(
llvm::Value* x;
llvm::Value* y;
std::tie(y, x) = CalculateYXCoordinateWithinTile(
- &ir_builder_, index_typed_constant(kTileSize), kThreadsPerTile);
+ &b_, index_typed_constant(kTileSize), kThreadsPerTile);
// Calculate the index for the current output tile from block_id.
const IrArray::Index output_tile_index(
- GetBlockIdx(&ir_builder_, index_ty, num_tiles),
+ GetBlockIdx(&b_, index_ty, num_tiles),
ShapeUtil::MakeShapeWithDescendingLayout(PRED /*arbitrary*/,
output_dims_in_tiles),
- &ir_builder_);
+ &b_);
// Output tile origin is the index for the first element of the current output
// tile.
const IrArray::Index output_tile_origin = [&] {
IrArray::Index index = output_tile_index;
for (int i = 1; i < 3; ++i) {
- index[i] = ir_builder_.CreateMul(output_tile_index[i],
- index_typed_constant(kTileSize),
- "tile_origin." + std::to_string(i));
+ index[i] =
+ b_.CreateMul(output_tile_index[i], index_typed_constant(kTileSize),
+ "tile_origin." + std::to_string(i));
}
return index;
}();
@@ -3010,16 +3129,15 @@ LaunchDimensions IrEmitterUnnested::EmitHlo021Tile(
std::vector<llvm::Value*> output_tile_bounds(3);
for (int i = 1; i < 3; ++i) {
// Only last row or column may not have full size.
- output_tile_bounds[i] = ir_builder_.CreateSelect(
- ir_builder_.CreateICmpEQ(
- output_tile_index[i],
- index_typed_constant(output_dims_in_tiles[i] - 1)),
+ output_tile_bounds[i] = b_.CreateSelect(
+ b_.CreateICmpEQ(output_tile_index[i],
+ index_typed_constant(output_dims_in_tiles[i] - 1)),
index_typed_constant(reduced_output_dims[i] -
(output_dims_in_tiles[i] - 1) * kTileSize),
index_typed_constant(kTileSize), "kTileSize");
}
- KernelSupportLibrary ksl(&ir_builder_, llvm_ir::UnrollMode::kDefaultUnroll);
+ KernelSupportLibrary ksl(&b_, llvm_ir::UnrollMode::kDefaultUnroll);
// Curry a few parameters to EmitTiledElementalCodeWithBoundsCheck.
auto emit_tiled_elemental_code_with_bounds_check =
@@ -3028,13 +3146,13 @@ LaunchDimensions IrEmitterUnnested::EmitHlo021Tile(
const std::function<void(const IrArray::Index&, llvm::Value*)>&
emit_elem_function) {
EmitTiledElementalCodeWithBoundsCheck(
- kTileSize, kNumRows, index, loop_name, &ksl, &ir_builder_, y, x,
- tile_width, tile_height, emit_elem_function);
+ kTileSize, kNumRows, index, loop_name, &ksl, &b_, y, x, tile_width,
+ tile_height, emit_elem_function);
};
// Adds `addend` to the given `dim` of `index`.
auto offset_dim = [&](IrArray::Index index, llvm::Value* addend, int64 dim) {
- index[dim] = ir_builder_.CreateAdd(index[dim], addend);
+ index[dim] = b_.CreateAdd(index[dim], addend);
return index;
};
const IrArray::Index input_index =
@@ -3050,19 +3168,17 @@ LaunchDimensions IrEmitterUnnested::EmitHlo021Tile(
llvm::Value* shmem_buffer = param_shmem_buffers[id];
// TODO(jlebar): Add AA metadata to this store. Tile buffers are
// global variables, so LLVM can't infer much about it.
- ir_builder_.CreateStore(
- input_in_logical_shape.EmitReadArrayElement(index, &ir_builder_,
+ b_.CreateStore(
+ input_in_logical_shape.EmitReadArrayElement(index, &b_,
"input_element"),
- ir_builder_.CreateGEP(shmem_buffer,
- {index_typed_constant(0), y_loc, x}));
+ b_.CreateGEP(shmem_buffer, {index_typed_constant(0), y_loc, x}));
}
});
// Wait for all threads to reach this point, lest we copy a value from tile to
// output before the other thread copies it from input to tile.
// This is `__syncthreads` in CUDA.
- llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_barrier0, {}, {},
- &ir_builder_);
+ llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::nvvm_barrier0, {}, {}, &b_);
llvm_ir::TiledParameterInfo tiled_param_info(param_shmem_buffers, y, x);
@@ -3076,27 +3192,26 @@ LaunchDimensions IrEmitterUnnested::EmitHlo021Tile(
output_index, "output", output_tile_bounds[2], output_tile_bounds[1],
[&](const IrArray::Index& index, llvm::Value* y_loc) {
// TODO(jlebar): Add AA metadata to this load.
- llvm::Instruction* load_from_shmem_buffer = ir_builder_.CreateLoad(
- ir_builder_.CreateGEP(param_shmem_buffers[0],
- {ir_builder_.getInt64(0), x, y_loc}),
+ llvm::Instruction* load_from_shmem_buffer = b_.CreateLoad(
+ b_.CreateGEP(param_shmem_buffers[0], {b_.getInt64(0), x, y_loc}),
"output_element");
output_in_reduced_shape_arrays[0].EmitWriteArrayElement(
- index, load_from_shmem_buffer, &ir_builder_);
+ index, load_from_shmem_buffer, &b_);
});
} else {
CHECK_EQ(hlo->opcode(), HloOpcode::kFusion);
emit_tiled_elemental_code_with_bounds_check(
output_index, "output", output_tile_bounds[2], output_tile_bounds[1],
[&](const IrArray::Index& index, llvm::Value* y_loc) {
- GpuElementalIrEmitter elem_emitter(hlo_module_config_, module_,
- &ir_builder_, GetNestedComputer());
+ GpuElementalIrEmitter elem_emitter(hlo_module_config_, module_, &b_,
+ GetNestedComputer());
FusedIrEmitter fused_emitter(param_arrays, &elem_emitter);
tiled_param_info.set_y(y_loc);
fused_emitter.SetTiledParameterInfo(&tiled_param_info);
TF_CHECK_OK(hlo->fused_expression_root()->Accept(&fused_emitter));
IrArray::Index untiled_index = llvm_ir::GetUnreducedOutputIndex(
index, output_reduced_shapes[0], output_arrays[0].GetShape(),
- &ir_builder_);
+ &b_);
const llvm_ir::ElementGenerator& output_generator =
fused_emitter.GetRootGenerator();
llvm::Value* output_value =
@@ -3107,12 +3222,11 @@ LaunchDimensions IrEmitterUnnested::EmitHlo021Tile(
output_in_reduced_shape_arrays.size());
for (int64 i = 0; i < output_in_reduced_shape_arrays.size(); ++i) {
output_in_reduced_shape_arrays[i].EmitWriteArrayElement(
- index, ir_builder_.CreateExtractValue(output_value, i),
- &ir_builder_);
+ index, b_.CreateExtractValue(output_value, i), &b_);
}
} else {
output_in_reduced_shape_arrays[0].EmitWriteArrayElement(
- index, output_value, &ir_builder_);
+ index, output_value, &b_);
}
});
}
@@ -3123,7 +3237,7 @@ LaunchDimensions IrEmitterUnnested::EmitHlo021Tile(
for (int64 i = 0; i < output_arrays.size(); ++i) {
tuple_operand_ptrs.push_back(output_arrays[i].GetBasePointer());
}
- llvm_ir::EmitTuple(GetIrArray(*hlo, *hlo), tuple_operand_ptrs, &ir_builder_,
+ llvm_ir::EmitTuple(GetIrArray(*hlo, *hlo), tuple_operand_ptrs, &b_,
module_);
}
@@ -3174,6 +3288,40 @@ bool IrEmitterUnnested::CheckAndEmitHloWithTile021(HloInstruction* hlo) {
return false;
}
+ // Each of our shared memory tiles has 32*33 elements (so ~4kb, if the
+ // elements are of size 4 bytes), and CUDA has an architectural limit of 48kb
+ // shared memory per SM. (This is increased to 96kb in Volta, but we don't
+ // use this, in part because it eats into our L1 cache space.)
+ //
+ // For correctness we need to ensure that we don't make more than 48kb worth
+ // of shmem tiles per block. And for performance, we'd probably like to use
+ // significantly less, so that we can fit more than one block at a time on a
+ // gpu core.
+ //
+ // We say without benchmarks that we want at least 3 blocks/core,
+ // corresponding to 3 shmem tiles if the elements are 32 bits wide. We choose
+ // which params get the shmem transpose treatment arbitrarily; it's not clear
+ // if there's a Right Choice.
+ //
+ // This is only sound if tiled transposes are the only place where we use
+ // shared memory in fusions. If in the future other fusible ops use shared
+ // memory, we'll have to adjust this heuristic.
+ constexpr int kMinBlocksPerCore = 3;
+ constexpr int64 kShmemPerCore = 48 * 1024;
+ int64 shmem_used = 0;
+ for (int64 i = 0; i < params_012.size(); ++i) {
+ const HloInstruction* operand = hlo->operand(params_012[i]);
+ shmem_used +=
+ 32 * 33 *
+ ShapeUtil::ByteSizeOfPrimitiveType(operand->shape().element_type());
+
+ if (kMinBlocksPerCore * shmem_used > kShmemPerCore) {
+ // Erase this element and everything after it from params_012.
+ params_012.resize(i);
+ break;
+ }
+ }
+
VLOG(3) << "EmitHlo021Tile Emitting hlo tile 0-2-1" << hlo->ToString();
thunk_sequence_->emplace_back(
BuildKernelThunk(hlo, /*implements_whole_instruction=*/true));
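The shmem budget heuristic above is easiest to see with concrete numbers. Below is a minimal standalone sketch (not part of this change) that works through the arithmetic quoted in the comment: a 32x33 padded tile, the 48kb-per-SM limit, and the target of at least 3 blocks per core.

#include <cstdint>
#include <cstdio>

int main() {
  constexpr int64_t kTileElems = 32 * 33;       // one padded shmem tile
  constexpr int64_t kShmemPerCore = 48 * 1024;  // architectural limit per SM
  constexpr int64_t kMinBlocksPerCore = 3;      // occupancy target
  const int64_t budget_per_block = kShmemPerCore / kMinBlocksPerCore;
  for (int64_t elem_bytes : {2, 4, 8}) {
    const int64_t tile_bytes = kTileElems * elem_bytes;
    std::printf("%lld-byte elements: %lld bytes/tile, at most %lld tiles/block\n",
                static_cast<long long>(elem_bytes),
                static_cast<long long>(tile_bytes),
                static_cast<long long>(budget_per_block / tile_bytes));
  }
  return 0;
}

With 32-bit elements this reproduces the comment's figure: a tile is 4224 bytes and the 16kb per-block budget admits at most 3 tiles before params_012 gets truncated.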
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h
index a1cc38401c..616d8a2206 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h
@@ -77,6 +77,7 @@ class IrEmitterUnnested : public IrEmitter {
Status HandleOutfeed(HloInstruction* outfeed) override;
Status HandleRng(HloInstruction* random) override;
Status HandleSelect(HloInstruction* select) override;
+ Status HandleSort(HloInstruction* sort) override;
Status HandleTupleSelect(HloInstruction* tuple_select) override;
Status HandleCrossReplicaSum(HloInstruction* crs) override;
Status HandleAfterAll(HloInstruction* gen_token) override;
@@ -118,7 +119,7 @@ class IrEmitterUnnested : public IrEmitter {
// Emits code that reduces a matrix of shape [height x width] to a vector of
// [width]. Other parameters have the same meaning as those of
// `EmitReductionToVector`. Note that input shape might not be
- // [height x width], but can be bitcast to [height x weight] with "height"
+ // [height x width], but can be bitcast to [height x width] with "height"
// being the major dimension.
Status EmitColumnReduction(
int64 height, int64 width, HloInstruction* reduce,
@@ -134,7 +135,7 @@ class IrEmitterUnnested : public IrEmitter {
// Emits code that reduces a 3D tensor of shape [depth x height x width] to a
// vector of shape [height]. Other parameters have the same meaning as those
// of `EmitReductionToVector`. Note that input shape might not be
- // [depth x height x width], but can be bitcast to [depth x height x weight]
+ // [depth x height x width], but can be bitcast to [depth x height x width]
// with "depth" being the most major dimension.
Status EmitRowReduction(
int64 depth, int64 height, int64 width, HloInstruction* reduce,
diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD
index 7de8f9e1ee..eb93efc560 100644
--- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD
+++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/BUILD
@@ -17,12 +17,12 @@ cc_library(
name = "llvm_gpu_backend",
srcs = [
"dump_ir_pass.cc",
- "gpu_backend_lib.cc",
+ "nvptx_backend_lib.cc",
"utils.cc",
],
hdrs = [
"dump_ir_pass.h",
- "gpu_backend_lib.h",
+ "nvptx_backend_lib.h",
"utils.h",
],
deps = [
@@ -34,6 +34,7 @@ cc_library(
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
+ "@llvm//:amdgpu_code_gen",
"@llvm//:analysis",
"@llvm//:bit_reader",
"@llvm//:bit_writer",
diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.cc
index a4e4e85bf3..6c1c20fc04 100644
--- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc
+++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.cc
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include "tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h"
+#include "tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.h"
#include <map>
#include <memory>
@@ -114,20 +114,21 @@ static string GetLibdeviceFilename(const string& libdevice_dir_path,
// Gets the GPU name as it's known to LLVM for a given compute capability. If
// we see an unrecognized compute capability, we return "sm_30".
static string GetSmName(std::pair<int, int> compute_capability) {
- static auto* m = new std::map<std::pair<int, int>, int>({{{2, 0}, 20},
- {{2, 1}, 21},
- {{3, 0}, 30},
- {{3, 2}, 32},
- {{3, 5}, 35},
- {{3, 7}, 37},
- {{5, 0}, 50},
- {{5, 2}, 52},
- {{5, 3}, 53},
- {{6, 0}, 60},
- {{6, 1}, 61},
- {{6, 2}, 62},
- // TODO: Change this to 70 once LLVM NVPTX supports it
- {{7, 0}, 60}});
+ static auto* m = new std::map<std::pair<int, int>, int>(
+ {{{2, 0}, 20},
+ {{2, 1}, 21},
+ {{3, 0}, 30},
+ {{3, 2}, 32},
+ {{3, 5}, 35},
+ {{3, 7}, 37},
+ {{5, 0}, 50},
+ {{5, 2}, 52},
+ {{5, 3}, 53},
+ {{6, 0}, 60},
+ {{6, 1}, 61},
+ {{6, 2}, 62},
+ // TODO: Change this to 70 once LLVM NVPTX supports it
+ {{7, 0}, 60}});
int sm_version = 30;
auto it = m->find(compute_capability);
if (it != m->end()) {
@@ -206,7 +207,7 @@ std::unique_ptr<llvm::TargetMachine> GetTargetMachine(
codegen_opt_level = CodeGenOpt::None;
}
return WrapUnique(target->createTargetMachine(
- triple.str(), llvm_ir::AsStringRef(cpu_name), "+ptx42", target_options,
+ triple.str(), llvm_ir::AsStringRef(cpu_name), "+ptx60", target_options,
Optional<Reloc::Model>(RelocModel), Optional<CodeModel::Model>(CMModel),
codegen_opt_level));
}
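GetSmName above is a lookup with a fallback: unknown compute capabilities map to sm_30, and (per the TODO) (7,0) is clamped to sm_60 until LLVM NVPTX supports sm_70. A tiny standalone sketch of that lookup-with-fallback pattern, using a plain std::map and an abridged table for illustration only:

#include <cstdio>
#include <map>
#include <string>
#include <utility>

std::string SmNameSketch(std::pair<int, int> cc) {
  static const std::map<std::pair<int, int>, int> kTable = {
      {{3, 5}, 35}, {{6, 0}, 60}, {{6, 1}, 61}, {{7, 0}, 60}};  // abridged
  int sm = 30;  // fallback for unrecognized compute capabilities
  auto it = kTable.find(cc);
  if (it != kTable.end()) {
    sm = it->second;
  }
  return "sm_" + std::to_string(sm);
}

int main() {
  std::printf("%s\n", SmNameSketch({6, 1}).c_str());  // sm_61
  std::printf("%s\n", SmNameSketch({9, 9}).c_str());  // sm_30 fallback
  return 0;
}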
@@ -319,8 +320,8 @@ Status LinkLibdeviceIfNecessary(llvm::Module* module,
llvm::Linker linker(*module);
string libdevice_path = tensorflow::io::JoinPath(
- libdevice_dir_path, GetLibdeviceFilename(libdevice_dir_path,
- compute_capability));
+ libdevice_dir_path,
+ GetLibdeviceFilename(libdevice_dir_path, compute_capability));
TF_RETURN_IF_ERROR(tensorflow::Env::Default()->FileExists(libdevice_path));
VLOG(1) << "Linking with libdevice from: " << libdevice_path;
std::unique_ptr<llvm::Module> libdevice_module =
diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.h
index 0a345191d3..54e0e140de 100644
--- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h
+++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.h
@@ -14,8 +14,8 @@ limitations under the License.
==============================================================================*/
// LLVM-based compiler backend.
-#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_LLVM_GPU_BACKEND_GPU_BACKEND_LIB_H_
-#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_LLVM_GPU_BACKEND_GPU_BACKEND_LIB_H_
+#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_LLVM_GPU_BACKEND_NVPTX_BACKEND_LIB_H_
+#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_LLVM_GPU_BACKEND_NVPTX_BACKEND_LIB_H_
#include <string>
#include <utility>
@@ -44,4 +44,4 @@ StatusOr<string> CompileToPtx(llvm::Module* module,
} // namespace gpu
} // namespace xla
-#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_LLVM_GPU_BACKEND_GPU_BACKEND_LIB_H_
+#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_LLVM_GPU_BACKEND_NVPTX_BACKEND_LIB_H_
diff --git a/tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc b/tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc
index ea661b3c2c..f95fbb01f9 100644
--- a/tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc
+++ b/tensorflow/compiler/xla/service/gpu/multi_output_fusion.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include <string>
#include <utility>
+#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
@@ -71,7 +72,6 @@ bool GpuMultiOutputFusion::ShapesCompatibleForFusion(HloInstruction* instr1,
// In that case, the operand of the reduce needs to have the same shape
// as the other tuple operands, but also we need to compare the output
// shapes of the reduces.
- // TODO(tjoerg): Allow differences in fp precision.
auto* element_instr_1 = get_element_instr(instr1);
auto* element_instr_2 = get_element_instr(instr2);
if (element_instr_1->opcode() == HloOpcode::kReduce &&
@@ -80,8 +80,8 @@ bool GpuMultiOutputFusion::ShapesCompatibleForFusion(HloInstruction* instr1,
return false;
}
// The elementwise output shapes must be the same (including layout).
- return ShapeUtil::Equal(get_element_shape(element_instr_1),
- get_element_shape(element_instr_2));
+ return ShapeUtil::EqualIgnoringFpPrecision(
+ get_element_shape(element_instr_1), get_element_shape(element_instr_2));
}
namespace {
@@ -107,6 +107,27 @@ bool IsInputFusibleReduction(HloInstruction* instr) {
return IsReductionToVector(*instr);
}
}
+
+// The code emitted for reduction suffers from poor data locality if the layouts
+// of input parameters differ. In such situations it is beneficial not to fuse.
+// We consider input params with maximum rank only. Params with smaller ranks
+// will be broadcasted and have not been observed to cause data locality issues.
+// TODO(b/110927656): Improve reduce emitters to remove this limitation.
+bool ReduceFriendlyInputLayouts(HloInstruction* instr) {
+ int64 max_rank = 0;
+ const Layout* max_rank_layout;
+ for (HloInstruction* param : instr->fused_parameters()) {
+ if (ShapeUtil::Rank(param->shape()) > max_rank) {
+ max_rank = ShapeUtil::Rank(param->shape());
+ max_rank_layout = &param->shape().layout();
+ }
+ }
+ return c_all_of(instr->fused_parameters(), [&](HloInstruction* param) {
+ return (ShapeUtil::Rank(param->shape()) < max_rank) ||
+ (LayoutUtil::Equal(param->shape().layout(), *max_rank_layout));
+ });
+}
+
} // namespace
bool GpuMultiOutputFusion::IsFusible(HloInstruction* instr) {
@@ -173,29 +194,41 @@ bool GpuMultiOutputFusion::DoProducerConsumerMultiOutputFusion() {
// fusions operands.
for (HloInstruction* consumer : computation()->MakeInstructionPostOrder()) {
if (consumer->user_count() == 0) {
+ VLOG(3) << consumer->name() << " has no users.";
continue;
}
if (!IsInputFusibleReduction(consumer)) {
+ VLOG(3) << consumer->name() << " is not an input-fusable reduction.";
continue;
}
+ VLOG(3) << consumer->name()
+ << " is a fusion candidate. Looking for fuseable operands.";
auto consumer_operands = consumer->operands();
for (size_t i = 0; i < consumer_operands.size(); ++i) {
HloInstruction* producer = consumer_operands[i];
if (!producer->IsFusable()) {
+ VLOG(3) << producer->name() << " is not fusable.";
continue;
}
const bool is_loop_fusion =
producer->opcode() == HloOpcode::kFusion &&
producer->fusion_kind() == HloInstruction::FusionKind::kLoop;
if (!is_loop_fusion) {
+ VLOG(3) << producer->name() << " is not a loop fusion.";
continue;
}
if (!ShapesCompatibleForFusion(producer, consumer)) {
+ VLOG(3) << producer->name() << " has an incompatible shape.";
+ continue;
+ }
+ if (!ReduceFriendlyInputLayouts(producer)) {
+ VLOG(3) << producer->name() << " has inputs with mixed layouts.";
continue;
}
// If we have already decided to fuse this producer, skip it.
if (ContainsKey(to_fuse, producer)) {
+ VLOG(3) << producer->name() << " will be fused with another consumer.";
continue;
}
// Do not fuse a producer if the other operands of the fusion are
@@ -204,6 +237,7 @@ bool GpuMultiOutputFusion::DoProducerConsumerMultiOutputFusion() {
return producer != operand &&
reachability()->IsReachable(producer, operand);
})) {
+ VLOG(3) << producer->name() << " would introduce a cycle when fused.";
break;
}
to_fuse.insert(producer);
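For illustration only, here is a minimal standalone sketch of the layout rule that ReduceFriendlyInputLayouts enforces above, using plain structs instead of HloInstruction/Layout (the names below are assumptions): only the parameters of maximal rank must agree on a layout, since lower-rank parameters are broadcast.

#include <algorithm>
#include <cstdio>
#include <vector>

struct Param {
  int rank;
  std::vector<int> minor_to_major;  // stand-in for an XLA layout
};

bool ReduceFriendlyInputLayoutsSketch(const std::vector<Param>& params) {
  int max_rank = 0;
  const std::vector<int>* max_rank_layout = nullptr;
  for (const Param& p : params) {
    if (p.rank > max_rank) {
      max_rank = p.rank;
      max_rank_layout = &p.minor_to_major;
    }
  }
  return std::all_of(params.begin(), params.end(), [&](const Param& p) {
    return p.rank < max_rank || p.minor_to_major == *max_rank_layout;
  });
}

int main() {
  // Mirrors the new test below: layouts {1,3,2,0} vs {3,2,1,0} at equal rank
  // are mixed, so such a producer should not be fused into the reduction.
  std::vector<Param> mixed = {{4, {1, 3, 2, 0}}, {4, {3, 2, 1, 0}}};
  std::printf("reduce friendly: %s\n",
              ReduceFriendlyInputLayoutsSketch(mixed) ? "yes" : "no");
  return 0;
}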
diff --git a/tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc b/tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc
index 979ea79243..451e49f23a 100644
--- a/tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc
+++ b/tensorflow/compiler/xla/service/gpu/multi_output_fusion_test.cc
@@ -27,7 +27,7 @@ namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace gpu {
-using InstructionFusionTest = HloTestBase;
+using MultiOutputFusionTest = HloTestBase;
const char kModulePrefix[] = R"(
HloModule test_module
@@ -40,10 +40,10 @@ const char kModulePrefix[] = R"(
scalar_mul_computation {
scalar_lhs.1 = f32[] parameter(0)
scalar_rhs.1 = f32[] parameter(1)
- ROOT mul.1 = f32[] add(scalar_lhs.1, scalar_rhs.1)
+ ROOT mul.1 = f32[] multiply(scalar_lhs.1, scalar_rhs.1)
})";
-TEST_F(InstructionFusionTest, MultiOutputFusionSiblingReduceAndReduceFusion) {
+TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingReduceAndReduceFusion) {
// Fusion with reduce instruction root and a sibling reduce instruction
// sharing the same input param.
auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
@@ -72,7 +72,7 @@ TEST_F(InstructionFusionTest, MultiOutputFusionSiblingReduceAndReduceFusion) {
op::Tuple(op::Reduce(), op::Reduce()));
}
-TEST_F(InstructionFusionTest, MultiOutputFusionDifferentReduceInputShapes) {
+TEST_F(MultiOutputFusionTest, MultiOutputFusionDifferentReduceInputShapes) {
auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[6400]{0} parameter(1)
@@ -99,7 +99,7 @@ TEST_F(InstructionFusionTest, MultiOutputFusionDifferentReduceInputShapes) {
ASSERT_FALSE(GpuMultiOutputFusion().Run(module.get()).ValueOrDie());
}
-TEST_F(InstructionFusionTest, MultiOutputFusionDifferentReduceOutputShapes) {
+TEST_F(MultiOutputFusionTest, MultiOutputFusionDifferentReduceOutputShapes) {
auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
fused_computation_1 {
p1.1 = f32[10,10]{1,0} parameter(1)
@@ -126,7 +126,7 @@ TEST_F(InstructionFusionTest, MultiOutputFusionDifferentReduceOutputShapes) {
ASSERT_FALSE(GpuMultiOutputFusion().Run(module.get()).ValueOrDie());
}
-TEST_F(InstructionFusionTest, MultiOutputFusionSiblingReduceFusions) {
+TEST_F(MultiOutputFusionTest, MultiOutputFusionSiblingReduceFusions) {
// Two sibling fusions with reduce instruction roots sharing the same input
// param.
auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
@@ -160,7 +160,7 @@ TEST_F(InstructionFusionTest, MultiOutputFusionSiblingReduceFusions) {
op::Tuple(op::Reduce(), op::Reduce()));
}
-TEST_F(InstructionFusionTest,
+TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingReduceAndReduceMultiOutputFusion) {
// Multi-output fusion with two reduce instructions root and a sibling reduce
// instruction sharing the same input param.
@@ -193,7 +193,7 @@ TEST_F(InstructionFusionTest,
op::Tuple(op::Reduce(), op::Reduce(), op::Reduce()));
}
-TEST_F(InstructionFusionTest,
+TEST_F(MultiOutputFusionTest,
MultiOutputFusionSiblingFusionCheckAgainstReduceOperand) {
// Verify that if we already have a multi-output fusion that we prefer to pick
// a reduce op from its operands for checking shape compatibility.
@@ -226,7 +226,7 @@ TEST_F(InstructionFusionTest,
ASSERT_FALSE(GpuMultiOutputFusion().Run(module.get()).ValueOrDie());
}
-TEST_F(InstructionFusionTest, MultiOutputFusionTwoLoops) {
+TEST_F(MultiOutputFusionTest, MultiOutputFusionTwoLoops) {
auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
@@ -255,7 +255,7 @@ TEST_F(InstructionFusionTest, MultiOutputFusionTwoLoops) {
op::Tuple(op::Multiply(), op::Divide()));
}
-TEST_F(InstructionFusionTest, ProducerConsumerFusionLoopFusionAndReduce) {
+TEST_F(MultiOutputFusionTest, ProducerConsumerFusionLoopFusionAndReduce) {
auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
fused_add {
p0.1 = f32[2,2,2]{2,1,0} parameter(0)
@@ -282,7 +282,7 @@ TEST_F(InstructionFusionTest, ProducerConsumerFusionLoopFusionAndReduce) {
op::Tuple(op::Reduce(), op::Add()));
}
-TEST_F(InstructionFusionTest, ProducerConsumerFusionLoopFusionAndReduceFusion) {
+TEST_F(MultiOutputFusionTest, ProducerConsumerFusionLoopFusionAndReduceFusion) {
auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
fused_select {
p1.1 = f32[2,2,2]{2,1,0} parameter(1)
@@ -323,7 +323,7 @@ TEST_F(InstructionFusionTest, ProducerConsumerFusionLoopFusionAndReduceFusion) {
op::Tuple(op::Reduce(), op::Reduce(), op::Select()));
}
-TEST_F(InstructionFusionTest, ProducerConsumerFusionDoNotFuseLoopReduceFusion) {
+TEST_F(MultiOutputFusionTest, ProducerConsumerFusionDoNotFuseLoopReduceFusion) {
auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
fused_element_wise {
p0.1 = f32[2,2,2]{2,1,0} parameter(0)
@@ -349,5 +349,75 @@ TEST_F(InstructionFusionTest, ProducerConsumerFusionDoNotFuseLoopReduceFusion) {
ASSERT_FALSE(GpuMultiOutputFusion().Run(module.get()).ValueOrDie());
}
+TEST_F(MultiOutputFusionTest,
+ ProducerConsumerFusionFp16LoopFusionAndReduceFusion) {
+ auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
+ fused_select {
+ p1.1 = f16[2,2,2]{2,1,0} parameter(1)
+ c0 = f16[] constant(0)
+ broadcast = f16[2,2,2]{2,1,0} broadcast(f16[] c0), dimensions={}
+ greater-than = pred[2,2,2]{2,1,0} greater-than(f16[2,2,2]{2,1,0} p1.1, f16[2,2,2]{2,1,0} broadcast)
+ p0.1 = f16[2,2,2]{2,1,0} parameter(0)
+ ROOT select = f16[2,2,2]{2,1,0} select(pred[2,2,2]{2,1,0} greater-than, f16[2,2,2]{2,1,0} p0.1, f16[2,2,2]{2,1,0} broadcast)
+ }
+ fused_reduce {
+ p0.2 = f16[2,2,2]{2,1,0} parameter(0)
+ convert = f32[2,2,2]{2,1,0} convert(p0.2)
+ c1 = f32[] constant(0)
+ r1 = f32[2,2]{1,0} reduce(convert, c1), dimensions={2}, to_apply=scalar_add_computation
+ mul = f32[2,2,2]{2,1,0} multiply(convert, convert)
+ r2 = f32[2,2]{1,0} reduce(mul, c1), dimensions={2}, to_apply=scalar_add_computation
+ ROOT tuple = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(r1, r2)
+ }
+ ENTRY reduce {
+ p0 = f16[2,2,2]{2,1,0} parameter(0)
+ p1 = f16[2,2,2]{2,1,0} parameter(1)
+ select = f16[2,2,2]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select
+ fusion = (f32[2,2]{1,0}, f32[2,2]{1,0}) fusion(select), kind=kInput, calls=fused_reduce
+ gte0 = f32[2,2]{1,0} get-tuple-element(fusion), index=0
+ gte1 = f32[2,2]{1,0} get-tuple-element(fusion), index=1
+ ROOT root = (f32[2,2]{1,0}, f32[2,2]{1,0}, f16[2,2,2]{2,1,0}) tuple(gte1, gte1, select)
+ })"))
+ .ValueOrDie();
+ ASSERT_TRUE(GpuMultiOutputFusion().Run(module.get()).ValueOrDie());
+ SCOPED_TRACE(module->ToString());
+ const HloInstruction* root = module->entry_computation()->root_instruction();
+ EXPECT_THAT(root, op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
+ op::GetTupleElement()));
+ const HloInstruction* fusion = root->operand(0)->operand(0);
+ ASSERT_TRUE(fusion->IsMultiOutputFusion());
+ EXPECT_THAT(fusion->fused_expression_root(),
+ op::Tuple(op::Reduce(), op::Reduce(), op::Select()));
+}
+
+TEST_F(MultiOutputFusionTest,
+ ProducerConsumerFusionReduceUnfriendlyLoopFusion) {
+ auto module = ParseHloString(tensorflow::strings::StrCat(kModulePrefix, R"(
+ mixed_input_layouts_computation {
+ p0.1 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
+ p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
+ copy = f16[128,1024,32,32]{1,3,2,0} copy(p1.1)
+ c0 = f16[] constant(0)
+ broadcast = f16[128,1024,32,32]{1,3,2,0} broadcast(c0), dimensions={}
+ greater-than = pred[128,1024,32,32]{1,3,2,0} greater-than(copy, broadcast)
+ ROOT root = f16[128,1024,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast)
+ }
+ fused_reduce {
+ p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
+ convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2)
+ c0.2 = f32[] constant(0)
+ ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add_computation
+ }
+ ENTRY reduce {
+ p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
+ p1 = f16[128,1024,32,32]{1,3,2,0} parameter(1)
+ loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation
+ reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
+ ROOT root = (f32[1024]{0}, f16[128,1024,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion)
+ })"))
+ .ValueOrDie();
+ ASSERT_FALSE(GpuMultiOutputFusion().Run(module.get()).ValueOrDie());
+}
+
} // namespace gpu
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc b/tensorflow/compiler/xla/service/gpu/nvptx_compiler.cc
index e1da8d940c..2eefadebcd 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc
+++ b/tensorflow/compiler/xla/service/gpu/nvptx_compiler.cc
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include "tensorflow/compiler/xla/service/gpu/gpu_compiler.h"
+#include "tensorflow/compiler/xla/service/gpu/nvptx_compiler.h"
#include <stdlib.h>
#include <atomic>
@@ -50,7 +50,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/service/gpu/ir_emission_utils.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emitter_context.h"
#include "tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.h"
-#include "tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h"
+#include "tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/nvptx_backend_lib.h"
#include "tensorflow/compiler/xla/service/gpu/multi_output_fusion.h"
#include "tensorflow/compiler/xla/service/gpu/pad_insertion.h"
#include "tensorflow/compiler/xla/service/gpu/partition_assignment.h"
@@ -96,8 +96,8 @@ limitations under the License.
namespace xla {
namespace gpu {
-/* static */ const char* GpuCompiler::kTargetTriple = "nvptx64-nvidia-cuda";
-/* static */ const char* GpuCompiler::kDataLayout =
+/* static */ const char* NVPTXCompiler::kTargetTriple = "nvptx64-nvidia-cuda";
+/* static */ const char* NVPTXCompiler::kDataLayout =
"e-i64:64-i128:128-v16:16-v32:32-n16:32:64";
namespace {
@@ -354,16 +354,30 @@ void WarnIfBadPtxasVersion(const string& ptxas_path) {
return;
}
+ // We need ptxas >= 9.0 as a hard requirement, because we compile targeting
+ // PTX 6.0. An older ptxas will just fail to compile any of our code.
+ //
// ptxas 9.0 before 9.0.276 and ptxas 9.1 before 9.1.121 miscompile some
// address calculations with large offsets (e.g. "load ptr + large_constant"),
// b/70245379.
- if ((vmaj == 9 && vmin == 0 && vdot < 276) ||
- (vmaj == 9 && vmin == 1 && vdot < 121)) {
- LOG(WARNING) << "*** WARNING *** You are using ptxas " << vmaj << "."
- << vmin << "." << vdot
- << ", which is in range [9.0.0, 9.0.276) + [9.1.0, 9.1.121). "
- "These versions are known to miscompile XLA code, leading "
- "to incorrect results or invalid-address errors.";
+ //
+ // ptxas 9.1.121 miscompiles some large multioutput fusions, again in a way
+ // that appears related to address calculations, b/111107644. ptxas 9.2.88
+ // appears to work, as far as we can tell.
+ if (vmaj < 9) {
+ LOG(ERROR)
+ << "You are using ptxas 8.x, but XLA requires ptxas 9.x (and strongly "
+ "prefers >= 9.2.88). Compilation of XLA kernels below will likely "
+ "fail.\n\nYou do not need to update CUDA; cherry-picking the ptxas "
+ "binary is sufficient.";
+ } else if (std::make_tuple(vmaj, vmin, vdot) < std::make_tuple(9, 2, 88)) {
+ LOG(WARNING)
+ << "*** WARNING *** You are using ptxas " << vmaj << "." << vmin << "."
+ << vdot
+ << ", which older than 9.2.88. ptxas 9.x before 9.2.88 is known to "
+ "miscompile XLA code, leading to incorrect results or "
+ "invalid-address errors.\n\nYou do not need to update to CUDA "
+ "9.2.88; cherry-picking the ptxas binary is sufficient.";
}
}
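The "older than 9.2.88" check above is a lexicographic comparison over the (major, minor, dot) triple, which std::tuple's operator< provides directly. A minimal standalone sketch of that comparison (the helper name is an assumption, not part of this change):

#include <cassert>
#include <tuple>

// Returns true iff version a.b.c is older than x.y.z, compared lexicographically.
bool VersionOlderThan(int a, int b, int c, int x, int y, int z) {
  return std::make_tuple(a, b, c) < std::make_tuple(x, y, z);
}

int main() {
  assert(VersionOlderThan(9, 2, 87, 9, 2, 88));     // triggers the warning
  assert(!VersionOlderThan(9, 2, 88, 9, 2, 88));    // exactly 9.2.88 is fine
  assert(!VersionOlderThan(10, 0, 130, 9, 2, 88));  // newer major version is fine
  return 0;
}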
@@ -391,17 +405,18 @@ void WarnIfBadDriverJITVersion() {
// - 384.x before 384.108
// - 387.x before 387.40
// - 390.x before 390.10.
- auto vmaj = std::get<0>(version);
- auto vmin = std::get<1>(version);
- if ((vmaj == 384 && vmin < 108) || //
- (vmaj == 387 && vmin < 40) || //
- (vmaj == 390 && vmin < 10)) {
+ //
+ // In addition, only >= 396.20 contains ptxas >= 9.2.88, which contains the
+ // fix for the "large multioutput fusions" miscompile, b/111107644.
+ if (version < std::make_tuple(396, 20, 0)) {
LOG(WARNING)
<< "*** WARNING *** Invoking the PTX->SASS JIT from driver version "
<< se::cuda::DriverVersionToString(version)
- << ", which is in range [384.0.0, 384.108.0) + [387.0.0, 387.40.0) + "
- "[390.0.0, 390.10.0). These versions are known to miscompile XLA "
- "code, leading to incorrect results or invalid-address errors.";
+ << ", which is older than 396.20.0. These versions are known to "
+ "miscompile XLA code, leading to incorrect results or "
+ "invalid-address errors.\nXLA only uses the driver JIT if it "
+ "cannot find ptxas; you don't need to update your driver if "
+ "you can point XLA to ptxas 9.2.88 or newer.";
}
});
}
@@ -473,14 +488,14 @@ StatusOr<std::vector<uint8>> CompilePtx(const string& ptx, int cc_major,
} // namespace
-GpuCompiler::GpuCompiler()
+NVPTXCompiler::NVPTXCompiler()
: pointer_size_(llvm::DataLayout(kDataLayout)
.getPointerSize(0 /* default address space */)) {}
-StatusOr<std::unique_ptr<HloModule>> GpuCompiler::RunHloPasses(
+StatusOr<std::unique_ptr<HloModule>> NVPTXCompiler::RunHloPasses(
std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
DeviceMemoryAllocator* device_allocator) {
- XLA_SCOPED_LOGGING_TIMER("GpuCompiler::RunHloPasses");
+ XLA_SCOPED_LOGGING_TIMER("NVPTXCompiler::RunHloPasses");
tracing::ScopedActivity activity("HLO Transforms", module->name(),
/*is_expensive=*/true);
TF_RETURN_IF_ERROR(
@@ -488,10 +503,10 @@ StatusOr<std::unique_ptr<HloModule>> GpuCompiler::RunHloPasses(
return std::move(module);
}
-StatusOr<std::unique_ptr<Executable>> GpuCompiler::RunBackend(
+StatusOr<std::unique_ptr<Executable>> NVPTXCompiler::RunBackend(
std::unique_ptr<HloModule> module, se::StreamExecutor* stream_exec,
DeviceMemoryAllocator* device_allocator) {
- XLA_SCOPED_LOGGING_TIMER("GpuCompiler::RunBackend");
+ XLA_SCOPED_LOGGING_TIMER("NVPTXCompiler::RunBackend");
TF_RET_CHECK(stream_exec != nullptr);
@@ -528,7 +543,7 @@ StatusOr<std::unique_ptr<Executable>> GpuCompiler::RunBackend(
BufferAssigner::Run(module.get(), hlo_schedule->ConsumeHloOrdering(),
BufferSizeBytesFunction(),
/*color_alignment=*/[](LogicalBuffer::Color) {
- return kCudaMallocAlignBytes;
+ return kXlaAllocatedBufferAlignBytes;
}));
// BufferAssignment::Stats::ToString() and BufferAssignment::ToString()
// include headers, so no need for us to print them ourselves.
@@ -551,7 +566,7 @@ StatusOr<std::unique_ptr<Executable>> GpuCompiler::RunBackend(
IrEmitterUnnested ir_emitter(module->config(), entry_computation,
&ir_emitter_context);
{
- XLA_SCOPED_LOGGING_TIMER("GpuCompiler::RunBackend - IR emission");
+ XLA_SCOPED_LOGGING_TIMER("NVPTXCompiler::RunBackend - IR emission");
TF_RETURN_IF_ERROR(entry_computation->Accept(&ir_emitter));
}
@@ -578,7 +593,8 @@ StatusOr<std::unique_ptr<Executable>> GpuCompiler::RunBackend(
}
{
- XLA_SCOPED_LOGGING_TIMER("GpuCompiler::RunBackend - Running LLVM verifier");
+ XLA_SCOPED_LOGGING_TIMER(
+ "NVPTXCompiler::RunBackend - Running LLVM verifier");
std::string err;
llvm::raw_string_ostream err_stream(err);
@@ -618,7 +634,7 @@ StatusOr<std::unique_ptr<Executable>> GpuCompiler::RunBackend(
string ptx;
{
- XLA_SCOPED_LOGGING_TIMER("GpuCompiler::RunBackend - CompileToPtx");
+ XLA_SCOPED_LOGGING_TIMER("NVPTXCompiler::RunBackend - CompileToPtx");
TF_ASSIGN_OR_RETURN(ptx, CompileToPtx(&llvm_module, {cc_major, cc_minor},
module->config(), libdevice_dir));
}
@@ -687,10 +703,10 @@ StatusOr<std::unique_ptr<Executable>> GpuCompiler::RunBackend(
return std::unique_ptr<Executable>(gpu_executable);
}
-std::vector<uint8> GpuCompiler::CompilePtxOrGetCachedResult(const string& ptx,
- int cc_major,
- int cc_minor) {
- XLA_SCOPED_LOGGING_TIMER("GpuCompiler::CompilePtxOrGetCachedResult");
+std::vector<uint8> NVPTXCompiler::CompilePtxOrGetCachedResult(const string& ptx,
+ int cc_major,
+ int cc_minor) {
+ XLA_SCOPED_LOGGING_TIMER("NVPTXCompiler::CompilePtxOrGetCachedResult");
tracing::ScopedActivity activity("PTX->CUBIN", /*is_expensive=*/true);
bool inserted;
decltype(compilation_cache_.begin()) iter;
@@ -763,12 +779,14 @@ std::vector<uint8> GpuCompiler::CompilePtxOrGetCachedResult(const string& ptx,
}
StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>>
-GpuCompiler::CompileAheadOfTime(std::vector<std::unique_ptr<HloModule>> module,
- const AotCompilationOptions& options) {
- return Unimplemented("not yet implemented: GpuCompiler::CompileAheadOfTime");
+NVPTXCompiler::CompileAheadOfTime(
+ std::vector<std::unique_ptr<HloModule>> module,
+ const AotCompilationOptions& options) {
+ return Unimplemented(
+ "not yet implemented: NVPTXCompiler::CompileAheadOfTime");
}
-se::Platform::Id GpuCompiler::PlatformId() const {
+se::Platform::Id NVPTXCompiler::PlatformId() const {
return se::cuda::kCudaPlatformId;
}
@@ -778,7 +796,7 @@ se::Platform::Id GpuCompiler::PlatformId() const {
static bool InitModule() {
xla::Compiler::RegisterCompilerFactory(
stream_executor::cuda::kCudaPlatformId,
- []() { return xla::MakeUnique<xla::gpu::GpuCompiler>(); });
+ []() { return xla::MakeUnique<xla::gpu::NVPTXCompiler>(); });
return true;
}
static bool module_initialized = InitModule();
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_compiler.h b/tensorflow/compiler/xla/service/gpu/nvptx_compiler.h
index f3b02ae5d8..d4d2909f1b 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_compiler.h
+++ b/tensorflow/compiler/xla/service/gpu/nvptx_compiler.h
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_COMPILER_H_
-#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_COMPILER_H_
+#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_NVPTX_COMPILER_H_
+#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_NVPTX_COMPILER_H_
#include <memory>
#include <string>
@@ -37,10 +37,10 @@ namespace xla {
namespace gpu {
// The GPU compiler generates efficient GPU executables.
-class GpuCompiler : public LLVMCompiler {
+class NVPTXCompiler : public LLVMCompiler {
public:
- GpuCompiler();
- ~GpuCompiler() override {}
+ NVPTXCompiler();
+ ~NVPTXCompiler() override {}
// Bring in
// StatusOr<std::vector<std::unique_ptr<Executable>>> Compile(
@@ -64,7 +64,7 @@ class GpuCompiler : public LLVMCompiler {
se::Platform::Id PlatformId() const override;
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const override {
- // Capture just the pointer size, not the entire GpuCompiler object.
+ // Capture just the pointer size, not the entire NVPTXCompiler object.
int64 pointer_size = pointer_size_;
return [pointer_size](const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, pointer_size);
@@ -146,10 +146,10 @@ class GpuCompiler : public LLVMCompiler {
CompilationCacheHash, CompilationCacheEq>
compilation_cache_ GUARDED_BY(mutex_);
- TF_DISALLOW_COPY_AND_ASSIGN(GpuCompiler);
+ TF_DISALLOW_COPY_AND_ASSIGN(NVPTXCompiler);
};
} // namespace gpu
} // namespace xla
-#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_GPU_COMPILER_H_
+#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_NVPTX_COMPILER_H_
diff --git a/tensorflow/compiler/xla/service/gpu/outfeed_manager.cc b/tensorflow/compiler/xla/service/gpu/outfeed_manager.cc
index 47744548b9..4aaf0c9e14 100644
--- a/tensorflow/compiler/xla/service/gpu/outfeed_manager.cc
+++ b/tensorflow/compiler/xla/service/gpu/outfeed_manager.cc
@@ -23,25 +23,6 @@ limitations under the License.
namespace xla {
namespace gpu {
-void OutfeedManager::EnqueueOutfeedDestination(
- ShapeTree<std::unique_ptr<OutfeedBuffer>>* buffers) {
- tensorflow::mutex_lock l(mu_);
- enqueued_buffers_.push_back(buffers);
- cv_.notify_one();
-}
-
-ShapeTree<std::unique_ptr<OutfeedBuffer>>*
-OutfeedManager::BlockingGetNextOutfeedDestination() {
- tensorflow::mutex_lock l(mu_);
- while (enqueued_buffers_.empty()) {
- cv_.wait(l);
- }
- ShapeTree<std::unique_ptr<OutfeedBuffer>>* current_buffer =
- enqueued_buffers_.front();
- enqueued_buffers_.pop_front();
- return current_buffer;
-}
-
OutfeedManager* GetOrCreateOutfeedManager() {
static auto* manager = new OutfeedManager;
return manager;
diff --git a/tensorflow/compiler/xla/service/gpu/outfeed_manager.h b/tensorflow/compiler/xla/service/gpu/outfeed_manager.h
index f580c24e17..a752eb7011 100644
--- a/tensorflow/compiler/xla/service/gpu/outfeed_manager.h
+++ b/tensorflow/compiler/xla/service/gpu/outfeed_manager.h
@@ -16,10 +16,8 @@ limitations under the License.
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_OUTFEED_MANAGER_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_OUTFEED_MANAGER_H_
-#include <deque>
-#include <vector>
-
#include "tensorflow/compiler/xla/literal.h"
+#include "tensorflow/compiler/xla/service/gpu/xfeed_queue.h"
#include "tensorflow/compiler/xla/shape_tree.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/notification.h"
@@ -60,28 +58,7 @@ class OutfeedBuffer {
// Manages a thread-safe queue of buffers. The buffers are supposed to be
// produced by the transfer manager and consumed by the device.
-class OutfeedManager {
- public:
- // Adds a tree of buffers to the queue. The individual buffers correspond to
- // the elements of a tuple and may be nullptr if the buffer is a tuple index
- // buffer.
- void EnqueueOutfeedDestination(
- ShapeTree<std::unique_ptr<OutfeedBuffer>>* buffers);
-
- // Blocks until the queue is non-empty, then returns the buffer at the head of
- // the queue.
- ShapeTree<std::unique_ptr<OutfeedBuffer>>*
- BlockingGetNextOutfeedDestination();
-
- private:
- tensorflow::mutex mu_;
-
- // Condition variable that is signaled every time a buffer is enqueued.
- tensorflow::condition_variable cv_;
-
- // The queue of trees of buffers. OutfeedBuffer* queue contents are not owned.
- std::deque<ShapeTree<std::unique_ptr<OutfeedBuffer>>*> enqueued_buffers_;
-};
+using OutfeedManager = XfeedQueue<ShapeTree<std::unique_ptr<OutfeedBuffer>>*>;
// Singleton creator-or-accessor: Returns the GPU outfeed manager.
OutfeedManager* GetOrCreateOutfeedManager();
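OutfeedManager is now an alias for XfeedQueue, which is added elsewhere in this change. As a rough illustration of the queue shape it replaces (standard-library types instead of tensorflow::mutex; this is a sketch, not XfeedQueue's actual API):

#include <condition_variable>
#include <deque>
#include <mutex>

template <typename T>
class BlockingQueueSketch {
 public:
  // Adds an entry to the queue and wakes one waiting consumer.
  void EnqueueDestination(T buffers) {
    std::unique_lock<std::mutex> l(mu_);
    enqueued_buffers_.push_back(std::move(buffers));
    cv_.notify_one();
  }

  // Blocks until the queue is non-empty, then pops and returns the head.
  T BlockingGetNextDestination() {
    std::unique_lock<std::mutex> l(mu_);
    cv_.wait(l, [this] { return !enqueued_buffers_.empty(); });
    T current = std::move(enqueued_buffers_.front());
    enqueued_buffers_.pop_front();
    return current;
  }

 private:
  std::mutex mu_;
  std::condition_variable cv_;
  std::deque<T> enqueued_buffers_;
};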
diff --git a/tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc b/tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc
index 4c0f1421e9..7986e63f43 100644
--- a/tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/outfeed_thunk.cc
@@ -36,7 +36,7 @@ Status OutfeedThunk::ExecuteOnStream(
auto op_profiler = profiler->MakeScopedInstructionProfiler(hlo_instruction());
OutfeedManager* outfeed_manager = GetOrCreateOutfeedManager();
ShapeTree<std::unique_ptr<OutfeedBuffer>>* outfeed_buffers =
- outfeed_manager->BlockingGetNextOutfeedDestination();
+ outfeed_manager->BlockingGetNextDestination();
// Nothing to be done for empty tuples.
if (ShapeUtil::IsEmptyTuple(hlo_instruction()->operand(0)->shape())) {
diff --git a/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.cc b/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.cc
index cd833ec7bd..3838fee674 100644
--- a/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.cc
@@ -32,27 +32,27 @@ namespace gpu {
ParallelLoopEmitter::ParallelLoopEmitter(
BodyEmitter body_emitter, const Shape& shape,
- const LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* ir_builder,
+ const LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* b,
int unroll_factor)
- : LoopEmitter(body_emitter, shape, ir_builder),
+ : LoopEmitter(body_emitter, shape, b),
launch_dimensions_(launch_dimensions),
unroll_factor_(unroll_factor) {}
ParallelLoopEmitter::ParallelLoopEmitter(
const llvm_ir::ElementGenerator& target_element_generator,
tensorflow::gtl::ArraySlice<llvm_ir::IrArray> target_arrays,
- const LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* ir_builder,
+ const LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* b,
int unroll_factor)
- : LoopEmitter(target_element_generator, target_arrays, ir_builder),
+ : LoopEmitter(target_element_generator, target_arrays, b),
launch_dimensions_(launch_dimensions),
unroll_factor_(unroll_factor) {}
ParallelLoopEmitter::ParallelLoopEmitter(
const llvm_ir::ElementGenerator& target_element_generator,
const llvm_ir::IrArray& target_array,
- const LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* ir_builder,
+ const LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* b,
int unroll_factor)
- : LoopEmitter(target_element_generator, target_array, ir_builder),
+ : LoopEmitter(target_element_generator, target_array, b),
launch_dimensions_(launch_dimensions),
unroll_factor_(unroll_factor) {}
@@ -74,29 +74,27 @@ ParallelLoopEmitter::EmitIndexAndSetExitBasicBlock(
CHECK_NE(index_type, nullptr);
std::vector<llvm_ir::IrArray::Index> array_indices;
llvm::Value* block_id = llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x, {}, {}, ir_builder_);
+ llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x, {}, {}, b_);
llvm_ir::AddRangeMetadata(0, launch_dimensions_.block_count(),
static_cast<llvm::Instruction*>(block_id));
- block_id = ir_builder_->CreateZExtOrTrunc(block_id, index_type, "block_id");
+ block_id = b_->CreateZExtOrTrunc(block_id, index_type, "block_id");
// Per the PTX documentation:
// "It is guaranteed that [...] 0 <= %tid.x < %ntid.x"
//
// %ntid.x is currently specified as 1024.
llvm::Value* thread_id = llvm_ir::EmitCallToIntrinsic(
- llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x, {}, {}, ir_builder_);
+ llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x, {}, {}, b_);
llvm_ir::AddRangeMetadata(0, launch_dimensions_.threads_per_block(),
static_cast<llvm::Instruction*>(thread_id));
- thread_id =
- ir_builder_->CreateZExtOrTrunc(thread_id, index_type, "thread_id");
-
- llvm::Value* linear_index_base = ir_builder_->CreateAdd(
- ir_builder_->CreateMul(
- block_id,
- llvm::ConstantInt::get(index_type,
- launch_dimensions_.threads_per_block()),
- "",
- /*HasNUW=*/true, /*HasNSW=*/true),
+ thread_id = b_->CreateZExtOrTrunc(thread_id, index_type, "thread_id");
+
+ llvm::Value* linear_index_base = b_->CreateAdd(
+ b_->CreateMul(block_id,
+ llvm::ConstantInt::get(
+ index_type, launch_dimensions_.threads_per_block()),
+ "",
+ /*HasNUW=*/true, /*HasNSW=*/true),
thread_id, "linear_index", /*HasNUW=*/true, /*HasNSW=*/true);
// Add an @llvm.assume(linear_index < threads_per_block * num_blocks).
@@ -109,41 +107,41 @@ ParallelLoopEmitter::EmitIndexAndSetExitBasicBlock(
// conditions in the same basic block as their operands.
llvm_ir::EmitCallToIntrinsic(
llvm::Intrinsic::assume,
- {ir_builder_->CreateICmpULT(
+ {b_->CreateICmpULT(
linear_index_base,
llvm::ConstantInt::get(index_type,
launch_dimensions_.threads_per_block() *
launch_dimensions_.block_count()),
"linear_index_in_range")},
- {}, ir_builder_);
+ {}, b_);
if (unroll_factor_ > 1) {
- linear_index_base = ir_builder_->CreateMul(
+ linear_index_base = b_->CreateMul(
linear_index_base, llvm::ConstantInt::get(index_type, unroll_factor_),
"linear_index_base", /*HasNUW=*/true, /*HasNSW=*/true);
}
- array_indices.emplace_back(linear_index_base, shape_, ir_builder_);
+ array_indices.emplace_back(linear_index_base, shape_, b_);
for (int i = 1; i < unroll_factor_; ++i) {
- llvm::Value* linear_index = ir_builder_->CreateAdd(
- linear_index_base, llvm::ConstantInt::get(index_type, i),
- "linear_index",
- /*HasNUW=*/true, /*HasNSW=*/true);
- array_indices.emplace_back(linear_index, shape_, ir_builder_);
+ llvm::Value* linear_index =
+ b_->CreateAdd(linear_index_base, llvm::ConstantInt::get(index_type, i),
+ "linear_index",
+ /*HasNUW=*/true, /*HasNSW=*/true);
+ array_indices.emplace_back(linear_index, shape_, b_);
}
auto if_in_bounds = llvm_ir::EmitIfThenElse(
- ir_builder_->CreateICmpULT(
+ b_->CreateICmpULT(
linear_index_base,
llvm::ConstantInt::get(index_type, ShapeUtil::ElementsIn(shape_))),
- llvm_ir::IrName(loop_name, "in_bounds"), ir_builder_, false);
+ llvm_ir::IrName(loop_name, "in_bounds"), b_, false);
// Set exit_bb_ to the exit block of the if structure.
exit_bb_ = if_in_bounds.after_block;
CHECK_NE(nullptr, exit_bb_);
// Set IR builder insertion point to the body of the if structure.
- llvm_ir::SetToFirstInsertPoint(if_in_bounds.true_block, ir_builder_);
+ llvm_ir::SetToFirstInsertPoint(if_in_bounds.true_block, b_);
return array_indices;
}
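The index arithmetic the emitter produces above amounts to linear_index_base = (block_id * threads_per_block + thread_id) * unroll_factor, with each thread then covering unroll_factor consecutive elements. A small standalone sketch with made-up launch values (this just evaluates the formula on the host; it is not emitter code):

#include <cstdio>

int main() {
  const long long threads_per_block = 1024;  // matches the %ntid.x bound noted above
  const long long unroll_factor = 4;         // hypothetical unroll factor
  const long long block_id = 2;              // hypothetical %ctaid.x
  const long long thread_id = 5;             // hypothetical %tid.x
  const long long linear_index_base =
      (block_id * threads_per_block + thread_id) * unroll_factor;
  for (long long i = 0; i < unroll_factor; ++i) {
    std::printf("element index: %lld\n", linear_index_base + i);  // 8212..8215
  }
  return 0;
}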
diff --git a/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h b/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h
index 302e1bf1bc..b82a23419d 100644
--- a/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h
+++ b/tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h
@@ -34,13 +34,13 @@ class ParallelLoopEmitter : public llvm_ir::LoopEmitter {
// The meanings of other parameters are the same as LoopEmitter.
ParallelLoopEmitter(BodyEmitter body_emitter, const Shape& shape,
const LaunchDimensions& launch_dimensions,
- llvm::IRBuilder<>* ir_builder, int unroll_factor = 1);
+ llvm::IRBuilder<>* b, int unroll_factor = 1);
// Constructs a ParallelLoopEmitter from an element generator that generates
// each element of the given target array.
ParallelLoopEmitter(const llvm_ir::ElementGenerator& target_element_generator,
const llvm_ir::IrArray& target_array,
const LaunchDimensions& launch_dimensions,
- llvm::IRBuilder<>* ir_builder, int unroll_factor = 1);
+ llvm::IRBuilder<>* b, int unroll_factor = 1);
// Constructs a loop emitter for a loop that generates on element of each of N
// arrays on each iteration.
@@ -50,7 +50,7 @@ class ParallelLoopEmitter : public llvm_ir::LoopEmitter {
ParallelLoopEmitter(
const llvm_ir::ElementGenerator& target_element_generator,
tensorflow::gtl::ArraySlice<llvm_ir::IrArray> target_arrays,
- const LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* ir_builder,
+ const LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* b,
int unroll_factor = 1);
ParallelLoopEmitter(const ParallelLoopEmitter&) = delete;
diff --git a/tensorflow/compiler/xla/service/gpu/tests/BUILD b/tensorflow/compiler/xla/service/gpu/tests/BUILD
new file mode 100644
index 0000000000..686c3c16c9
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/BUILD
@@ -0,0 +1,223 @@
+# Description: GPU-specific XLA tests. For example, codegen tests that
+# verify the IR emitted.
+#
+# TODO(jlebar): None of these tests actually use the GPU, so they should not
+# need to run on machines with GPUs present.
+
+licenses(["notice"]) # Apache 2.0
+
+package(default_visibility = [":friends"])
+
+package_group(
+ name = "friends",
+ includes = [
+ "//tensorflow/compiler/xla:friends",
+ ],
+)
+
+# Filegroup used to collect source files for dependency checking.
+filegroup(
+ name = "c_srcs",
+ data = glob([
+ "**/*.cc",
+ "**/*.h",
+ ]),
+)
+
+load("//tensorflow:tensorflow.bzl", "tf_cc_test")
+
+cc_library(
+ name = "gpu_codegen_test",
+ testonly = True,
+ srcs = ["gpu_codegen_test.cc"],
+ hdrs = ["gpu_codegen_test.h"],
+ tags = [
+ "requires-gpu-sm35",
+ ],
+ deps = [
+ "//tensorflow/compiler/xla:util",
+ "//tensorflow/compiler/xla/legacy_flags:debug_options_flags",
+ "//tensorflow/compiler/xla/service:gpu_plugin",
+ "//tensorflow/compiler/xla/service/gpu:gpu_executable",
+ "//tensorflow/compiler/xla/tests:filecheck",
+ "//tensorflow/compiler/xla/tests:llvm_irgen_test_base",
+ "//tensorflow/core:lib",
+ ],
+)
+
+tf_cc_test(
+ name = "gpu_copy_test",
+ srcs = ["gpu_copy_test.cc"],
+ tags = [
+ "requires-gpu-sm35",
+ ],
+ deps = [
+ ":gpu_codegen_test",
+ "//tensorflow/compiler/xla:literal",
+ "//tensorflow/compiler/xla:literal_util",
+ "//tensorflow/compiler/xla:util",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/service:hlo",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ ],
+)
+
+tf_cc_test(
+ name = "gpu_ftz_test",
+ srcs = ["gpu_ftz_test.cc"],
+ tags = [
+ "requires-gpu-sm35",
+ ],
+ deps = [
+ ":gpu_codegen_test",
+ "//tensorflow/core:test_main",
+ ],
+)
+
+tf_cc_test(
+ name = "gpu_index_test",
+ srcs = ["gpu_index_test.cc"],
+ tags = [
+ "requires-gpu-sm35",
+ ],
+ deps = [
+ ":gpu_codegen_test",
+ "//tensorflow/compiler/xla:literal",
+ "//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:util",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla:xla_proto",
+ "//tensorflow/compiler/xla/service:hlo",
+ "//tensorflow/compiler/xla/service:hlo_module_config",
+ "//tensorflow/compiler/xla/service:hlo_parser",
+ "//tensorflow/compiler/xla/tests:hlo_test_base",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ ],
+)
+
+tf_cc_test(
+ name = "gpu_infeed_test",
+ srcs = ["infeed_test.cc"],
+ tags = [
+ "requires-gpu-sm35",
+ ],
+ deps = [
+ ":gpu_codegen_test",
+ "//tensorflow/compiler/xla:literal",
+ "//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:test_helpers",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/client:global_data",
+ "//tensorflow/compiler/xla/client:local_client",
+ "//tensorflow/compiler/xla/client/lib:arithmetic",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+ "//tensorflow/compiler/xla/tests:client_library_test_base",
+ "//tensorflow/compiler/xla/tests:literal_test_util",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:test_main",
+ ],
+)
+
+tf_cc_test(
+ name = "gpu_kernel_tiling_test",
+ srcs = ["gpu_kernel_tiling_test.cc"],
+ tags = [
+ "requires-gpu-sm35",
+ ],
+ deps = [
+ ":gpu_codegen_test",
+ "//tensorflow/compiler/xla/service:hlo",
+ "//tensorflow/compiler/xla/service:hlo_module_config",
+ "//tensorflow/compiler/xla/service:hlo_parser",
+ "//tensorflow/compiler/xla/tests:hlo_test_base",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ ],
+)
+
+tf_cc_test(
+ name = "gpu_ldg_test",
+ srcs = ["gpu_ldg_test.cc"],
+ tags = ["requires-gpu-sm35"],
+ deps = [
+ ":gpu_codegen_test",
+ "//tensorflow/compiler/xla:literal",
+ "//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:util",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/service:hlo",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ ],
+)
+
+tf_cc_test(
+ name = "gpu_noalias_test",
+ srcs = ["gpu_noalias_test.cc"],
+ tags = [
+ "requires-gpu-sm35",
+ ],
+ deps = [
+ ":gpu_codegen_test",
+ "//tensorflow/compiler/xla:literal",
+ "//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:util",
+ "//tensorflow/compiler/xla:xla_data_proto",
+ "//tensorflow/compiler/xla/service:hlo",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ ],
+)
+
+tf_cc_test(
+ name = "gpu_fusion_test",
+ srcs = ["gpu_fusion_test.cc"],
+ tags = [
+ "requires-gpu-sm35",
+ ],
+ deps = [
+ ":gpu_codegen_test",
+ "//tensorflow/compiler/xla/service:hlo_module_config",
+ "//tensorflow/compiler/xla/service:hlo_parser",
+ "//tensorflow/compiler/xla/tests:hlo_test_base",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ ],
+)
+
+tf_cc_test(
+ name = "gpu_unrolling_test",
+ srcs = ["gpu_unrolling_test.cc"],
+ tags = [
+ "requires-gpu-sm35",
+ ],
+ deps = [
+ ":gpu_codegen_test",
+ "//tensorflow/compiler/xla/service:hlo_module_config",
+ "//tensorflow/compiler/xla/service:hlo_parser",
+ "//tensorflow/compiler/xla/tests:hlo_test_base",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ ],
+)
+
+tf_cc_test(
+ name = "gpu_alignment_test",
+ testonly = True,
+ srcs = ["gpu_alignment_test.cc"],
+ tags = [
+ "requires-gpu-sm35",
+ ],
+ deps = [
+ ":gpu_codegen_test",
+ "//tensorflow/compiler/xla/service:gpu_plugin",
+ "//tensorflow/compiler/xla/service/cpu:custom_call_target_registry",
+ "//tensorflow/compiler/xla/service/llvm_ir:alias_analysis",
+ "//tensorflow/compiler/xla/tests:filecheck",
+ "//tensorflow/compiler/xla/tests:llvm_irgen_test_base",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ ],
+)
diff --git a/tensorflow/compiler/xla/service/gpu/tests/gpu_alignment_test.cc b/tensorflow/compiler/xla/service/gpu/tests/gpu_alignment_test.cc
new file mode 100644
index 0000000000..672c68e59b
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/gpu_alignment_test.cc
@@ -0,0 +1,54 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <memory>
+#include <utility>
+
+#include "tensorflow/compiler/xla/service/cpu/custom_call_target_registry.h"
+#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/alias_analysis.h"
+#include "tensorflow/compiler/xla/tests/filecheck.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace xla {
+namespace gpu {
+namespace {
+
+class GpuAlignmentTest : public GpuCodegenTest {};
+
+TEST_F(GpuAlignmentTest, Test) {
+ const char* hlo_string = R"(
+HloModule GpuAlignmentTest
+
+ENTRY main {
+ zero = f32[] constant(0)
+ tok = token[] after-all()
+ a = f32[100] parameter(0)
+ b_tup = (f32[200], token[]) infeed(tok)
+ b = f32[200] get-tuple-element(b_tup), index=0
+ a_padded = f32[150] pad(a, zero), padding=0_50
+ b_sliced = f32[150] slice(b), slice={[0:150]}
+ ROOT c = f32[150] add(a_padded, b_sliced)
+}
+)";
+
+ CompileAndVerifyIr(hlo_string, R"(
+CHECK: @fusion(i8* align 64 dereferenceable(600) %alloc0, i8* align 16 dereferenceable(400) %alloc1, i8* align 64 dereferenceable(864) %temp_buf)
+)");
+}
+
+} // namespace
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.cc b/tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.cc
new file mode 100644
index 0000000000..4b8415fe91
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.cc
@@ -0,0 +1,50 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
+#include "tensorflow/compiler/xla/legacy_flags/debug_options_flags.h"
+#include "tensorflow/compiler/xla/ptr_util.h"
+#include "tensorflow/compiler/xla/service/gpu/gpu_executable.h"
+#include "tensorflow/compiler/xla/tests/filecheck.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace xla {
+namespace gpu {
+
+std::unique_ptr<HloModule> GpuCodegenTest::CreateNewModuleWithFTZ(bool ftz) {
+ HloModuleConfig config;
+ auto debug_options = legacy_flags::GetDebugOptionsFromFlags();
+ debug_options.set_xla_gpu_ftz(ftz);
+ debug_options.set_xla_gpu_max_kernel_unroll_factor(1);
+ // TODO(b/38354253): Change tests to use Parameters instead of Constants.
+ debug_options.add_xla_disable_hlo_passes("constant_folding");
+ config.set_debug_options(debug_options);
+
+ return MakeUnique<HloModule>(TestName(), config);
+}
+
+void GpuCodegenTest::CompileAndVerifyPtx(std::unique_ptr<HloModule> hlo_module,
+ const string& pattern) {
+ std::unique_ptr<Executable> executable =
+ std::move(CompileToExecutable(std::move(hlo_module)).ValueOrDie());
+ string ptx_str =
+ std::string(static_cast<GpuExecutable*>(executable.get())->ptx());
+ StatusOr<bool> filecheck_result = RunFileCheck(ptx_str, pattern);
+ ASSERT_TRUE(filecheck_result.ok());
+ EXPECT_TRUE(filecheck_result.ValueOrDie());
+}
+
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h b/tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h
new file mode 100644
index 0000000000..e4a3573bab
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h
@@ -0,0 +1,42 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TESTS_GPU_CODEGEN_TEST_H_
+#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TESTS_GPU_CODEGEN_TEST_H_
+
+#include <string>
+
+#include "tensorflow/compiler/xla/tests/llvm_irgen_test_base.h"
+
+namespace xla {
+namespace gpu {
+
+// Tests that verify IR or PTX emitted by the GPU backend is as expected.
+class GpuCodegenTest : public LlvmIrGenTestBase {
+ protected:
+ // Like HloTestBase::CreateNewModule(), with a flag for configuring the ftz
+ // option.
+ std::unique_ptr<HloModule> CreateNewModuleWithFTZ(bool ftz);
+
+ // Compiles the given HLO module to PTX and verifies the PTX matches the given
+ // FileCheck pattern. (See http://llvm.org/docs/CommandGuide/FileCheck.html).
+ void CompileAndVerifyPtx(std::unique_ptr<HloModule> hlo_module,
+ const string& pattern);
+};
+
+} // namespace gpu
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TESTS_GPU_CODEGEN_TEST_H_
diff --git a/tensorflow/compiler/xla/service/gpu/tests/gpu_copy_test.cc b/tensorflow/compiler/xla/service/gpu/tests/gpu_copy_test.cc
new file mode 100644
index 0000000000..ce69e058e6
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/gpu_copy_test.cc
@@ -0,0 +1,59 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <memory>
+#include <utility>
+
+#include "tensorflow/compiler/xla/literal.h"
+#include "tensorflow/compiler/xla/literal_util.h"
+#include "tensorflow/compiler/xla/ptr_util.h"
+#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
+#include "tensorflow/compiler/xla/service/hlo_computation.h"
+#include "tensorflow/compiler/xla/service/hlo_instruction.h"
+#include "tensorflow/compiler/xla/service/hlo_module.h"
+#include "tensorflow/compiler/xla/service/hlo_opcode.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace xla {
+namespace gpu {
+
+class GpuCopyTest : public GpuCodegenTest {};
+
+// The GPU backend should not emit a copy kernel for the kCopy instruction in
+// this test. Instead, it should generate a CopyThunk which invokes cuMemcpy at
+// runtime.
+TEST_F(GpuCopyTest, UseMemcpy) {
+ HloComputation::Builder builder(TestName());
+
+ std::unique_ptr<Literal> literal =
+ LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
+ HloInstruction* constant = builder.AddInstruction(
+ HloInstruction::CreateConstant(std::move(literal)));
+ builder.AddInstruction(HloInstruction::CreateUnary(
+ constant->shape(), HloOpcode::kCopy, constant));
+
+ std::unique_ptr<HloComputation> computation = builder.Build();
+
+ auto hlo_module = CreateNewModule();
+ hlo_module->AddEntryComputation(std::move(computation));
+
+ // There should not be any kernel prefixed "copy".
+ CompileAndVerifyIr(std::move(hlo_module), "; CHECK-NOT: define void @_copy",
+ /*match_optimized_ir=*/false);
+}
+
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/tests/gpu_ftz_test.cc b/tensorflow/compiler/xla/service/gpu/tests/gpu_ftz_test.cc
new file mode 100644
index 0000000000..177b94934c
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/gpu_ftz_test.cc
@@ -0,0 +1,119 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
+
+// Check that the ftz (flush denormals to zero) flag is reflected in PTX as
+// expected.
+
+namespace xla {
+namespace gpu {
+namespace {
+
+class GpuFtzTest : public GpuCodegenTest {
+ public:
+ explicit GpuFtzTest(bool ftz) : ftz_(ftz) {}
+
+ // Creates an HLO module that performs the given binary operation on some
+ // data.
+ std::unique_ptr<HloModule> CreateBinaryOpModule(HloOpcode op) {
+ HloComputation::Builder builder(TestName());
+
+ Shape param_shape = ShapeUtil::MakeShapeWithLayout(
+ F32, /*dimensions=*/{100, 100}, /*minor_to_major=*/{1, 0});
+ HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
+ /* parameter_number=*/0, param_shape, "x"));
+ HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
+ /* parameter_number=*/1, param_shape, "y"));
+ builder.AddInstruction(HloInstruction::CreateBinary(param_shape, op, x, y));
+
+ auto hlo_module = CreateNewModuleWithFTZ(ftz_);
+ hlo_module->AddEntryComputation(builder.Build());
+ return hlo_module;
+ }
+
+ // Creates an HLO module that performs the given unary operation on some data.
+ std::unique_ptr<HloModule> CreateUnaryOpModule(HloOpcode op) {
+ HloComputation::Builder builder(TestName());
+
+ Shape param_shape = ShapeUtil::MakeShapeWithLayout(
+ F32, /*dimensions=*/{100, 100}, /*minor_to_major=*/{1, 0});
+ HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
+ /* parameter_number=*/0, param_shape, "x"));
+ builder.AddInstruction(HloInstruction::CreateUnary(param_shape, op, x));
+
+ auto hlo_module = CreateNewModuleWithFTZ(ftz_);
+ hlo_module->AddEntryComputation(builder.Build());
+ return hlo_module;
+ }
+
+ bool ftz_;
+};
+
+class GpuFtzEnabledTest : public GpuFtzTest {
+ public:
+ GpuFtzEnabledTest() : GpuFtzTest(/*ftz=*/true) {}
+};
+
+class GpuFtzDisabledTest : public GpuFtzTest {
+ public:
+ GpuFtzDisabledTest() : GpuFtzTest(/*ftz=*/false) {}
+};
+
+// Check that we emit mul.ftz.f32 when in ftz mode, and plain mul.f32 otherwise.
+TEST_F(GpuFtzEnabledTest, MultiplyFtz) {
+ CompileAndVerifyPtx(CreateBinaryOpModule(HloOpcode::kMultiply), R"(
+ CHECK-NOT: mul.f32
+ CHECK: mul.ftz.f32
+ CHECK-NOT: mul.f32
+ )");
+}
+TEST_F(GpuFtzDisabledTest, MultiplyFtz) {
+ CompileAndVerifyPtx(CreateBinaryOpModule(HloOpcode::kMultiply), R"(
+ CHECK-NOT: mul.ftz.f32
+ CHECK: mul.f32
+ CHECK-NOT: mul.ftz.f32
+ )");
+}
+
+// In NVPTX, exp(float) is implemented in libdevice, and consults __nvvm_reflect
+// to determine whether or not ftz is enabled. The implementation uses two
+// calls to ex2.approx. When ftz is on, we get two calls to the ftz version;
+// when ftz is off, we get one call to the ftz version and one call to the
+// regular version.
+TEST_F(GpuFtzEnabledTest, ExpFtz) {
+ CompileAndVerifyPtx(CreateUnaryOpModule(HloOpcode::kExp), R"(
+ CHECK-NOT: ex2.approx.f32
+ CHECK: ex2.approx.ftz.f32
+ CHECK-NOT: ex2.approx.f32
+ CHECK: ex2.approx.ftz.f32
+ CHECK-NOT: ex2.approx.f32
+ CHECK-NOT: ex2.approx.ftz.f32
+ )");
+}
+
+TEST_F(GpuFtzDisabledTest, ExpFtz) {
+ CompileAndVerifyPtx(CreateUnaryOpModule(HloOpcode::kExp), R"(
+ CHECK-NOT: ex2.approx.f32
+ CHECK-DAG: ex2.approx.ftz.f32
+ CHECK-DAG: ex2.approx.f32
+ CHECK-NOT: ex2.approx.f32
+ CHECK-NOT: ex2.approx.ftz.f32
+ )");
+}
+
+} // namespace
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/tests/gpu_fusion_test.cc b/tensorflow/compiler/xla/service/gpu/tests/gpu_fusion_test.cc
new file mode 100644
index 0000000000..674b436a8e
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/gpu_fusion_test.cc
@@ -0,0 +1,59 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <utility>
+
+#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
+#include "tensorflow/compiler/xla/service/hlo_module_config.h"
+#include "tensorflow/compiler/xla/service/hlo_parser.h"
+#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace xla {
+namespace gpu {
+namespace {
+
+class GpuFusionTest : public GpuCodegenTest {};
+
+TEST_F(GpuFusionTest, FusedReshape) {
+ const char* hlo_text = R"(
+ HloModule test_module
+
+ fused_computation {
+ p0.param_0 = f32[4,1,1]{2,1,0} parameter(0)
+ p1.param_1 = f32[4,1]{1,0} parameter(1)
+ reshape = f32[4,1]{1,0} reshape(p0.param_0)
+ ROOT add = f32[4,1] add(reshape, p1.param_1)
+ }
+
+ ENTRY BroadcastIntoAdd {
+ p0 = f32[4,1,1]{2,1,0} parameter(0)
+ p1 = f32[4,1]{1,0} parameter(1)
+ ROOT fusion = f32[4,1]{1,0} fusion(p0, p1), kind=kLoop,
+ calls=fused_computation
+ }
+)";
+
+ CompileAndVerifyIr(hlo_text,
+ R"(
+; CHECK-LABEL: @fusion
+; CHECK: fadd
+; CHECK: }
+ )");
+}
+
+} // namespace
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/tests/gpu_index_test.cc b/tensorflow/compiler/xla/service/gpu/tests/gpu_index_test.cc
new file mode 100644
index 0000000000..e5958165ef
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/gpu_index_test.cc
@@ -0,0 +1,147 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <memory>
+#include <utility>
+
+#include "tensorflow/compiler/xla/literal.h"
+#include "tensorflow/compiler/xla/ptr_util.h"
+#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
+#include "tensorflow/compiler/xla/service/hlo_computation.h"
+#include "tensorflow/compiler/xla/service/hlo_instruction.h"
+#include "tensorflow/compiler/xla/service/hlo_module.h"
+#include "tensorflow/compiler/xla/service/hlo_module_config.h"
+#include "tensorflow/compiler/xla/service/hlo_parser.h"
+#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
+#include "tensorflow/compiler/xla/xla.pb.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace xla {
+namespace gpu {
+
+// This file tests the index expressions used to reference source tensors. When
+// the destination tensor and source tensor have compatible shapes, the linear
+// index is used to access the source tensor. Otherwise, dimensional indices
+// computed from the linear index are used to access the source tensor.
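+//
+// As a rough illustration (not tied to a specific test below): for a source
+// tensor of shape [5,7,2] with the last dimension most minor, a linear index
+// i decomposes into dimensional indices as
+//
+//   d2 = i % 2;
+//   d1 = (i / 2) % 7;
+//   d0 = i / (2 * 7);
+//
+// When the shapes are compatible the emitter can skip these udiv/urem
+// computations and reuse i directly, which is what the CHECK-NOT patterns in
+// the tests below verify.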
+
+class GpuIndexTest : public GpuCodegenTest {};
+
+TEST_F(GpuIndexTest, CompatibleUseLinearIndex) {
+ HloComputation::Builder builder(TestName());
+
+ auto param_shape = ShapeUtil::MakeShape(F32, {5, 7, 2});
+ HloInstruction* param_x = builder.AddInstruction(
+ HloInstruction::CreateParameter(0, param_shape, "x"));
+ HloInstruction* param_y = builder.AddInstruction(
+ HloInstruction::CreateParameter(1, param_shape, "y"));
+ builder.AddInstruction(HloInstruction::CreateBinary(
+ ShapeUtil::MakeShape(PRED, {5, 7, 2}), HloOpcode::kGe, param_x, param_y));
+
+ auto hlo_module = CreateNewModule();
+ hlo_module->AddEntryComputation(builder.Build());
+
+ // Check the optimized IR as the unoptimized IR contains dead udiv and urem.
+ CompileAndVerifyIr(std::move(hlo_module),
+ R"(
+; CHECK-NOT: udiv
+; CHECK-NOT: urem
+ )",
+ /*match_optimized_ir=*/true);
+}
+
+TEST_F(GpuIndexTest, CompatibleUseLinearIndexWithReshape) {
+ HloModuleConfig config;
+ config.set_debug_options(HloTestBase::GetDebugOptionsForTest());
+ auto module = ParseHloString(R"(
+ HloModule test_module
+
+ ENTRY CompatibleUseLinearIndexWithReshape {
+ x = f32[5,7,2]{2,1,0} parameter(0)
+ y = f32[5,14]{1,0} parameter(1)
+ reshape = f32[5,7,2]{2,1,0} reshape(y)
+ ROOT gte = pred[5,7,2]{2,1,0} greater-than-or-equal-to(x, reshape)
+ })",
+ config)
+ .ValueOrDie();
+
+ // Check the optimized IR as the unoptimized IR contains dead udiv and urem.
+ CompileAndVerifyIr(std::move(module),
+ R"(
+; CHECK-NOT: udiv
+; CHECK-NOT: urem
+ )",
+ /*match_optimized_ir=*/true);
+}
+
+TEST_F(GpuIndexTest, CompatibleUseLinearIndexWithReshapeAndBroadcast) {
+ HloModuleConfig config;
+ config.set_debug_options(HloTestBase::GetDebugOptionsForTest());
+ auto module = ParseHloString(R"(
+ HloModule test_module
+
+ ENTRY CompatibleUseLinearIndexWithReshape {
+ x = f32[5,7,2]{2,1,0} parameter(0)
+ y = f32[14]{0} parameter(1)
+ reshape = f32[7,2]{1,0} reshape(y)
+ broadcast = f32[5,7,2]{2,1,0} broadcast(reshape), dimensions={1,2}
+ ROOT gte = pred[5,7,2]{2,1,0} greater-than-or-equal-to(x, broadcast)
+ })",
+ config)
+ .ValueOrDie();
+
+ // Check the optimized IR reuses the linear index by calculating modulo 14.
+ CompileAndVerifyIr(std::move(module),
+ R"(
+; CHECK: %[[urem1:.*]] = urem i{{[0-9]*}} %[[linear_index:.*]], 14
+; CHECK: %[[bitcast:.*]] = bitcast i8 addrspace(1)* %[[alloc:.*]] to float addrspace(1)*
+; CHECK: %[[idx1:.*]] = zext i{{[0-9]*}} %[[urem1]] to i64
+; CHECK: getelementptr inbounds float, float addrspace(1)* %[[bitcast]], i64 %[[idx1]]
+ )",
+ /*match_optimized_ir=*/true);
+}
+
+TEST_F(GpuIndexTest, CompatibleUseLinearIndexWithSizeOneDimensions) {
+ HloModuleConfig config;
+ auto debug_options = HloTestBase::GetDebugOptionsForTest();
+ debug_options.set_xla_gpu_max_kernel_unroll_factor(1);
+ config.set_debug_options(debug_options);
+
+ auto module = ParseHloString(R"(
+ HloModule test_module
+
+ ENTRY CompatibleUseLinearIndexWithSizeOneDimensions {
+ x = f32[1,1024,1,256]{3,2,1,0} parameter(0)
+ ROOT y = f16[1,1024,1,256]{2,3,1,0} convert(x)
+ })",
+ config)
+ .ValueOrDie();
+
+ // Check that the unoptimized IR reuses the linear index.
+ CompileAndVerifyIr(std::move(module),
+ R"(
+; CHECK-LABEL: @fusion
+; CHECK: udiv i32 %[[linear_index:.*]], 262144
+; CHECK: %[[ld_addr:.*]] = getelementptr inbounds float, float* {{.*}}, i32 %[[linear_index]]
+; CHECK: load float, float* %[[ld_addr]]
+; CHECK: %[[st_addr:.*]] = getelementptr inbounds half, half* {{.*}}, i32 %[[linear_index]]
+; CHECK: store half {{.*}}, half* %[[st_addr]]
+ )",
+ /*match_optimized_ir=*/false);
+}
+
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/tests/gpu_kernel_tiling_test.cc b/tensorflow/compiler/xla/service/gpu/tests/gpu_kernel_tiling_test.cc
new file mode 100644
index 0000000000..cca35316f0
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/gpu_kernel_tiling_test.cc
@@ -0,0 +1,177 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <utility>
+
+#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
+#include "tensorflow/compiler/xla/service/hlo_module_config.h"
+#include "tensorflow/compiler/xla/service/hlo_parser.h"
+#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace xla {
+namespace gpu {
+namespace {
+
+class GpuKernelTilingTest : public GpuCodegenTest {
+ protected:
+ GpuKernelTilingTest() {
+ auto debug_options = HloTestBase::GetDebugOptionsForTest();
+ // Disable layout_assignment to use the preassigned layouts.
+ debug_options.add_xla_disable_hlo_passes("layout_assignment");
+ config_.set_debug_options(debug_options);
+ }
+ HloModuleConfig config_;
+};
+
+TEST_F(GpuKernelTilingTest, UnnestedTransposeWithProperDimensionsTiled) {
+ const char *const kHloString = R"(
+ HloModule unnested_transpose_1
+
+ ENTRY unnested_transpose_1 {
+ para0 = f16[32,3,64]{2,1,0} parameter(0)
+ ROOT copy1 = f16[32,3,64]{1,0,2} copy(para0)
+ })";
+
+ // Check that a call to llvm.nvvm.barrier0 is generated.
+ auto hlo_module = ParseHloString(kHloString, config_).ValueOrDie();
+ CompileAndVerifyIr(std::move(hlo_module),
+ R"(
+; CHECK-LABEL: define void @copy
+; CHECK: tail call void @llvm.nvvm.barrier0()
+; CHECK: }
+)",
+ /*match_optimized_ir=*/true);
+
+ // Check that the kernel runs correctly.
+ EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{0.0}));
+}
+
+TEST_F(GpuKernelTilingTest, UnnestedTransposeWithSmallDimensionsNotTiled) {
+ const char *const kHloString = R"(
+ HloModule unnested_transpose_2
+
+ ENTRY unnested_transpose_2 {
+ para0 = f16[2,3,64]{2,1,0} parameter(0)
+ ROOT copy1 = f16[2,3,64]{1,0,2} copy(para0)
+ })";
+
+ // Check that a call to llvm.nvvm.barrier0 is not generated.
+ auto hlo_module = ParseHloString(kHloString, config_).ValueOrDie();
+ CompileAndVerifyIr(std::move(hlo_module),
+ R"(
+; CHECK-LABEL: define void @copy
+; CHECK-NOT: tail call void @llvm.nvvm.barrier0()
+; CHECK: }
+)",
+ /*match_optimized_ir=*/true);
+}
+
+TEST_F(GpuKernelTilingTest, SimpleFusionWithTransposeTiled) {
+ const char *const kHloString = R"(
+ HloModule multiple_output_fusion_1
+ fused_computation.1 {
+ param0 = f32[4,5,6,7,8]{4,3,2,1,0} parameter(0)
+ copy = f32[4,5,6,7,8]{2,1,4,3,0} copy(param0)
+ ROOT convert = f16[4,5,6,7,8]{2,1,4,3,0} convert(copy)
+ }
+
+ ENTRY copy_in_fusion_run_without_hlo_passes {
+ para0 = f32[4,5,6,7,8]{4,3,2,1,0} parameter(0)
+ ROOT fusion.1 = f16[4,5,6,7,8]{2,1,4,3,0} fusion(para0), kind=kLoop,
+ calls=fused_computation.1
+ })";
+
+ // Check that a call to llvm.nvvm.barrier0 is generated.
+ auto hlo_module = ParseHloString(kHloString, config_).ValueOrDie();
+ CompileAndVerifyIr(std::move(hlo_module),
+ R"(
+; CHECK-LABEL: define void @fusion
+; CHECK: tail call void @llvm.nvvm.barrier0()
+; CHECK: }
+)",
+ /*match_optimized_ir=*/true);
+
+ // Check that the kernel runs correctly.
+ EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{0.0}));
+}
+
+TEST_F(GpuKernelTilingTest, MultipleOutputFusionWithOnePossibleTransposeTiled) {
+ const char *const kHloString = R"(
+ HloModule multiple_output_fusion_1
+ fused_computation.1 {
+ param0 = f16[8,31,31,65]{3,2,1,0} parameter(0)
+ param1 = f16[8,31,31,65]{3,2,1,0} parameter(1)
+ copy0 = f16[8,31,31,65]{2,1,3,0} copy(param0)
+ copy1 = f16[8,31,31,65]{2,1,3,0} copy(param1)
+ ROOT tuple1 = (f16[8,31,31,65]{2,1,3,0}, f16[8,31,31,65]{2,1,3,0})
+ tuple(copy0, copy1)
+ }
+
+ ENTRY multiple_output_fusion_1 {
+ para0 = f16[8,31,31,65]{3,2,1,0} parameter(0)
+ para1 = f16[8,31,31,65]{3,2,1,0} parameter(1)
+ ROOT fusion.1 = (f16[8,31,31,65]{2,1,3,0}, f16[8,31,31,65]{2,1,3,0})
+ fusion(para0,para1), kind=kLoop, calls=fused_computation.1
+ })";
+
+ // Check that a call to llvm.nvvm.barrier0 is generated.
+ auto hlo_module = ParseHloString(kHloString, config_).ValueOrDie();
+ CompileAndVerifyIr(std::move(hlo_module),
+ R"(
+; CHECK-LABEL: define void @fusion
+; CHECK: tail call void @llvm.nvvm.barrier0()
+; CHECK: }
+)",
+ /*match_optimized_ir=*/true);
+
+ // Check that the kernel runs correctly.
+ EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{0.0}));
+}
+
+TEST_F(GpuKernelTilingTest,
+ MultipleOutputFusionWithTwoPossibleTransposesNotTiled) {
+ const char *const kHloString = R"(
+ HloModule multiple_output_fusion_2
+ fused_computation.1 {
+ param0 = f16[8,31,31,65]{3,2,1,0} parameter(0)
+ param1 = f16[8,31,31,65]{1,3,2,0} parameter(1)
+ copy2 = f16[8,31,31,65]{2,1,3,0} copy(param0)
+ copy3 = f16[8,31,31,65]{2,1,3,0} copy(param1)
+ ROOT tuple1 = (f16[8,31,31,65]{2,1,3,0}, f16[8,31,31,65]{2,1,3,0})
+ tuple(copy2, copy3)
+ }
+
+ ENTRY multiple_output_fusion_2 {
+ para0 = f16[8,31,31,65]{3,2,1,0} parameter(0)
+ para1 = f16[8,31,31,65]{1,3,2,0} parameter(1)
+ ROOT fusion1 = (f16[8,31,31,65]{2,1,3,0}, f16[8,31,31,65]{2,1,3,0})
+ fusion(para0,para1), kind=kLoop, calls=fused_computation.1
+ })";
+
+ // Check that a call to llvm.nvvm.barrier0 is not generated.
+ auto hlo_module = ParseHloString(kHloString, config_).ValueOrDie();
+ CompileAndVerifyIr(std::move(hlo_module),
+ R"(
+; CHECK-LABEL: define void @fusion
+; CHECK-NOT: tail call void @llvm.nvvm.barrier0()
+; CHECK: }
+)",
+ /*match_optimized_ir=*/true);
+}
+
+} // namespace
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/tests/gpu_ldg_test.cc b/tensorflow/compiler/xla/service/gpu/tests/gpu_ldg_test.cc
new file mode 100644
index 0000000000..6c9ae7bada
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/gpu_ldg_test.cc
@@ -0,0 +1,141 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+// Tests that we emit ld.global.nc (the PTX instruction corresponding to CUDA's
+// __ldg builtin) for reads of buffers that don't change during a kernel's
+// execution.
+
+#include <memory>
+#include <utility>
+
+#include "tensorflow/compiler/xla/literal.h"
+#include "tensorflow/compiler/xla/ptr_util.h"
+#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
+#include "tensorflow/compiler/xla/service/hlo_computation.h"
+#include "tensorflow/compiler/xla/service/hlo_instruction.h"
+#include "tensorflow/compiler/xla/service/hlo_module.h"
+#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace xla {
+namespace gpu {
+
+class GpuLdgTest : public GpuCodegenTest {};
+
+// Parameters are never overwritten, so parameter reads should get ld.global.nc
+// reads.
+TEST_F(GpuLdgTest, LdgForParamRead) {
+ HloComputation::Builder builder(TestName());
+
+ auto shape = ShapeUtil::MakeShape(F32, {2, 2});
+ HloInstruction* param =
+ builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "x"));
+ builder.AddInstruction(
+ HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param, param));
+ std::unique_ptr<HloComputation> computation = builder.Build();
+
+ auto hlo_module = CreateNewModule();
+ hlo_module->AddEntryComputation(std::move(computation));
+
+ CompileAndVerifyPtx(std::move(hlo_module), R"(
+ CHECK-NOT: ld.global.f32
+ CHECK: ld.global.nc.f32
+ )");
+}
+
+// Check that reading a buffer produced by a non-parameter HLO also results in
+// ld.global.nc, if that buffer isn't modified within the instruction that reads
+// it.
+TEST_F(GpuLdgTest, LdgForNonParamRead) {
+ HloComputation::Builder builder(TestName());
+
+ auto shape = ShapeUtil::MakeShape(F32, {2, 2});
+ HloInstruction* param =
+ builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "x"));
+ HloInstruction* add = builder.AddInstruction(
+ HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param, param));
+ HloInstruction* square = builder.AddInstruction(
+ HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, add, add));
+ builder.AddInstruction(HloInstruction::CreateTuple({add, square}));
+ std::unique_ptr<HloComputation> computation = builder.Build();
+
+ auto hlo_module = CreateNewModule();
+ hlo_module->AddEntryComputation(std::move(computation));
+
+ CompileAndVerifyPtx(std::move(hlo_module), R"(
+ CHECK: {
+ CHECK-NOT: ld.global.f32
+ CHECK: ld.global.nc.f32
+ CHECK: }
+ )");
+}
+
+// Check that reading a buffer that's modified in-place does not produce
+// ld.global.nc.
+//
+// We do this by creating a reduce that feeds into a sin. We don't currently
+// fuse sin into reduce, and the sin is elementwise, so it reuses its input
+// buffer as its output.
+//
+// It seems like a fair bet that we won't start fusing sin into the output of
+// reduce in the foreseeable future. But if that turns out to be wrong, I give
+// you, future reader, permission to delete this test.
+TEST_F(GpuLdgTest, NoLdgWhenSharingBuffer) {
+ auto hlo_module = CreateNewModule();
+ HloComputation::Builder builder(TestName());
+
+ HloComputation* reduce_computation;
+ {
+ auto embedded_builder = HloComputation::Builder("add");
+ auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
+ 0, ShapeUtil::MakeShape(F32, {}), "lhs"));
+ auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
+ 1, ShapeUtil::MakeShape(F32, {}), "rhs"));
+ embedded_builder.AddInstruction(
+ HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
+ reduce_computation =
+ hlo_module->AddEmbeddedComputation(embedded_builder.Build());
+ }
+
+ auto param_shape = ShapeUtil::MakeShape(F32, {2, 2});
+ auto reduce_shape = ShapeUtil::MakeShape(F32, {2});
+ HloInstruction* param = builder.AddInstruction(
+ HloInstruction::CreateParameter(0, param_shape, "x"));
+ HloInstruction* reduce = builder.AddInstruction(HloInstruction::CreateReduce(
+ reduce_shape,
+ builder.AddInstruction(HloInstruction::CreateBinary(
+ param_shape, HloOpcode::kAdd, param, param)),
+ builder.AddInstruction(
+ HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))),
+ {0}, reduce_computation));
+ builder.AddInstruction(
+ HloInstruction::CreateUnary(reduce_shape, HloOpcode::kSin, reduce));
+
+ std::unique_ptr<HloComputation> computation = builder.Build();
+ hlo_module->AddEntryComputation(std::move(computation));
+
+ CompileAndVerifyPtx(std::move(hlo_module), R"(
+ CHECK-LABEL: .entry sin
+ CHECK: {
+ CHECK-NOT: ld.global.nc.f32
+ CHECK: ld.global.f32
+ CHECK: }
+ )");
+}
+
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/tests/gpu_noalias_test.cc b/tensorflow/compiler/xla/service/gpu/tests/gpu_noalias_test.cc
new file mode 100644
index 0000000000..c42e5704a4
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/gpu_noalias_test.cc
@@ -0,0 +1,68 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <memory>
+#include <utility>
+
+#include "tensorflow/compiler/xla/literal.h"
+#include "tensorflow/compiler/xla/ptr_util.h"
+#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
+#include "tensorflow/compiler/xla/service/hlo_computation.h"
+#include "tensorflow/compiler/xla/service/hlo_instruction.h"
+#include "tensorflow/compiler/xla/service/hlo_module.h"
+#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace xla {
+namespace gpu {
+
+class GpuNoAliasTest : public GpuCodegenTest {};
+
+TEST_F(GpuNoAliasTest, Concat) {
+ HloComputation::Builder builder(TestName());
+
+ auto param_shape = ShapeUtil::MakeShape(F32, {2, 2});
+ HloInstruction* param_x = builder.AddInstruction(
+ HloInstruction::CreateParameter(0, param_shape, "x"));
+ HloInstruction* param_y = builder.AddInstruction(
+ HloInstruction::CreateParameter(1, param_shape, "y"));
+ HloInstruction* concat =
+ builder.AddInstruction(HloInstruction::CreateConcatenate(
+ ShapeUtil::MakeShape(F32, {2, 4}), {param_x, param_y}, 1));
+ builder.AddInstruction(HloInstruction::CreateConcatenate(
+ ShapeUtil::MakeShape(F32, {2, 6}), {concat, param_x}, 1));
+
+ std::unique_ptr<HloComputation> computation = builder.Build();
+
+ auto hlo_module = CreateNewModule();
+ hlo_module->AddEntryComputation(std::move(computation));
+
+ CompileAndVerifyIr(std::move(hlo_module),
+ R"(
+; CHECK: %[[x_gep:.*]] = getelementptr inbounds [2 x [2 x float]], [2 x [2 x float]]* %x{{.*}}, i32 0
+; CHECK: load float, float* %[[x_gep]], {{.*}}, !noalias ![[param_noalias:.*]]
+; CHECK: %[[y_gep:.*]] = getelementptr inbounds [2 x [2 x float]], [2 x [2 x float]]* %y{{.*}}, i32 0
+; CHECK: load float, float* %[[y_gep]], {{.*}}, !noalias ![[param_noalias]]
+; CHECK: %[[result_ptr:.*]] = bitcast [2 x [6 x float]]* %fusion{{.*}} to float*
+; CHECK: %[[result_gep:.*]] = getelementptr inbounds float, float* %[[result_ptr]]
+; CHECK: store float {{.*}}, float* %[[result_gep]], !alias.scope ![[param_noalias]]
+; CHECK: ![[param_noalias]] = !{![[retval_buffer:.*]]}
+ )",
+ /*match_optimized_ir=*/false);
+}
+
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/tests/gpu_unrolling_test.cc b/tensorflow/compiler/xla/service/gpu/tests/gpu_unrolling_test.cc
new file mode 100644
index 0000000000..9622936306
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/gpu_unrolling_test.cc
@@ -0,0 +1,185 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <utility>
+
+#include "tensorflow/compiler/xla/service/gpu/tests/gpu_codegen_test.h"
+#include "tensorflow/compiler/xla/service/hlo_module_config.h"
+#include "tensorflow/compiler/xla/service/hlo_parser.h"
+#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace xla {
+namespace gpu {
+namespace {
+
+class GpuUnrollingTest : public GpuCodegenTest {};
+
+const char *const kAddModule = R"(
+ HloModule test_module
+
+ fused_computation {
+ p0.param_0 = f32[2,2]{1,0} parameter(0)
+ p1.param_1 = f32[2,2]{1,0} parameter(1)
+ ROOT add = f32[2,2] add(p0.param_0, p1.param_1)
+ }
+
+ ENTRY BroadcastIntoAdd {
+ p0 = f32[2,2]{1,0} parameter(0)
+ p1 = f32[2,2]{1,0} parameter(1)
+ ROOT fusion = f32[2,2]{1,0} fusion(p0, p1), kind=kLoop,
+ calls=fused_computation
+ })";
+
+TEST_F(GpuUnrollingTest, DoNotUnroll) {
+ HloModuleConfig config;
+ auto debug_options = HloTestBase::GetDebugOptionsForTest();
+ debug_options.set_xla_gpu_max_kernel_unroll_factor(1);
+ config.set_debug_options(debug_options);
+ auto hlo_module = ParseHloString(kAddModule, config).ValueOrDie();
+
+ CompileAndVerifyIr(std::move(hlo_module),
+ R"(
+; CHECK-LABEL: @fusion
+; CHECK: fadd
+; CHECK-NOT: fadd
+; CHECK: }
+ )",
+ /*match_optimized_ir=*/true);
+}
+
+TEST_F(GpuUnrollingTest, UnrollFourTimes) {
+ HloModuleConfig config;
+ auto debug_options = HloTestBase::GetDebugOptionsForTest();
+ // We request a factor of 8, but the computation works on 4 elements, limiting
+ // the maximum unroll factor.
+ debug_options.set_xla_gpu_max_kernel_unroll_factor(8);
+ config.set_debug_options(debug_options);
+ auto hlo_module = ParseHloString(kAddModule, config).ValueOrDie();
+
+ CompileAndVerifyIr(std::move(hlo_module),
+ R"(
+; CHECK-LABEL: @fusion
+; CHECK: fadd
+; CHECK: fadd
+; CHECK: fadd
+; CHECK: fadd
+; CHECK-NOT: fadd
+; CHECK: }
+ )",
+ /*match_optimized_ir=*/true);
+}
+
+TEST_F(GpuUnrollingTest, UnrollDefaultTimes) {
+ // The default unrolling factor is 4.
+ HloModuleConfig config;
+ config.set_debug_options(legacy_flags::GetDebugOptionsFromFlags());
+ auto hlo_module = ParseHloString(kAddModule, config).ValueOrDie();
+
+ CompileAndVerifyIr(std::move(hlo_module),
+ R"(
+; CHECK-LABEL: @fusion
+; CHECK: load <4 x float>
+; CHECK: fadd
+; CHECK: fadd
+; CHECK: fadd
+; CHECK: fadd
+; CHECK-NOT: fadd
+; CHECK: store <4 x float>
+; CHECK: }
+ )",
+ /*match_optimized_ir=*/true);
+}
+
+TEST_F(GpuUnrollingTest, UnrollUnfusedAdd) {
+ HloModuleConfig config;
+ auto debug_options = HloTestBase::GetDebugOptionsForTest();
+ debug_options.set_xla_gpu_max_kernel_unroll_factor(4);
+ config.set_debug_options(debug_options);
+
+ const char *const kUnfusedAddModule = R"(
+ HloModule test_module
+
+ ENTRY AddFunc {
+ p0 = f32[2,2]{1,0} parameter(0)
+ p1 = f32[2,2]{1,0} parameter(1)
+ ROOT add = f32[2,2]{1,0} add(p0, p1)
+ })";
+ auto hlo_module = ParseHloString(kUnfusedAddModule, config).ValueOrDie();
+
+ CompileAndVerifyIr(std::move(hlo_module),
+ R"(
+; CHECK-LABEL: @add
+; CHECK: load <4 x float>
+; CHECK: fadd
+; CHECK: fadd
+; CHECK: fadd
+; CHECK: fadd
+; CHECK-NOT: fadd
+; CHECK: store <4 x float>
+; CHECK: }
+ )",
+ /*match_optimized_ir=*/true);
+}
+
+TEST_F(GpuUnrollingTest, UnrollMultiOutputFusion) {
+ HloModuleConfig config;
+ auto debug_options = HloTestBase::GetDebugOptionsForTest();
+ debug_options.set_xla_gpu_max_kernel_unroll_factor(2);
+ config.set_debug_options(debug_options);
+
+ const char *const kMultiOutputFusionModule = R"(
+ HloModule test_module
+
+ fused_computation {
+ p0.param_0 = f32[2,2]{1,0} parameter(0)
+ p1.param_1 = f32[2,2]{1,0} parameter(1)
+ add = f32[2,2]{1,0} add(p0.param_0, p1.param_1)
+ mul = f32[2,2]{1,0} multiply(p0.param_0, p1.param_1)
+ ROOT tuple = (f32[2,2]{1,0}, f32[2,2]{1,0}) tuple(add, mul)
+ }
+
+ ENTRY BroadcastIntoAdd {
+ p0 = f32[2,2]{1,0} parameter(0)
+ p1 = f32[2,2]{1,0} parameter(1)
+ ROOT fusion = (f32[2,2]{1,0}, f32[2,2]{1,0}) fusion(p0, p1), kind=kLoop,
+ calls=fused_computation
+ })";
+ auto hlo_module =
+ ParseHloString(kMultiOutputFusionModule, config).ValueOrDie();
+
+ CompileAndVerifyIr(std::move(hlo_module),
+ R"(
+; CHECK-LABEL: @fusion
+; CHECK: load <2 x float>
+; CHECK: load <2 x float>
+; CHECK-NOT: load <2 x float>
+; CHECK: fadd
+; CHECK: fmul
+; CHECK: fadd
+; CHECK: fmul
+; CHECK: store <2 x float>
+; CHECK: store <2 x float>
+; CHECK-NOT: store <2 x float>
+; CHECK-NOT: fadd
+; CHECK-NOT: fmul
+; CHECK: }
+ )",
+ /*match_optimized_ir=*/true);
+}
+
+} // namespace
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/tests/infeed_test.cc b/tensorflow/compiler/xla/service/gpu/tests/infeed_test.cc
new file mode 100644
index 0000000000..ba5cd2d84d
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/tests/infeed_test.cc
@@ -0,0 +1,121 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <unistd.h>
+#include <memory>
+
+#include "tensorflow/compiler/xla/client/global_data.h"
+#include "tensorflow/compiler/xla/client/lib/arithmetic.h"
+#include "tensorflow/compiler/xla/client/local_client.h"
+#include "tensorflow/compiler/xla/client/xla_client/xla_builder.h"
+#include "tensorflow/compiler/xla/literal.h"
+#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/compiler/xla/test_helpers.h"
+#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
+#include "tensorflow/compiler/xla/tests/literal_test_util.h"
+#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/math/math_util.h"
+#include "tensorflow/core/platform/env.h"
+#include "tensorflow/core/platform/types.h"
+
+namespace xla {
+namespace {
+
+class InfeedTest : public ClientLibraryTestBase {
+ protected:
+ // Transfers the given literal to the infeed interface of the device, and
+ // checks that the data returned by the Infeed HLO matches the literal.
+ void TestInfeedRoundTrip(const Literal& literal) {
+ // TODO(b/30481585) Explicitly reset the Infeed state so that the
+ // test is not affected by the state from the previous tests.
+ ASSERT_IS_OK(client_->TransferToInfeed(literal));
+ XlaBuilder builder(TestName());
+ Infeed(&builder, literal.shape());
+ if (ShapeUtil::IsTuple(literal.shape())) {
+ // TODO(b/30609564): Use ComputeAndCompareLiteral instead.
+ ComputeAndCompareTuple(&builder, literal, {});
+ } else {
+ ComputeAndCompareLiteral(&builder, literal, {});
+ }
+ }
+};
+
+TEST_F(InfeedTest, SingleInfeedR0Bool) {
+ TestInfeedRoundTrip(*LiteralUtil::CreateR0<bool>(true));
+}
+
+TEST_F(InfeedTest, SingleInfeedR1U32) {
+ TestInfeedRoundTrip(*LiteralUtil::CreateR1<uint32>({1, 2, 3}));
+}
+
+TEST_F(InfeedTest, SingleInfeedR2F32) {
+ TestInfeedRoundTrip(*LiteralUtil::CreateR2F32Linspace(0.0, 1.0, 128, 64));
+}
+
+TEST_F(InfeedTest, SingleInfeedR3F32) {
+ TestInfeedRoundTrip(
+ *LiteralUtil::CreateR3({{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}},
+ {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}}));
+}
+
+TEST_F(InfeedTest, SingleInfeedR3F32DifferentLayout) {
+ const Layout r3_dim0minor = LayoutUtil::MakeLayout({0, 1, 2});
+ const Layout r3_dim0major = LayoutUtil::MakeLayout({2, 1, 0});
+
+ TestInfeedRoundTrip(*LiteralUtil::CreateR3WithLayout(
+ {{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}},
+ {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}},
+ r3_dim0minor));
+
+ TestInfeedRoundTrip(*LiteralUtil::CreateR3WithLayout(
+ {{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}},
+ {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}},
+ r3_dim0major));
+}
+
+TEST_F(InfeedTest, SingleInfeedR4S32) {
+ TestInfeedRoundTrip(*LiteralUtil::CreateR4(
+ {{{{1, -2}, {-4, 5}, {6, 7}}, {{8, 9}, {10, 11}, {12, 13}}},
+ {{{10, 3}, {7, -2}, {3, 6}}, {{2, 5}, {-11, 5}, {-2, -5}}}}));
+}
+
+// Tests that a large infeed can be handled.
+TEST_F(InfeedTest, LargeInfeed) {
+ Array4D<float> array(80, 100, 8, 128);
+ array.FillIota(1.0f);
+ TestInfeedRoundTrip(*LiteralUtil::CreateR4FromArray4D<float>(array));
+}
+
+TEST_F(InfeedTest, SingleInfeedTuple) {
+ TestInfeedRoundTrip(
+ *LiteralUtil::MakeTuple({LiteralUtil::CreateR1<uint32>({1, 2, 3}).get(),
+ LiteralUtil::CreateR0<bool>(false).get()}));
+}
+
+TEST_F(InfeedTest, SingleInfeedEmptyTuple) {
+ TestInfeedRoundTrip(*LiteralUtil::MakeTuple({}));
+}
+
+// Tests that a large tuple infeed can be handled.
+TEST_F(InfeedTest, SingleInfeedLargeTuple) {
+ Array4D<float> array(40, 100, 8, 128);
+ array.FillIota(1.0f);
+ TestInfeedRoundTrip(*LiteralUtil::MakeTuple(
+ {LiteralUtil::CreateR4FromArray4D<float>(array).get(),
+ LiteralUtil::CreateR0<int32>(5).get()}));
+}
+
+} // namespace
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/thunk.cc b/tensorflow/compiler/xla/service/gpu/thunk.cc
new file mode 100644
index 0000000000..c78605cebb
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/thunk.cc
@@ -0,0 +1,59 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/service/gpu/thunk.h"
+
+namespace xla {
+namespace gpu {
+
+std::ostream& operator<<(std::ostream& os, Thunk::Kind kind) {
+ switch (kind) {
+ case Thunk::kConditional:
+ return os << "kConditional";
+ case Thunk::kConvolution:
+ return os << "kConvolution";
+ case Thunk::kCopy:
+ return os << "kCopy";
+ case Thunk::kCudnnBatchNormBackward:
+ return os << "kCudnnBatchNormBackward";
+ case Thunk::kCudnnBatchNormForwardInference:
+ return os << "kCudnnBatchNormForwardInference";
+ case Thunk::kCudnnBatchNormForwardTraining:
+ return os << "kCudnnBatchNormForwardTraining";
+ case Thunk::kFft:
+ return os << "kFft";
+ case Thunk::kGemm:
+ return os << "kGemm";
+ case Thunk::kInfeed:
+ return os << "kInfeed";
+ case Thunk::kKernel:
+ return os << "kKernel";
+ case Thunk::kMemset32BitValue:
+ return os << "kMemset32BitValue";
+ case Thunk::kMemzero:
+ return os << "kMemzero";
+ case Thunk::kOutfeed:
+ return os << "kOutfeed";
+ case Thunk::kSequential:
+ return os << "kSequential";
+ case Thunk::kTuple:
+ return os << "kTuple";
+ case Thunk::kWhile:
+ return os << "kWhile";
+ }
+}
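+
+// A minimal usage sketch (hypothetical call site): with this overload a
+// Thunk::Kind can be streamed directly into logs or test failure messages.
+//
+//   Thunk::Kind kind = Thunk::kCopy;
+//   LOG(INFO) << "Executing thunk of kind " << kind;  // prints "kCopy"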
+
+} // namespace gpu
+} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/thunk.h b/tensorflow/compiler/xla/service/gpu/thunk.h
index 99a1a0eae9..4df0bb005b 100644
--- a/tensorflow/compiler/xla/service/gpu/thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/thunk.h
@@ -41,7 +41,7 @@ class GpuExecutable;
// This is thread-compatible.
class Thunk {
public:
- enum class Kind {
+ enum Kind {
kConditional,
kConvolution,
kCopy,
@@ -111,6 +111,8 @@ class Thunk {
// A sequence of thunks.
using ThunkSequence = std::vector<std::unique_ptr<Thunk>>;
+std::ostream& operator<<(std::ostream& os, Thunk::Kind kind);
+
} // namespace gpu
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/gpu/xfeed_queue.h b/tensorflow/compiler/xla/service/gpu/xfeed_queue.h
new file mode 100644
index 0000000000..dd46ff433b
--- /dev/null
+++ b/tensorflow/compiler/xla/service/gpu/xfeed_queue.h
@@ -0,0 +1,90 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_XFEED_QUEUE_H_
+#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_XFEED_QUEUE_H_
+
+#include <deque>
+#include <functional>
+#include <vector>
+
+#include "tensorflow/core/platform/mutex.h"
+#include "tensorflow/core/platform/notification.h"
+#include "tensorflow/core/platform/thread_annotations.h"
+
+namespace xla {
+namespace gpu {
+
+// TODO(b/30467474) Once GPU outfeed implementation settles, consider
+// folding back the cpu and gpu outfeed implementations into a generic
+// one if possible.
+
+// Manages a thread-safe queue of buffers.
+template <typename BufferType>
+class XfeedQueue {
+ public:
+ // Adds a tree of buffers to the queue. The individual buffers correspond to
+ // the elements of a tuple and may be nullptr if the buffer is a tuple index
+ // buffer.
+ void EnqueueDestination(BufferType buffers) {
+ tensorflow::mutex_lock l(mu_);
+ enqueued_buffers_.push_back(std::move(buffers));
+ cv_.notify_one();
+ }
+
+ // Blocks until the queue is non-empty, then returns the buffer at the head of
+ // the queue.
+ BufferType BlockingGetNextDestination() {
+ bool became_empty;
+ BufferType current_buffer;
+ {
+ tensorflow::mutex_lock l(mu_);
+ while (enqueued_buffers_.empty()) {
+ cv_.wait(l);
+ }
+ current_buffer = std::move(enqueued_buffers_.front());
+ enqueued_buffers_.pop_front();
+ became_empty = enqueued_buffers_.empty();
+ }
+ if (became_empty) {
+ for (const auto& callback : on_empty_callbacks_) {
+ callback();
+ }
+ }
+ return current_buffer;
+ }
+
+ void RegisterOnEmptyCallback(std::function<void()> callback) {
+ on_empty_callbacks_.push_back(std::move(callback));
+ }
+
+ private:
+ tensorflow::mutex mu_;
+
+ // Condition variable that is signaled every time a buffer is enqueued.
+ tensorflow::condition_variable cv_;
+
+ // The queue of trees of buffers. Buffer* queue contents are not owned.
+ std::deque<BufferType> enqueued_buffers_ GUARDED_BY(mu_);
+
+ // List of callbacks which will be called when 'enqueued_buffers_' becomes
+ // empty.
+ std::vector<std::function<void()>> on_empty_callbacks_;
+};
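+
+// A minimal usage sketch (hypothetical buffer type and pointer): a producer
+// thread enqueues destination buffers while a consumer thread blocks until
+// one is available.
+//
+//   XfeedQueue<std::vector<void*>> queue;
+//   queue.RegisterOnEmptyCallback([] { VLOG(1) << "queue drained"; });
+//   queue.EnqueueDestination({destination_ptr});            // producer thread
+//   std::vector<void*> bufs = queue.BlockingGetNextDestination();  // consumer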
+
+} // namespace gpu
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_XFEED_QUEUE_H_
diff --git a/tensorflow/compiler/xla/service/hlo.proto b/tensorflow/compiler/xla/service/hlo.proto
index d241791060..63a8a813cd 100644
--- a/tensorflow/compiler/xla/service/hlo.proto
+++ b/tensorflow/compiler/xla/service/hlo.proto
@@ -155,6 +155,11 @@ message HloInstructionProto {
repeated int64 replica_group_ids = 44;
int64 all_reduce_id = 45;
string cross_replica_sum_barrier = 46;
+
+ // Whether this Send/Recv instruction transfers data to/from the host. Only
+ // present for Send and Recv instructions and their SendDone and RecvDone
+ // partners.
+ bool is_host_transfer = 47;
}
// Serialization of HloComputation.
@@ -239,8 +244,9 @@ message BufferAllocationProto {
int64 index = 1;
int64 size = 2;
bool is_thread_local = 3;
- bool is_reusable = 4;
+ bool is_tuple = 11;
bool is_entry_computation_parameter = 5;
+ bool is_constant = 12;
int64 parameter_number = 6;
repeated int64 parameter_shape_index = 10;
bool maybe_live_out = 7;
diff --git a/tensorflow/compiler/xla/service/hlo_alias_analysis.h b/tensorflow/compiler/xla/service/hlo_alias_analysis.h
index afb0c20f0c..1fea544730 100644
--- a/tensorflow/compiler/xla/service/hlo_alias_analysis.h
+++ b/tensorflow/compiler/xla/service/hlo_alias_analysis.h
@@ -42,7 +42,7 @@ class HloAliasAnalysis {
static StatusOr<std::unique_ptr<HloAliasAnalysis>> Run(
HloModule* module,
const HloDataflowAnalysis::FusionCanShareBufferFunction&
- fusion_can_share_buffer = nullptr);
+ fusion_can_share_buffer);
string ToString() const;
diff --git a/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc b/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc
index 403d4df6b5..da94ab5346 100644
--- a/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc
@@ -47,7 +47,9 @@ class HloAliasAnalysisTest : public HloTestBase {
// reference to the generated analysis stored in analysis_.
HloAliasAnalysis& RunAnalysis() {
hlo_graph_dumper::MaybeDumpHloModule(*module_, "Before alias analysis");
- analysis_ = HloAliasAnalysis::Run(module_.get()).ConsumeValueOrDie();
+ analysis_ = HloAliasAnalysis::Run(module_.get(),
+ /*fusion_can_share_buffer=*/nullptr)
+ .ConsumeValueOrDie();
return *analysis_;
}
diff --git a/tensorflow/compiler/xla/service/hlo_computation.cc b/tensorflow/compiler/xla/service/hlo_computation.cc
index 166a83fade..441288da1a 100644
--- a/tensorflow/compiler/xla/service/hlo_computation.cc
+++ b/tensorflow/compiler/xla/service/hlo_computation.cc
@@ -898,4 +898,13 @@ void HloComputation::UniquifyName(NameUniquer* name_uniquer) {
name_ = name_uniquer->GetUniqueName(name_);
}
+HloInstruction* HloComputation::GetInstructionWithName(
+ tensorflow::StringPiece name) {
+ auto instructions_in_computation = instructions();
+ auto it = c_find_if(instructions_in_computation, [&](HloInstruction* instr) {
+ return instr->name() == name;
+ });
+ return it == instructions_in_computation.end() ? nullptr : *it;
+}
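+
+// A minimal usage sketch (hypothetical instruction name "add"):
+//
+//   HloInstruction* add = computation->GetInstructionWithName("add");
+//   if (add == nullptr) {
+//     // No instruction named "add" in this computation.
+//   }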
+
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_computation.h b/tensorflow/compiler/xla/service/hlo_computation.h
index abc1da4da3..49ed65910f 100644
--- a/tensorflow/compiler/xla/service/hlo_computation.h
+++ b/tensorflow/compiler/xla/service/hlo_computation.h
@@ -365,6 +365,10 @@ class HloComputation {
unique_id_ = id;
}
+ // Returns the instruction in this computation that has the given name.
+ // Returns nullptr if there is no such instruction.
+ HloInstruction* GetInstructionWithName(tensorflow::StringPiece name);
+
int64 unique_id() const { return unique_id_; }
private:
diff --git a/tensorflow/compiler/xla/service/hlo_cost_analysis.cc b/tensorflow/compiler/xla/service/hlo_cost_analysis.cc
index c49cf7f5db..1f672502f7 100644
--- a/tensorflow/compiler/xla/service/hlo_cost_analysis.cc
+++ b/tensorflow/compiler/xla/service/hlo_cost_analysis.cc
@@ -155,6 +155,10 @@ Status HloCostAnalysis::HandleConstant(const HloInstruction*) {
return Status::OK();
}
+Status HloCostAnalysis::HandleIota(const HloInstruction*) {
+ return Status::OK();
+}
+
Status HloCostAnalysis::HandleGetTupleElement(const HloInstruction*) {
// GetTupleElement forwards a pointer and does not touch each element in the
// output.
diff --git a/tensorflow/compiler/xla/service/hlo_cost_analysis.h b/tensorflow/compiler/xla/service/hlo_cost_analysis.h
index 0181138a6d..82d650dc7b 100644
--- a/tensorflow/compiler/xla/service/hlo_cost_analysis.h
+++ b/tensorflow/compiler/xla/service/hlo_cost_analysis.h
@@ -52,6 +52,7 @@ class HloCostAnalysis : public ConstDfsHloVisitor {
Status HandleElementwiseUnary(const HloInstruction* hlo) override;
Status HandleElementwiseBinary(const HloInstruction* hlo) override;
Status HandleConstant(const HloInstruction* constant) override;
+ Status HandleIota(const HloInstruction* iota) override;
Status HandleGetTupleElement(
const HloInstruction* get_tuple_element) override;
Status HandleSelect(const HloInstruction* hlo) override;
diff --git a/tensorflow/compiler/xla/service/hlo_cse_test.cc b/tensorflow/compiler/xla/service/hlo_cse_test.cc
index 76b9c66651..90fbaa37c5 100644
--- a/tensorflow/compiler/xla/service/hlo_cse_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_cse_test.cc
@@ -239,7 +239,7 @@ TEST_F(HloCseTest, IdenticalInstructions) {
EXPECT_EQ(5, computation->instruction_count());
EXPECT_THAT(tuple, op::Tuple(exp1, exp2, exp3));
- HloCSE cse(/*is_layout_sensitive=*/false);
+ HloCSE cse(/*is_layout_sensitive=*/true);
EXPECT_TRUE(cse.Run(module.get()).ValueOrDie());
EXPECT_EQ(3, computation->instruction_count());
@@ -248,6 +248,183 @@ TEST_F(HloCseTest, IdenticalInstructions) {
EXPECT_THAT(tuple, op::Tuple(first_operand, first_operand, first_operand));
}
+// Tests two identical while loops with the same inputs.
+TEST_F(HloCseTest, WhileLoopsIdenticalConditionsAndBodiesSameInput) {
+ auto module = ParseHloString(R"(
+ HloModule WhileLoopsIdenticalConditionsAndBodiesSameInput
+
+ %body (param: (f32[], f32[])) -> (f32[], f32[]) {
+ %param = (f32[], f32[]) parameter(0)
+ %get-tuple-element = f32[] get-tuple-element((f32[], f32[]) %param), index=0
+ %get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[]) %param), index=1
+ %add = f32[] add(f32[] %get-tuple-element, f32[] %get-tuple-element.1)
+ ROOT %tuple = (f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %add)
+ }
+
+ %condition (param.1: (f32[], f32[])) -> pred[] {
+ %param.1 = (f32[], f32[]) parameter(0)
+ ROOT %constant = pred[] constant(false)
+ }
+
+ %condition.1 (param.2: (f32[], f32[])) -> pred[] {
+ %param.2 = (f32[], f32[]) parameter(0)
+ ROOT %constant.1 = pred[] constant(false)
+ }
+
+ ENTRY %WhileLoopsIdenticalConditionsAndBodiesSameInput () -> (f32[], f32[]) {
+ %constant.2 = f32[] constant(1)
+ %constant.3 = f32[] constant(2)
+ %tuple.1 = (f32[], f32[]) tuple(f32[] %constant.2, f32[] %constant.3)
+ %while = (f32[], f32[]) while((f32[], f32[]) %tuple.1), condition=%condition, body=%body
+ ROOT %while.1 = (f32[], f32[]) while((f32[], f32[]) %tuple.1), condition=%condition.1, body=%body
+ }
+ )")
+ .ValueOrDie();
+
+ auto computation = module->entry_computation();
+
+ EXPECT_EQ(5, computation->instruction_count());
+ HloCSE cse(/*is_layout_sensitive=*/true);
+ EXPECT_TRUE(cse.Run(module.get()).ValueOrDie());
+ EXPECT_EQ(4, computation->instruction_count());
+}
+
+// Tests two while loops with the same conditions and inputs but different
+// bodies.
+TEST_F(HloCseTest, WhileLoopsIdenticalConditionsSameInputAndDifferentBodies) {
+ auto module = ParseHloString(R"(
+ HloModule WhileLoopsIdenticalConditionsSameInputAndDifferentBodies
+
+ %body (param: (f32[], f32[])) -> (f32[], f32[]) {
+ %param = (f32[], f32[]) parameter(0)
+ %get-tuple-element = f32[] get-tuple-element((f32[], f32[]) %param), index=0
+ %get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[]) %param), index=1
+ %add = f32[] add(f32[] %get-tuple-element, f32[] %get-tuple-element.1)
+ ROOT %tuple = (f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %add)
+ }
+
+ %body2 (param.1: (f32[], f32[])) -> (f32[], f32[]) {
+ %param.1 = (f32[], f32[]) parameter(0)
+ %get-tuple-element.2 = f32[] get-tuple-element((f32[], f32[]) %param.1), index=0
+ %get-tuple-element.3 = f32[] get-tuple-element((f32[], f32[]) %param.1), index=1
+ %sub = f32[] subtract(f32[] %get-tuple-element.2, f32[] %get-tuple-element.3)
+ ROOT %tuple.2 = (f32[], f32[]) tuple(f32[] %get-tuple-element.2, f32[] %sub)
+ }
+
+ %condition (param.2: (f32[], f32[])) -> pred[] {
+ %param.2 = (f32[], f32[]) parameter(0)
+ ROOT %constant = pred[] constant(false)
+ }
+
+ %condition.1 (param.3: (f32[], f32[])) -> pred[] {
+ %param.3 = (f32[], f32[]) parameter(0)
+ ROOT %constant.1 = pred[] constant(false)
+ }
+
+ ENTRY %WhileLoopsIdenticalConditionsSameInputAndDifferentBodies () -> (f32[], f32[]) {
+ %constant.2 = f32[] constant(1)
+ %constant.3 = f32[] constant(2)
+ %tuple.1 = (f32[], f32[]) tuple(f32[] %constant.2, f32[] %constant.3)
+ %while = (f32[], f32[]) while((f32[], f32[]) %tuple.1), condition=%condition, body=%body
+ ROOT %while.1 = (f32[], f32[]) while((f32[], f32[]) %tuple.1), condition=%condition.1, body=%body2
+ }
+ )")
+ .ValueOrDie();
+
+ auto computation = module->entry_computation();
+
+ EXPECT_EQ(5, computation->instruction_count());
+ HloCSE cse(/*is_layout_sensitive=*/true);
+ EXPECT_FALSE(cse.Run(module.get()).ValueOrDie());
+ EXPECT_EQ(5, computation->instruction_count());
+}
+
+// Tests two identical while loops with different inputs.
+TEST_F(HloCseTest, WhileLoopsIdenticalConditionsAndBodiesDifferentInput) {
+ auto module = ParseHloString(R"(
+ HloModule WhileLoopsIdenticalConditionsAndBodiesDifferentInput
+
+ %body (param: (f32[], f32[])) -> (f32[], f32[]) {
+ %param = (f32[], f32[]) parameter(0)
+ %get-tuple-element = f32[] get-tuple-element((f32[], f32[]) %param), index=0
+ %get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[]) %param), index=1
+ %add = f32[] add(f32[] %get-tuple-element, f32[] %get-tuple-element.1)
+ ROOT %tuple = (f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %add)
+ }
+
+ %condition (param.1: (f32[], f32[])) -> pred[] {
+ %param.1 = (f32[], f32[]) parameter(0)
+ ROOT %constant = pred[] constant(false)
+ }
+
+ %condition.1 (param.2: (f32[], f32[])) -> pred[] {
+ %param.2 = (f32[], f32[]) parameter(0)
+ ROOT %constant.1 = pred[] constant(false)
+ }
+
+ ENTRY %WhileLoopsIdenticalConditionsAndBodiesDifferentInput () -> (f32[], f32[]) {
+ %constant.2 = f32[] constant(1)
+ %constant.3 = f32[] constant(2)
+ %tuple.1 = (f32[], f32[]) tuple(f32[] %constant.2, f32[] %constant.3)
+ %while = (f32[], f32[]) while((f32[], f32[]) %tuple.1), condition=%condition, body=%body
+ %constant.4 = f32[] constant(1)
+ %constant.5 = f32[] constant(2)
+ %tuple.2 = (f32[], f32[]) tuple(f32[] %constant.4, f32[] %constant.5)
+ ROOT %while.1 = (f32[], f32[]) while((f32[], f32[]) %tuple.2), condition=%condition.1, body=%body
+ }
+
+ )")
+ .ValueOrDie();
+
+ auto computation = module->entry_computation();
+
+ EXPECT_EQ(8, computation->instruction_count());
+ HloCSE cse(/*is_layout_sensitive=*/true);
+ EXPECT_FALSE(cse.Run(module.get()).ValueOrDie());
+ EXPECT_EQ(8, computation->instruction_count());
+}
+
+// Tests two while loops with identical bodies and the same inputs but
+// different conditions.
+TEST_F(HloCseTest, WhileLoopsIdenticalBodiesAndInputDifferentConditions) {
+ auto module = ParseHloString(R"(
+ HloModule WhileLoopsIdenticalBodiesAndInputDifferentConditions
+
+ %body (param: (f32[], f32[])) -> (f32[], f32[]) {
+ %param = (f32[], f32[]) parameter(0)
+ %get-tuple-element = f32[] get-tuple-element((f32[], f32[]) %param), index=0
+ %get-tuple-element.1 = f32[] get-tuple-element((f32[], f32[]) %param), index=1
+ %add = f32[] add(f32[] %get-tuple-element, f32[] %get-tuple-element.1)
+ ROOT %tuple = (f32[], f32[]) tuple(f32[] %get-tuple-element, f32[] %add)
+ }
+
+ %condition (param.1: (f32[], f32[])) -> pred[] {
+ %param.1 = (f32[], f32[]) parameter(0)
+ ROOT %constant = pred[] constant(false)
+ }
+
+ %condition.1 (param.2: (f32[], f32[])) -> pred[] {
+ %param.2 = (f32[], f32[]) parameter(0)
+ ROOT %constant.1 = pred[] constant(true)
+ }
+
+ ENTRY %WhileLoopsIdenticalBodiesAndInputDifferentConditions () -> (f32[],
+f32[]) { %constant.2 = f32[] constant(1) %constant.3 = f32[] constant(2)
+ %tuple.1 = (f32[], f32[]) tuple(f32[] %constant.2, f32[] %constant.3)
+ %while = (f32[], f32[]) while((f32[], f32[]) %tuple.1),
+condition=%condition, body=%body ROOT %while.1 = (f32[], f32[]) while((f32[],
+f32[]) %tuple.1), condition=%condition.1, body=%body
+ })")
+ .ValueOrDie();
+
+ auto computation = module->entry_computation();
+
+ EXPECT_EQ(5, computation->instruction_count());
+ HloCSE cse(true);
+ EXPECT_FALSE(cse.Run(module.get()).ValueOrDie());
+ EXPECT_EQ(5, computation->instruction_count());
+}
+
TEST_F(HloCseTest, IdenticalInstructionsDifferentLayoutsSensitive) {
// Test that two identical instructions with different layouts are *not*
// commoned if the pass is layout sensitive.
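
The three while-loop tests added above pin down when HLO CSE may merge kWhile instructions: only if the conditions, the bodies, and the loop inputs all match, and a difference in any one of them must block the merge. A minimal standalone sketch of that criterion in plain C++ (hypothetical LoopDesc type, not the XLA classes) is:

    #include <cassert>
    #include <string>

    // Hypothetical stand-in for a while instruction: condition and body are
    // identified by name, the loop input by pointer identity.
    struct LoopDesc {
      std::string condition;
      std::string body;
      const void* input;
    };

    // Two while loops are CSE candidates only when condition, body, and input
    // all agree, which is exactly what the tests above assert by expecting
    // cse.Run() to report no change.
    bool CanCommonWhileLoops(const LoopDesc& a, const LoopDesc& b) {
      return a.condition == b.condition && a.body == b.body && a.input == b.input;
    }

    int main() {
      int tuple1 = 0, tuple2 = 0;
      LoopDesc base{"cond", "body", &tuple1};
      LoopDesc different_body{"cond", "body2", &tuple1};
      LoopDesc different_input{"cond", "body", &tuple2};
      LoopDesc different_condition{"cond2", "body", &tuple1};
      assert(CanCommonWhileLoops(base, base));
      assert(!CanCommonWhileLoops(base, different_body));
      assert(!CanCommonWhileLoops(base, different_input));
      assert(!CanCommonWhileLoops(base, different_condition));
      return 0;
    }
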
diff --git a/tensorflow/compiler/xla/service/hlo_domain_metadata.h b/tensorflow/compiler/xla/service/hlo_domain_metadata.h
index aa0308100a..f855f2a1fc 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_metadata.h
+++ b/tensorflow/compiler/xla/service/hlo_domain_metadata.h
@@ -71,12 +71,6 @@ class DomainMetadata {
// Returns a string representation of the metadata.
virtual string ToString() const = 0;
-
- // Given a reachable set (the set of instructions which are reachable from
- // each other via user/operand pathways, without crossing a kDomain
- // instruciton), makes sure that all of them have metadata attributes which
- // are coherent with this metadata object.
- virtual Status NormalizeInstructions(const Domain& domain) const = 0;
};
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_domain_remover.cc b/tensorflow/compiler/xla/service/hlo_domain_remover.cc
index e2e820002b..67fad0769f 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_remover.cc
+++ b/tensorflow/compiler/xla/service/hlo_domain_remover.cc
@@ -47,12 +47,12 @@ Status HloDomainRemover::RunContext::VerifyAndNormalizeDomain(
HloDomainVerifier::VerifyDomain(domain));
if (ref_metadata != nullptr) {
VLOG(4) << "Applying domain normalization: " << ref_metadata->ToString();
- TF_RETURN_IF_ERROR(ref_metadata->NormalizeInstructions(domain));
+ TF_RETURN_IF_ERROR(remover_->normalizer_(domain, ref_metadata));
} else {
// No kDomain instruction was present within this domain, so call the
// generic normalization functions and have them apply their heuristic.
VLOG(2) << "Applying domain-less normalization";
- TF_RETURN_IF_ERROR(remover_->normalizer_(domain));
+ TF_RETURN_IF_ERROR(remover_->normalizer_(domain, nullptr));
}
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/hlo_domain_remover.h b/tensorflow/compiler/xla/service/hlo_domain_remover.h
index 0c71dd34fd..c859e05f02 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_remover.h
+++ b/tensorflow/compiler/xla/service/hlo_domain_remover.h
@@ -35,9 +35,10 @@ class HloDomainRemover : public HloPassInterface {
// instructions in it with the same attributes (i.e., sharding), a normalizer
// function is tasked with applying attribute normalization to the instructions
// within such a domain.
- HloDomainRemover(
- tensorflow::StringPiece kind,
- std::function<Status(const DomainMetadata::Domain&)> normalizer)
+ HloDomainRemover(tensorflow::StringPiece kind,
+ std::function<Status(const DomainMetadata::Domain&,
+ const DomainMetadata* metadata)>
+ normalizer)
: kind_(kind.ToString()), normalizer_(std::move(normalizer)) {}
tensorflow::StringPiece name() const override { return "domain_remover"; }
@@ -48,7 +49,9 @@ class HloDomainRemover : public HloPassInterface {
class RunContext;
string kind_;
- std::function<Status(const DomainMetadata::Domain&)> normalizer_;
+ std::function<Status(const DomainMetadata::Domain&,
+ const DomainMetadata* metadata)>
+ normalizer_;
};
} // namespace xla
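
In short, the remover's normalizer callback now always receives two arguments: the domain plus the reference metadata when a kDomain instruction supplied one, or nullptr for the domain-less fallback path. A small standalone sketch of that calling convention (plain C++ with placeholder Domain/Metadata types and int standing in for Status) is:

    #include <functional>
    #include <iostream>

    struct Domain {};                                      // placeholder for DomainMetadata::Domain
    struct Metadata { virtual ~Metadata() = default; };    // placeholder for DomainMetadata

    // One callback type covers both branches of VerifyAndNormalizeDomain:
    // metadata-driven normalization and the generic, domain-less heuristic.
    using Normalizer = std::function<int(const Domain&, const Metadata*)>;

    int VerifyAndNormalize(const Domain& domain, const Metadata* ref_metadata,
                           const Normalizer& normalizer) {
      // ref_metadata is null when no kDomain instruction was present.
      return normalizer(domain, ref_metadata);
    }

    int main() {
      Normalizer normalize = [](const Domain&, const Metadata* metadata) {
        std::cout << (metadata ? "metadata-driven\n" : "domain-less\n");
        return 0;
      };
      Domain domain;
      Metadata metadata;
      VerifyAndNormalize(domain, &metadata, normalize);  // prints "metadata-driven"
      VerifyAndNormalize(domain, nullptr, normalize);    // prints "domain-less"
      return 0;
    }
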
diff --git a/tensorflow/compiler/xla/service/hlo_domain_test.cc b/tensorflow/compiler/xla/service/hlo_domain_test.cc
index 00b2c860a7..ffc18a0f88 100644
--- a/tensorflow/compiler/xla/service/hlo_domain_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_domain_test.cc
@@ -97,12 +97,6 @@ class OpNameMetadata : public DomainMetadata {
string ToString() const override { return opname_; }
- Status NormalizeInstructions(
- const DomainMetadata::Domain& domain) const override {
- // For the purposes of this test, nothing to do.
- return Status::OK();
- }
-
static tensorflow::StringPiece KindName() { return "opname"; }
private:
@@ -124,7 +118,8 @@ std::unique_ptr<HloInstruction> OpNameDomainCreator(HloInstruction* instruction,
std::move(user_side_metadata));
}
-Status OpNameDomainNormalizer(const DomainMetadata::Domain& domain) {
+Status OpNameDomainNormalizer(const DomainMetadata::Domain& domain,
+ const DomainMetadata* metadata) {
// Nothing to do for the particular use this test makes of the OpName domains.
return Status::OK();
}
@@ -159,7 +154,7 @@ ENTRY entry {
EXPECT_FALSE(HasDomainEdge(module, "e", "d"));
HloDomainRemover remover(ShardingMetadata::KindName(),
- NormalizeShardingDomain);
+ ShardingMetadata::NormalizeShardingDomain);
TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
EXPECT_TRUE(remover_changed);
@@ -227,7 +222,7 @@ ENTRY entry {
EXPECT_FALSE(HasDomainEdge(module, "e", "d"));
HloDomainRemover remover(ShardingMetadata::KindName(),
- NormalizeShardingDomain);
+ ShardingMetadata::NormalizeShardingDomain);
TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
EXPECT_TRUE(remover_changed);
@@ -277,7 +272,7 @@ ENTRY entry {
LOG(INFO) << "Original module:\n" << module->ToString();
HloDomainRemover remover(ShardingMetadata::KindName(),
- NormalizeShardingDomain);
+ ShardingMetadata::NormalizeShardingDomain);
TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
EXPECT_FALSE(remover_changed);
@@ -324,7 +319,7 @@ ENTRY entry {
EXPECT_FALSE(HasDomainEdge(module, "e", "d"));
HloDomainRemover sharding_remover(ShardingMetadata::KindName(),
- NormalizeShardingDomain);
+ ShardingMetadata::NormalizeShardingDomain);
TF_ASSERT_OK_AND_ASSIGN(bool sharding_remover_changed,
sharding_remover.Run(module));
EXPECT_TRUE(sharding_remover_changed);
@@ -411,7 +406,7 @@ ENTRY entry {
}
HloDomainRemover remover(ShardingMetadata::KindName(),
- NormalizeShardingDomain);
+ ShardingMetadata::NormalizeShardingDomain);
TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
EXPECT_TRUE(remover_changed);
@@ -465,7 +460,7 @@ ENTRY entry {
TF_EXPECT_OK(module->entry_computation()->RemoveInstruction(tuple));
HloDomainRemover remover(ShardingMetadata::KindName(),
- NormalizeShardingDomain);
+ ShardingMetadata::NormalizeShardingDomain);
TF_ASSERT_OK_AND_ASSIGN(bool remover_changed, remover.Run(module));
EXPECT_TRUE(remover_changed);
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator.cc b/tensorflow/compiler/xla/service/hlo_evaluator.cc
index dfdfeb49a2..51353eea6e 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator.cc
+++ b/tensorflow/compiler/xla/service/hlo_evaluator.cc
@@ -1122,45 +1122,90 @@ Status HloEvaluator::HandleWhile(HloInstruction* while_hlo) {
// hoops to make this work.
namespace {
template <typename KeyType, typename ValueType>
-std::unique_ptr<Literal> EvaluateSortInternal(HloInstruction* sort,
- const Literal& keys_literal,
- const Literal& values_literal) {
- CHECK_EQ(sort->operand_count(), 2);
+StatusOr<std::unique_ptr<Literal>> EvaluateSortInternal(
+ HloInstruction* sort, const Literal& keys_literal,
+ const Literal& values_literal) {
+ auto rank = ShapeUtil::Rank(keys_literal.shape());
+ TF_RET_CHECK(
+ ShapeUtil::SameDimensions(keys_literal.shape(), values_literal.shape()))
+ << "Sort keys and values must have the same dimensions";
+ TF_RET_CHECK(rank > 0 && rank <= 2)
+ << "Sort is only supported for rank-1 and rank-2 shapes, rank is: "
+ << rank;
+ TF_RET_CHECK(sort->operand_count() == 2) << "Expected key-value sort";
// We need to sort an array of keys and an array of values, where the
// sorted order of the values is determined by the keys. The simplest(?)
// way to do this is to go to an array-of-pairs representation, sort the
// array using the keys, and then go back to pair-of-arrays.
VLOG(3) << "HandleSort keys_literal: " << keys_literal.ToString();
VLOG(3) << "HandleSort values_literal: " << values_literal.ToString();
- const auto& keys_data = keys_literal.data<KeyType>();
- const auto& values_data = values_literal.data<ValueType>();
- using kv_pair = std::pair<KeyType, ValueType>;
- std::vector<kv_pair> key_value_vector;
- CHECK_EQ(keys_data.size(), values_data.size());
- key_value_vector.reserve(keys_data.size());
- for (int i = 0; i < keys_data.size(); ++i) {
- key_value_vector.push_back(std::make_pair(keys_data[i], values_data[i]));
- }
- std::sort(key_value_vector.begin(), key_value_vector.end(),
- [](const kv_pair& a, const kv_pair& b) {
- return SafeLess<KeyType>(a.first, b.first);
- });
- std::vector<KeyType> result_keys;
- std::vector<ValueType> result_values;
- for (const auto& key_value : key_value_vector) {
- result_keys.push_back(key_value.first);
- result_values.push_back(key_value.second);
+
+ auto sort_r1 = [](const Literal& keys_literal,
+ const Literal& values_literal) {
+ const auto& keys_data = keys_literal.data<KeyType>();
+ const auto& values_data = values_literal.data<ValueType>();
+
+ using kv_pair = std::pair<KeyType, ValueType>;
+ std::vector<kv_pair> key_value_vector;
+ CHECK_EQ(keys_data.size(), values_data.size());
+ key_value_vector.reserve(keys_data.size());
+ for (int i = 0; i < keys_data.size(); ++i) {
+ key_value_vector.push_back(std::make_pair(keys_data[i], values_data[i]));
+ }
+ std::sort(key_value_vector.begin(), key_value_vector.end(),
+ [](const kv_pair& a, const kv_pair& b) {
+ return SafeLess<KeyType>(a.first, b.first);
+ });
+ std::vector<KeyType> result_keys;
+ std::vector<ValueType> result_values;
+ for (const auto& key_value : key_value_vector) {
+ result_keys.push_back(key_value.first);
+ result_values.push_back(key_value.second);
+ }
+ auto result_keys_literal = MakeUnique<Literal>(keys_literal.shape());
+ result_keys_literal->PopulateR1(
+ tensorflow::gtl::ArraySlice<KeyType>(result_keys));
+ auto result_values_literal = MakeUnique<Literal>(values_literal.shape());
+ result_values_literal->PopulateR1(
+ tensorflow::gtl::ArraySlice<ValueType>(result_values));
+ return std::make_pair(std::move(result_keys_literal),
+ std::move(result_values_literal));
+ };
+
+ std::unique_ptr<Literal> result_tuple;
+ if (rank == 1) {
+ auto result_pair = sort_r1(keys_literal, values_literal);
+ result_tuple = LiteralUtil::MakeTuple(
+ {result_pair.first.get(), result_pair.second.get()});
+ } else {
+ // For R2 sort, the desired semantics are to sort each matrix row
+ // independently.
+ auto keys_result_literal = MakeUnique<Literal>(keys_literal.shape());
+ auto values_result_literal = MakeUnique<Literal>(values_literal.shape());
+ int64 r1_length = keys_literal.shape().dimensions(1);
+ for (int64 row = 0; row < keys_literal.shape().dimensions(0); ++row) {
+ TF_ASSIGN_OR_RETURN(auto keys_r1_slice,
+ keys_literal.Slice({row, 0}, {row + 1, r1_length})
+ ->Reshape({r1_length}));
+ TF_ASSIGN_OR_RETURN(auto values_r1_slice,
+ values_literal.Slice({row, 0}, {row + 1, r1_length})
+ ->Reshape({r1_length}));
+ auto r1_result_pair = sort_r1(*keys_r1_slice, *values_r1_slice);
+ TF_ASSIGN_OR_RETURN(auto sorted_keys,
+ r1_result_pair.first->Reshape({1, r1_length}));
+ TF_ASSIGN_OR_RETURN(auto sorted_values,
+ r1_result_pair.second->Reshape({1, r1_length}));
+ TF_RETURN_IF_ERROR(keys_result_literal->CopySliceFrom(
+ *sorted_keys, {0, 0}, {row, 0}, {1, r1_length}));
+ TF_RETURN_IF_ERROR(values_result_literal->CopySliceFrom(
+ *sorted_values, {0, 0}, {row, 0}, {1, r1_length}));
+ }
+ result_tuple = LiteralUtil::MakeTuple(
+ {keys_result_literal.get(), values_result_literal.get()});
}
- auto result_keys_literal = MakeUnique<Literal>(sort->operand(0)->shape());
- result_keys_literal->PopulateR1(
- tensorflow::gtl::ArraySlice<KeyType>(result_keys));
- auto result_values_literal = MakeUnique<Literal>(sort->operand(1)->shape());
- result_values_literal->PopulateR1(
- tensorflow::gtl::ArraySlice<ValueType>(result_values));
- auto result_tuple = LiteralUtil::MakeTuple(
- {result_keys_literal.get(), result_values_literal.get()});
+
VLOG(3) << "HandleSort result_tuple: " << result_tuple->ToString();
- return result_tuple;
+ return std::move(result_tuple);
}
template <typename KeyType>
@@ -1204,6 +1249,15 @@ StatusOr<std::unique_ptr<Literal>> EvaluateSort(HloInstruction* sort,
} // namespace
Status HloEvaluator::HandleSort(HloInstruction* sort) {
+ const int64 sort_dim = sort->dimensions(0);
+ const int64 rank = ShapeUtil::Rank(sort->operand(0)->shape());
+ if (sort_dim != rank - 1) {
+ return Unimplemented(
+ "Trying to sort along dimension %lld, which is not the last "
+ "dimension",
+ sort_dim);
+ }
+
if (!ShapeUtil::IsTuple(sort->shape())) {
return DefaultAction(sort);
} else {
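
The evaluator now handles rank-2 key-value sort by slicing each row out, reusing the rank-1 path, and copying the sorted row back, while HandleSort rejects any sort dimension other than the last. The same per-row semantics on plain vectors can be sketched as below (no XLA types; note the real code compares with SafeLess to order NaNs, which the plain < here does not):

    #include <algorithm>
    #include <cstddef>
    #include <utility>
    #include <vector>

    // Sorts each row of `keys` independently and applies the same permutation
    // to the matching row of `values`, mirroring the rank-2 semantics added to
    // EvaluateSortInternal. Rows must have equal length across both matrices.
    void SortRowsByKeys(std::vector<std::vector<float>>& keys,
                        std::vector<std::vector<int>>& values) {
      for (std::size_t row = 0; row < keys.size(); ++row) {
        // Array-of-pairs representation: sort by key, then split back out.
        std::vector<std::pair<float, int>> kv;
        kv.reserve(keys[row].size());
        for (std::size_t i = 0; i < keys[row].size(); ++i) {
          kv.emplace_back(keys[row][i], values[row][i]);
        }
        std::stable_sort(kv.begin(), kv.end(),
                         [](const std::pair<float, int>& a,
                            const std::pair<float, int>& b) { return a.first < b.first; });
        for (std::size_t i = 0; i < kv.size(); ++i) {
          keys[row][i] = kv[i].first;
          values[row][i] = kv[i].second;
        }
      }
    }

    int main() {
      std::vector<std::vector<float>> keys = {{3.f, 1.f, 2.f}, {0.5f, 0.25f, 1.f}};
      std::vector<std::vector<int>> values = {{30, 10, 20}, {5, 2, 10}};
      SortRowsByKeys(keys, values);
      // keys   -> {{1, 2, 3}, {0.25, 0.5, 1}}
      // values -> {{10, 20, 30}, {2, 5, 10}}
      return 0;
    }
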
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h b/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
index 2ae5f8bf36..f5e477e115 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
+++ b/tensorflow/compiler/xla/service/hlo_evaluator_typed_visitor.h
@@ -301,6 +301,14 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
return HandleFloor<ReturnT>(floor);
}
+ Status HandleImag(HloInstruction* imag) override {
+ TF_ASSIGN_OR_RETURN(parent_->evaluated_[imag],
+ ElementWiseUnaryOp(imag, [](ElementwiseT elem_operand) {
+ return std::imag(elem_operand);
+ }));
+ return Status::OK();
+ }
+
Status HandleLog(HloInstruction* log) override {
TF_ASSIGN_OR_RETURN(parent_->evaluated_[log],
ElementWiseUnaryOp(log, [](ElementwiseT elem_operand) {
@@ -604,6 +612,14 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
return Status::OK();
}
+ Status HandleReal(HloInstruction* real) override {
+ TF_ASSIGN_OR_RETURN(parent_->evaluated_[real],
+ ElementWiseUnaryOp(real, [](ElementwiseT elem_operand) {
+ return std::real(elem_operand);
+ }));
+ return Status::OK();
+ }
+
template <
typename NativeT,
typename std::enable_if<!is_complex_t<NativeT>::value>::type* = nullptr>
@@ -1399,25 +1415,48 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
!std::is_same<NativeT, bool>::value>::type* = nullptr>
Status HandleSort(HloInstruction* sort) {
auto keys = sort->operand(0);
- TF_RET_CHECK(ShapeUtil::Rank(keys->shape()) == 1)
- << "Sort is only supported for R1 shapes";
+ auto rank = ShapeUtil::Rank(keys->shape());
+ TF_RET_CHECK(rank > 0 && rank <= 2)
+ << "Sort is only supported for R1 and R2 shapes";
TF_RET_CHECK(sort->operand_count() == 1)
<< "Typed visitor does not support key-value sort";
const Literal& keys_literal = parent_->GetEvaluatedLiteralFor(keys);
- VLOG(3) << "HandleSort keys_literal: " << keys_literal.ToString();
- const auto& keys_data = keys_literal.data<ReturnT>();
- std::vector<ReturnT> result_data(keys_data.begin(), keys_data.end());
- std::sort(result_data.begin(), result_data.end(),
- [](const ReturnT& a, const ReturnT& b) {
- return SafeLess<ReturnT>(a, b);
- });
- auto result_literal = MakeUnique<Literal>(sort->shape());
- result_literal->PopulateR1(
- tensorflow::gtl::ArraySlice<ReturnT>(result_data));
- VLOG(3) << "HandleSort result_literal: " << result_literal->ToString();
- parent_->evaluated_[sort] = std::move(result_literal);
+ auto sort_r1 = [this](const Literal& keys_literal) {
+ VLOG(3) << "HandleSort keys_literal: " << keys_literal.ToString();
+ const auto& keys_data = keys_literal.data<ReturnT>();
+
+ std::vector<ReturnT> result_data(keys_data.begin(), keys_data.end());
+ std::sort(result_data.begin(), result_data.end(),
+ [](const ReturnT& a, const ReturnT& b) {
+ return SafeLess<ReturnT>(a, b);
+ });
+ auto result_literal = MakeUnique<Literal>(keys_literal.shape());
+ result_literal->PopulateR1(
+ tensorflow::gtl::ArraySlice<ReturnT>(result_data));
+ VLOG(3) << "HandleSort result_literal: " << result_literal->ToString();
+ return result_literal;
+ };
+
+ if (rank == 1) {
+ parent_->evaluated_[sort] = std::move(sort_r1(keys_literal));
+ } else {
+ // For R2 sort, the desired semantics are to sort each matrix row
+ // independently.
+ auto result_literal = MakeUnique<Literal>(keys_literal.shape());
+ int64 r1_length = keys->shape().dimensions(1);
+ for (int64 row = 0; row < keys->shape().dimensions(0); ++row) {
+ TF_ASSIGN_OR_RETURN(auto r1_slice,
+ keys_literal.Slice({row, 0}, {row + 1, r1_length})
+ ->Reshape({r1_length}));
+ auto r1_result = sort_r1(*r1_slice);
+ TF_ASSIGN_OR_RETURN(r1_result, r1_result->Reshape({1, r1_length}));
+ TF_RETURN_IF_ERROR(result_literal->CopySliceFrom(
+ *r1_result, {0, 0}, {row, 0}, {1, r1_length}));
+ }
+ parent_->evaluated_[sort] = std::move(result_literal);
+ }
return Status::OK();
}
@@ -1958,6 +1997,30 @@ class HloEvaluatorTypedVisitor : public DfsHloVisitorWithDefault {
return HandleReducePrecision<ElementwiseT>(reduce_precision);
}
+ template <typename NativeT,
+ typename std::enable_if<
+ std::is_same<NativeT, float>::value ||
+ std::is_same<NativeT, int32>::value ||
+ std::is_same<NativeT, uint32>::value>::type* = nullptr>
+ Status HandleIota(HloInstruction* iota) {
+ auto result = MakeUnique<Literal>(iota->shape());
+ auto data = result->data<ReturnT>();
+ std::iota(data.begin(), data.end(), 0);
+ parent_->evaluated_[iota] = std::move(result);
+ return Status::OK();
+ }
+ template <typename NativeT,
+ typename std::enable_if<
+ !(std::is_same<NativeT, float>::value ||
+ std::is_same<NativeT, int32>::value ||
+ std::is_same<NativeT, uint32>::value)>::type* = nullptr>
+ Status HandleIota(HloInstruction* iota) {
+ return InvalidArgument("Unsupported type for iota");
+ }
+ Status HandleIota(HloInstruction* iota) override {
+ return HandleIota<ReturnT>(iota);
+ }
+
private:
// Creates a vector of multipliers which can be used to create a linear index
// into shape.
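
HandleIota above uses a pair of enable_if-constrained overloads so that only float, int32, and uint32 get the std::iota fill while every other element type falls through to an error. The dispatch pattern in isolation looks like the following standalone C++ (returning a string instead of Status; names are illustrative only):

    #include <cstdint>
    #include <numeric>
    #include <string>
    #include <type_traits>
    #include <vector>

    // Overload selected for the element types iota supports: fill 0, 1, 2, ...
    template <typename NativeT,
              typename std::enable_if<
                  std::is_same<NativeT, float>::value ||
                  std::is_same<NativeT, std::int32_t>::value ||
                  std::is_same<NativeT, std::uint32_t>::value>::type* = nullptr>
    std::string FillIota(std::vector<NativeT>& data) {
      std::iota(data.begin(), data.end(), NativeT(0));
      return "ok";
    }

    // Overload selected for every other element type: report it as unsupported.
    template <typename NativeT,
              typename std::enable_if<
                  !(std::is_same<NativeT, float>::value ||
                    std::is_same<NativeT, std::int32_t>::value ||
                    std::is_same<NativeT, std::uint32_t>::value)>::type* = nullptr>
    std::string FillIota(std::vector<NativeT>& /*data*/) {
      return "Unsupported type for iota";
    }

    int main() {
      std::vector<float> supported(4);
      std::vector<double> unsupported(4);
      FillIota(supported);    // writes 0, 1, 2, 3
      FillIota(unsupported);  // resolves to the error overload at compile time
      return 0;
    }
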
diff --git a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
index 57cf34d7de..fd5085bed2 100644
--- a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
+++ b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
@@ -948,6 +948,7 @@ ColorScheme HloDotDumper::GetInstructionColor(const HloInstruction* instr) {
case HloOpcode::kGe:
case HloOpcode::kGt:
case HloOpcode::kImag:
+ case HloOpcode::kIota:
case HloOpcode::kIsFinite:
case HloOpcode::kLe:
case HloOpcode::kLog:
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc
index 830ebfb125..8b9bdd2f46 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction.cc
@@ -115,26 +115,27 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
TF_RET_CHECK(proto.operand_ids_size() == 2)
<< "Send instruction should have 2 operand but sees "
<< proto.operand_ids_size();
- instruction = CreateSend(operands(0), operands(1), proto.channel_id());
+ instruction = CreateSend(operands(0), operands(1), proto.channel_id(),
+ proto.is_host_transfer());
break;
case HloOpcode::kSendDone:
TF_RET_CHECK(proto.operand_ids_size() == 1)
<< "SendDone instruction should have 1 operand but sees "
<< proto.operand_ids_size();
- instruction = CreateSendDone(operands(0));
+ instruction = CreateSendDone(operands(0), proto.is_host_transfer());
break;
case HloOpcode::kRecv:
TF_RET_CHECK(proto.operand_ids_size() == 1)
<< "Recv instruction should have 1 operand but sees "
<< proto.operand_ids_size();
instruction = CreateRecv(proto.shape().tuple_shapes(0), operands(0),
- proto.channel_id());
+ proto.channel_id(), proto.is_host_transfer());
break;
case HloOpcode::kRecvDone:
TF_RET_CHECK(proto.operand_ids_size() == 1)
<< "RecvDone instruction should have 1 operand but sees "
<< proto.operand_ids_size();
- instruction = CreateRecvDone(operands(0));
+ instruction = CreateRecvDone(operands(0), proto.is_host_transfer());
break;
case HloOpcode::kReverse:
TF_RET_CHECK(proto.operand_ids_size() == 1)
@@ -386,6 +387,23 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
slice_sizes);
break;
}
+ case HloOpcode::kGather: {
+ TF_RET_CHECK(proto.operand_ids_size() == 2)
+ << "Gather instruction should have 2 operands but sees "
+ << proto.operand_ids_size();
+ TF_RET_CHECK(proto.has_gather_dimension_numbers())
+ << "Gather instruction should have GatherDimensionNumbers set.";
+ std::unique_ptr<GatherDimensionNumbers> gather_dimension_numbers =
+ MakeUnique<GatherDimensionNumbers>(proto.gather_dimension_numbers());
+ std::vector<int64> gather_window_bounds;
+ for (int64 bound : proto.gather_window_bounds()) {
+ gather_window_bounds.push_back(bound);
+ }
+ instruction =
+ CreateGather(proto.shape(), operands(0), operands(1),
+ *gather_dimension_numbers, gather_window_bounds);
+ break;
+ }
default: {
instruction = WrapUnique(new HloInstruction(opcode, proto.shape()));
for (const int64 operand_id : proto.operand_ids()) {
@@ -427,13 +445,6 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
instruction->set_sharding(sharding);
}
- if (proto.has_gather_dimension_numbers()) {
- instruction->gather_dimension_numbers_ =
- MakeUnique<GatherDimensionNumbers>(proto.gather_dimension_numbers());
- }
- for (int64 bound : proto.gather_window_bounds()) {
- instruction->gather_window_bounds_.push_back(bound);
- }
return std::move(instruction);
}
@@ -452,6 +463,11 @@ StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
return MakeUnique<HloConstantInstruction>(std::move(literal));
}
+/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateIota(
+ const Shape& shape) {
+ return WrapUnique(new HloInstruction(HloOpcode::kIota, shape));
+}
+
/* static */ std::unique_ptr<HloInstruction>
HloInstruction::CreateGetTupleElement(const Shape& shape,
HloInstruction* operand, int64 index) {
@@ -665,29 +681,33 @@ HloInstruction::CreateCrossReplicaSum(
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateSend(
- HloInstruction* operand, HloInstruction* token, int64 channel_id) {
- return MakeUnique<HloSendInstruction>(operand, token, channel_id);
+ HloInstruction* operand, HloInstruction* token, int64 channel_id,
+ bool is_host_transfer) {
+ return MakeUnique<HloSendInstruction>(operand, token, channel_id,
+ is_host_transfer);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateSendDone(
- HloInstruction* operand) {
+ HloInstruction* operand, bool is_host_transfer) {
auto send_operand = DynCast<HloSendInstruction>(operand);
CHECK(send_operand != nullptr)
<< "SendDone must take the context operand from Send";
- return MakeUnique<HloSendDoneInstruction>(send_operand);
+ return MakeUnique<HloSendDoneInstruction>(send_operand, is_host_transfer);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateRecv(
- const Shape& shape, HloInstruction* token, int64 channel_id) {
- return MakeUnique<HloRecvInstruction>(shape, token, channel_id);
+ const Shape& shape, HloInstruction* token, int64 channel_id,
+ bool is_host_transfer) {
+ return MakeUnique<HloRecvInstruction>(shape, token, channel_id,
+ is_host_transfer);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateRecvDone(
- HloInstruction* operand) {
+ HloInstruction* operand, bool is_host_transfer) {
auto recv_operand = DynCast<HloRecvInstruction>(operand);
CHECK(recv_operand != nullptr)
<< "RecvDone must take the context operand from Recv";
- return MakeUnique<HloRecvDoneInstruction>(recv_operand);
+ return MakeUnique<HloRecvDoneInstruction>(recv_operand, is_host_transfer);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateReverse(
@@ -978,6 +998,8 @@ bool HloInstruction::HasSideEffectNoRecurse() const {
case HloOpcode::kTrace:
case HloOpcode::kHostCompute:
return true;
+ case HloOpcode::kCrossReplicaSum:
+ return all_reduce_id().has_value();
default:
return false;
}
@@ -1036,34 +1058,8 @@ bool HloInstruction::HasSideEffect() const {
const Shape& shape, HloInstruction* operand, HloInstruction* gather_indices,
const GatherDimensionNumbers& gather_dim_numbers,
tensorflow::gtl::ArraySlice<int64> window_bounds) {
- std::unique_ptr<HloInstruction> instruction =
- WrapUnique(new HloInstruction(HloOpcode::kGather, shape));
- instruction->AppendOperand(operand);
- instruction->AppendOperand(gather_indices);
- instruction->gather_dimension_numbers_ =
- MakeUnique<GatherDimensionNumbers>(gather_dim_numbers);
- c_copy(window_bounds, std::back_inserter(instruction->gather_window_bounds_));
- return instruction;
-}
-
-/* static */ GatherDimensionNumbers HloInstruction::MakeGatherDimNumbers(
- tensorflow::gtl::ArraySlice<int64> output_window_dims,
- tensorflow::gtl::ArraySlice<int64> elided_window_dims,
- tensorflow::gtl::ArraySlice<int64> gather_dims_to_operand_dims,
- int64 index_vector_dim) {
- GatherDimensionNumbers gather_dim_numbers;
- for (int64 output_window_dim : output_window_dims) {
- gather_dim_numbers.add_output_window_dims(output_window_dim);
- }
- for (int64 elided_window_dim : elided_window_dims) {
- gather_dim_numbers.add_elided_window_dims(elided_window_dim);
- }
- for (int64 gather_dim_to_input_dim : gather_dims_to_operand_dims) {
- gather_dim_numbers.add_gather_dims_to_operand_dims(gather_dim_to_input_dim);
- }
-
- gather_dim_numbers.set_index_vector_dim(index_vector_dim);
- return gather_dim_numbers;
+ return MakeUnique<HloGatherInstruction>(shape, operand, gather_indices,
+ gather_dim_numbers, window_bounds);
}
/* static */ std::unique_ptr<HloInstruction> HloInstruction::CreateDomain(
@@ -1127,6 +1123,8 @@ std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewOperands(
case HloOpcode::kPad:
case HloOpcode::kDynamicSlice:
case HloOpcode::kSort:
+ case HloOpcode::kGather:
+ case HloOpcode::kIota:
clone = CloneWithNewOperandsImpl(shape, new_operands, context);
break;
// Unary ops.
@@ -1228,11 +1226,6 @@ std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewOperands(
true_computation(), new_operands[2],
false_computation());
break;
- case HloOpcode::kGather:
- CHECK_EQ(new_operands.size(), 2);
- clone = CreateGather(shape, new_operands[0], new_operands[1],
- *gather_dimension_numbers_, gather_window_bounds_);
- break;
case HloOpcode::kDomain:
CHECK_EQ(new_operands.size(), 1);
clone =
@@ -1529,8 +1522,7 @@ bool HloInstruction::IdenticalSlowPath(
case HloOpcode::kTupleSelect:
return true;
- // These opcodes have complex or special behavior so just return false.
- case HloOpcode::kWhile:
+ // This opcode has complex or special behavior so just return false.
case HloOpcode::kAfterAll:
return false;
@@ -1539,11 +1531,6 @@ bool HloInstruction::IdenticalSlowPath(
return protobuf_util::ProtobufEquals(dot_dimension_numbers(),
other.dot_dimension_numbers());
- case HloOpcode::kGather:
- return protobuf_util::ProtobufEquals(gather_dimension_numbers(),
- other.gather_dimension_numbers()) &&
- gather_window_bounds() == other.gather_window_bounds();
-
// Remaining instructions with special values.
case HloOpcode::kCall:
return eq_computations(to_apply(), other.to_apply());
@@ -1551,6 +1538,14 @@ bool HloInstruction::IdenticalSlowPath(
return eq_computations(true_computation(), other.true_computation()) &&
eq_computations(false_computation(), other.false_computation());
+ case HloOpcode::kWhile: {
+ if (eq_computations(while_body(), other.while_body()) &&
+ eq_computations(while_condition(), other.while_condition())) {
+ return true;
+ }
+ return false;
+ }
+
case HloOpcode::kDomain:
return operand_side_metadata().Matches(other.operand_side_metadata()) &&
user_side_metadata().Matches(other.user_side_metadata());
@@ -1574,6 +1569,7 @@ bool HloInstruction::IdenticalSlowPath(
case HloOpcode::kMap:
case HloOpcode::kSlice:
case HloOpcode::kConstant:
+ case HloOpcode::kIota:
case HloOpcode::kTrace:
case HloOpcode::kFusion:
case HloOpcode::kRng:
@@ -1590,9 +1586,11 @@ bool HloInstruction::IdenticalSlowPath(
case HloOpcode::kHostCompute:
case HloOpcode::kPad:
case HloOpcode::kDynamicSlice:
+ case HloOpcode::kGather:
LOG(FATAL) << "Base class impl called for opcode with subclass: "
<< opcode();
}
+ return false;
}
void HloInstruction::RemoveUser(HloInstruction* user) {
@@ -1863,6 +1861,10 @@ bool HloInstruction::IsElementwiseImpl(
}
}
+bool HloInstruction::IsCrossModuleAllReduce() const {
+ return opcode() == HloOpcode::kCrossReplicaSum && all_reduce_id();
+}
+
string HloInstruction::ToStringWithCanonicalNameMap(
const HloPrintOptions& options,
CanonicalNameMap* canonical_name_map) const {
@@ -1955,11 +1957,6 @@ std::vector<string> HloInstruction::ExtraAttributesToString(
if (dot_dimension_numbers_ != nullptr) {
extra.push_back(DotDimensionNumbersToString());
}
- if (gather_dimension_numbers_ != nullptr) {
- extra.push_back(GatherDimensionNumbersToString());
- extra.push_back(
- StrCat("window_bounds={", Join(gather_window_bounds(), ","), "}"));
- }
if (options.print_subcomputation_mode() ==
HloPrintOptions::PrintSubcomputationMode::kNameOnly) {
@@ -2089,14 +2086,6 @@ HloInstructionProto HloInstruction::ToProto() const {
if (dot_dimension_numbers_ != nullptr) {
*proto.mutable_dot_dimension_numbers() = *dot_dimension_numbers_;
}
- if (gather_dimension_numbers_ != nullptr) {
- *proto.mutable_gather_dimension_numbers() = *gather_dimension_numbers_;
- }
- if (opcode() == HloOpcode::kGather) {
- for (int64 bound : gather_window_bounds()) {
- proto.add_gather_window_bounds(bound);
- }
- }
if (has_sharding()) {
*proto.mutable_sharding() = sharding().ToProto();
@@ -2326,6 +2315,8 @@ Status HloInstruction::Visit(DfsHloVisitorBase<HloInstructionPtr>* visitor) {
return visitor->HandleDomain(this);
case HloOpcode::kAfterAll:
return visitor->HandleAfterAll(this);
+ case HloOpcode::kIota:
+ return visitor->HandleIota(this);
// These opcodes are not handled here.
case HloOpcode::kTrace:
@@ -2857,26 +2848,6 @@ std::ostream& operator<<(std::ostream& os, HloInstruction::FusionKind kind) {
return os << ToString(kind);
}
-string HloInstruction::GatherDimensionNumbersToString() const {
- CHECK_NE(gather_dimension_numbers_.get(), nullptr);
- string output_window_dims =
- StrCat("output_window_dims={",
- Join(gather_dimension_numbers_->output_window_dims(), ","), "}");
- string elided_window_dims =
- StrCat("elided_window_dims={",
- Join(gather_dimension_numbers_->elided_window_dims(), ","), "}");
- string gather_dims_to_operand_dims = StrCat(
- "gather_dims_to_operand_dims={",
- Join(gather_dimension_numbers_->gather_dims_to_operand_dims(), ","), "}");
- string index_vector_dim = StrCat(
- "index_vector_dim=", gather_dimension_numbers_->index_vector_dim());
-
- return Join<std::initializer_list<string>>(
- {output_window_dims, elided_window_dims, gather_dims_to_operand_dims,
- index_vector_dim},
- ", ");
-}
-
bool HloInstruction::CouldBeBitcast() const {
switch (opcode_) {
case HloOpcode::kTranspose:
@@ -3190,4 +3161,14 @@ int64 HloInstruction::slice_sizes(int64 dimension) const {
const std::vector<int64>& HloInstruction::dynamic_slice_sizes() const {
return Cast<HloDynamicSliceInstruction>(this)->dynamic_slice_sizes();
}
+
+const GatherDimensionNumbers& HloInstruction::gather_dimension_numbers() const {
+ return Cast<HloGatherInstruction>(this)->gather_dimension_numbers();
+}
+
+tensorflow::gtl::ArraySlice<int64> HloInstruction::gather_window_bounds()
+ const {
+ return Cast<HloGatherInstruction>(this)->gather_window_bounds();
+}
+
} // namespace xla
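
The gather accessors retained on HloInstruction just above are thin shims that forward to the subclass through a checked downcast, the same transition pattern used for the other subclassed opcodes. Reduced to plain C++ (illustrative names; dynamic_cast plus assert standing in for XLA's Cast), the shape of it is:

    #include <cassert>
    #include <utility>
    #include <vector>

    class Instruction {
     public:
      virtual ~Instruction() = default;
      // Accessor kept on the base class for a smooth transition; it is only
      // valid when the dynamic type really is GatherInstruction.
      const std::vector<long long>& gather_window_bounds() const;
    };

    class GatherInstruction : public Instruction {
     public:
      explicit GatherInstruction(std::vector<long long> bounds)
          : window_bounds_(std::move(bounds)) {}
      const std::vector<long long>& gather_window_bounds() const {
        return window_bounds_;
      }

     private:
      std::vector<long long> window_bounds_;
    };

    const std::vector<long long>& Instruction::gather_window_bounds() const {
      // Checked downcast, standing in for Cast<HloGatherInstruction>(this):
      // dies on the wrong concrete type instead of returning garbage.
      const auto* gather = dynamic_cast<const GatherInstruction*>(this);
      assert(gather != nullptr);
      return gather->gather_window_bounds();
    }

    int main() {
      GatherInstruction gather({1, 2, 3});
      const Instruction& base = gather;
      assert(base.gather_window_bounds().size() == 3);  // delegates to the subclass
      return 0;
    }
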
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.h b/tensorflow/compiler/xla/service/hlo_instruction.h
index b392d65636..30bff286c2 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.h
+++ b/tensorflow/compiler/xla/service/hlo_instruction.h
@@ -346,6 +346,9 @@ class HloInstruction {
static std::unique_ptr<HloInstruction> CreateConstant(
std::unique_ptr<Literal> literal);
+ // Creates an Iota instruction.
+ static std::unique_ptr<HloInstruction> CreateIota(const Shape& shape);
+
// Creates a get tuple element instruction.
static std::unique_ptr<HloInstruction> CreateGetTupleElement(
const Shape& shape, HloInstruction* operand, int64 index);
@@ -485,27 +488,30 @@ class HloInstruction {
// Creates an asynchronous send instruction with the given channel id, which
// initiates sending the operand data to a unique receive instruction in
- // another computation that has the same channel id.
- static std::unique_ptr<HloInstruction> CreateSend(HloInstruction* operand,
- HloInstruction* token,
- int64 channel_id);
+ // another computation that has the same channel id. If is_host_transfer is
+ // true, then this Send operation transfers data to the host.
+ static std::unique_ptr<HloInstruction> CreateSend(
+ HloInstruction* operand, HloInstruction* token, int64 channel_id,
+ bool is_host_transfer = false);
// Blocks until data transfer for the Send instruction (operand) is complete.
// The operand must be kSend.
static std::unique_ptr<HloInstruction> CreateSendDone(
- HloInstruction* operand);
+ HloInstruction* operand, bool is_host_transfer = false);
// Creates an asynchronous receive instruction with the given channel id,
// which allocates resources to receive data of the given shape from a unique
- // send instruction in another computation that has the same channel id.
- static std::unique_ptr<HloInstruction> CreateRecv(const Shape& shape,
- HloInstruction* token,
- int64 channel_id);
+ // send instruction in another computation that has the same channel id. If
+ // is_host_transfer is true, then this Recv operation transfers data from the
+ // host.
+ static std::unique_ptr<HloInstruction> CreateRecv(
+ const Shape& shape, HloInstruction* token, int64 channel_id,
+ bool is_host_transfer = false);
// Blocks until data transfer for the Recv instruction (operand) is complete
// and returns the receive buffer. The operand must be kRecv.
static std::unique_ptr<HloInstruction> CreateRecvDone(
- HloInstruction* operand);
+ HloInstruction* operand, bool is_host_transfer = false);
// Creates a slice instruction, where the operand is sliced by the given
// start/limit indices.
@@ -700,13 +706,6 @@ class HloInstruction {
// when we plumb a primordial token from the entry computation.
static std::unique_ptr<HloInstruction> CreateToken();
- // Creates an instance of GatherDimensionNumbers.
- static GatherDimensionNumbers MakeGatherDimNumbers(
- tensorflow::gtl::ArraySlice<int64> output_window_dims,
- tensorflow::gtl::ArraySlice<int64> elided_window_dims,
- tensorflow::gtl::ArraySlice<int64> gather_dims_to_operand_dims,
- int64 index_vector_dim);
-
// Returns the opcode for this instruction.
HloOpcode opcode() const { return opcode_; }
@@ -1081,19 +1080,6 @@ class HloInstruction {
// Returns the dump string of the dot dimension numbers.
string DotDimensionNumbersToString() const;
- const GatherDimensionNumbers& gather_dimension_numbers() const {
- CHECK(gather_dimension_numbers_ != nullptr);
- return *gather_dimension_numbers_;
- }
-
- tensorflow::gtl::ArraySlice<int64> gather_window_bounds() const {
- CHECK_EQ(opcode(), HloOpcode::kGather);
- return gather_window_bounds_;
- }
-
- // Returns the dump string of the gather dimension numbers.
- string GatherDimensionNumbersToString() const;
-
// Clones the HLO instruction. The clone will have the same opcode, shape, and
// operands. After creation the clone has no uses. "this" (the instruction
// cloned from) is not changed. Suffix is the string to append to the name of
@@ -1148,6 +1134,9 @@ class HloInstruction {
// Returns true if this instruction is elementwise on all its operands.
bool IsElementwise() const;
+ // Returns true if this is a cross-module all-reduce instruction.
+ bool IsCrossModuleAllReduce() const;
+
// Returns true if this elementwise instruction implicitly broadcasts operand
// `operand_idx`.
//
@@ -1460,6 +1449,12 @@ class HloInstruction {
// Delegates to HloDynamicSliceInstruction::dynamic_slice_sizes.
const std::vector<int64>& dynamic_slice_sizes() const;
+
+ // Delegates to HloGatherInstruction::gather_dimension_numbers.
+ const GatherDimensionNumbers& gather_dimension_numbers() const;
+ // Delegates to HloGatherInstruction::gather_window_bounds.
+ tensorflow::gtl::ArraySlice<int64> gather_window_bounds() const;
+
// Old methods kept for smooth subclassing transition END.
protected:
@@ -1603,9 +1598,6 @@ class HloInstruction {
// Describes the dimension numbers used for a dot.
std::unique_ptr<DotDimensionNumbers> dot_dimension_numbers_;
- std::unique_ptr<GatherDimensionNumbers> gather_dimension_numbers_;
- std::vector<int64> gather_window_bounds_;
-
// Used to tag kCopy instructions that are eligible for copy elision.
bool copy_elision_allowed_ = true;
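
The CreateSend/CreateSendDone/CreateRecv/CreateRecvDone factories in this header all gain an is_host_transfer flag that defaults to false, so existing device-to-device call sites compile unchanged while host transfers opt in explicitly. The general shape of that API evolution, with an illustrative factory rather than the real one:

    #include <memory>

    struct SendOp {
      int channel_id;
      bool is_host_transfer;
    };

    // Adding a defaulted trailing parameter keeps every existing call source
    // compatible; only callers that need host transfers pass the new flag.
    std::unique_ptr<SendOp> CreateSend(int channel_id,
                                       bool is_host_transfer = false) {
      return std::make_unique<SendOp>(SendOp{channel_id, is_host_transfer});
    }

    int main() {
      auto device_send = CreateSend(/*channel_id=*/1);  // pre-existing call shape
      auto host_send = CreateSend(/*channel_id=*/2, /*is_host_transfer=*/true);
      return 0;
    }
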
diff --git a/tensorflow/compiler/xla/service/hlo_instruction_test.cc b/tensorflow/compiler/xla/service/hlo_instruction_test.cc
index 87c048930f..b75a2bd34b 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction_test.cc
@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/protobuf_util.h"
#include "tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
@@ -1369,7 +1370,7 @@ TEST_F(HloInstructionTest, StringifyGather_0) {
HloInstruction* gather_instruction =
builder.AddInstruction(HloInstruction::CreateGather(
gather_result_shape, input, gather_indices,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1405,7 +1406,7 @@ TEST_F(HloInstructionTest, StringifyGather_1) {
HloInstruction* gather_instruction =
builder.AddInstruction(HloInstruction::CreateGather(
gather_result_shape, input, gather_indices,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
diff --git a/tensorflow/compiler/xla/service/hlo_instructions.cc b/tensorflow/compiler/xla/service/hlo_instructions.cc
index 7ea42caa7b..df26a2c744 100644
--- a/tensorflow/compiler/xla/service/hlo_instructions.cc
+++ b/tensorflow/compiler/xla/service/hlo_instructions.cc
@@ -181,8 +181,11 @@ std::unique_ptr<HloInstruction> HloFftInstruction::CloneWithNewOperandsImpl(
HloSendRecvInstruction::HloSendRecvInstruction(HloOpcode opcode,
const Shape& shape,
- int64 channel_id)
- : HloInstruction(opcode, shape), channel_id_(channel_id) {}
+ int64 channel_id,
+ bool is_host_transfer)
+ : HloInstruction(opcode, shape),
+ channel_id_(channel_id),
+ is_host_transfer_(is_host_transfer) {}
HloInstructionProto HloSendRecvInstruction::ToProto() const {
HloInstructionProto proto = HloInstruction::ToProto();
@@ -192,7 +195,12 @@ HloInstructionProto HloSendRecvInstruction::ToProto() const {
std::vector<string> HloSendRecvInstruction::ExtraAttributesToStringImpl(
const HloPrintOptions& options) const {
- return {StrCat("channel_id=", channel_id_)};
+ std::vector<string> attrs;
+ attrs.push_back(StrCat("channel_id=", channel_id_));
+ if (is_host_transfer()) {
+ attrs.push_back("is_host_transfer=true");
+ }
+ return attrs;
}
bool HloSendRecvInstruction::IdenticalSlowPath(
@@ -205,13 +213,14 @@ bool HloSendRecvInstruction::IdenticalSlowPath(
// Send instruction produces a tuple of {aliased operand, U32 context}.
HloSendInstruction::HloSendInstruction(HloInstruction* operand,
- HloInstruction* token, int64 channel_id)
+ HloInstruction* token, int64 channel_id,
+ bool is_host_transfer)
: HloSendRecvInstruction(
HloOpcode::kSend,
ShapeUtil::MakeTupleShape({CHECK_NOTNULL(operand)->shape(),
ShapeUtil::MakeShape(U32, {}),
ShapeUtil::MakeTokenShape()}),
- channel_id) {
+ channel_id, is_host_transfer) {
AppendOperand(operand);
AppendOperand(token);
}
@@ -222,12 +231,14 @@ std::unique_ptr<HloInstruction> HloSendInstruction::CloneWithNewOperandsImpl(
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 2);
return MakeUnique<HloSendInstruction>(new_operands[0], new_operands[1],
- channel_id());
+ channel_id(), is_host_transfer());
}
-HloSendDoneInstruction::HloSendDoneInstruction(HloSendInstruction* operand)
+HloSendDoneInstruction::HloSendDoneInstruction(HloSendInstruction* operand,
+ bool is_host_transfer)
: HloSendRecvInstruction(HloOpcode::kSendDone, ShapeUtil::MakeTokenShape(),
- CHECK_NOTNULL(operand)->channel_id()) {
+ CHECK_NOTNULL(operand)->channel_id(),
+ is_host_transfer) {
AppendOperand(operand);
}
@@ -238,17 +249,18 @@ HloSendDoneInstruction::CloneWithNewOperandsImpl(
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return MakeUnique<HloSendDoneInstruction>(
- Cast<HloSendInstruction>(new_operands[0]));
+ Cast<HloSendInstruction>(new_operands[0]), is_host_transfer());
}
// Recv instruction produces a tuple of {receive buffer, U32 context}.
HloRecvInstruction::HloRecvInstruction(const Shape& shape,
- HloInstruction* token, int64 channel_id)
+ HloInstruction* token, int64 channel_id,
+ bool is_host_transfer)
: HloSendRecvInstruction(
HloOpcode::kRecv,
ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeShape(U32, {}),
ShapeUtil::MakeTokenShape()}),
- channel_id) {
+ channel_id, is_host_transfer) {
AppendOperand(token);
}
@@ -258,16 +270,18 @@ std::unique_ptr<HloInstruction> HloRecvInstruction::CloneWithNewOperandsImpl(
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return MakeUnique<HloRecvInstruction>(
- ShapeUtil::GetTupleElementShape(shape, 0), new_operands[0], channel_id());
+ ShapeUtil::GetTupleElementShape(shape, 0), new_operands[0], channel_id(),
+ is_host_transfer());
}
-HloRecvDoneInstruction::HloRecvDoneInstruction(HloRecvInstruction* operand)
+HloRecvDoneInstruction::HloRecvDoneInstruction(HloRecvInstruction* operand,
+ bool is_host_transfer)
: HloSendRecvInstruction(
HloOpcode::kRecvDone,
ShapeUtil::MakeTupleShape(
{ShapeUtil::GetTupleElementShape(operand->shape(), 0),
ShapeUtil::MakeTokenShape()}),
- CHECK_NOTNULL(operand)->channel_id()) {
+ CHECK_NOTNULL(operand)->channel_id(), is_host_transfer) {
AppendOperand(operand);
}
@@ -278,7 +292,7 @@ HloRecvDoneInstruction::CloneWithNewOperandsImpl(
HloCloneContext* context) const {
CHECK_EQ(new_operands.size(), 1);
return MakeUnique<HloRecvDoneInstruction>(
- Cast<HloRecvInstruction>(new_operands[0]));
+ Cast<HloRecvInstruction>(new_operands[0]), is_host_transfer());
}
HloAllReduceInstruction::HloAllReduceInstruction(
@@ -291,8 +305,6 @@ HloAllReduceInstruction::HloAllReduceInstruction(
replica_group_ids_(replica_group_ids.begin(), replica_group_ids.end()),
cross_replica_sum_barrier_(barrier.begin(), barrier.end()),
all_reduce_id_(all_reduce_id) {
- // TODO(b/79737069): Remove the CHECK when supported.
- CHECK(!all_reduce_id_);
for (auto operand : operands) {
AppendOperand(operand);
}
@@ -1914,4 +1926,93 @@ HloDynamicSliceInstruction::CloneWithNewOperandsImpl(
return MakeUnique<HloDynamicSliceInstruction>(
shape, new_operands[0], new_operands[1], dynamic_slice_sizes_);
}
+
+HloGatherInstruction::HloGatherInstruction(
+ const Shape& shape, HloInstruction* operand, HloInstruction* gather_indices,
+ const GatherDimensionNumbers& gather_dim_numbers,
+ tensorflow::gtl::ArraySlice<int64> window_bounds)
+ : HloInstruction(HloOpcode::kGather, shape) {
+ AppendOperand(operand);
+ AppendOperand(gather_indices);
+ gather_dimension_numbers_ =
+ MakeUnique<GatherDimensionNumbers>(gather_dim_numbers);
+ c_copy(window_bounds, std::back_inserter(gather_window_bounds_));
+}
+
+string HloGatherInstruction::GatherDimensionNumbersToString() const {
+ CHECK(gather_dimension_numbers_ != nullptr);
+ string output_window_dims =
+ StrCat("output_window_dims={",
+ Join(gather_dimension_numbers_->output_window_dims(), ","), "}");
+ string elided_window_dims =
+ StrCat("elided_window_dims={",
+ Join(gather_dimension_numbers_->elided_window_dims(), ","), "}");
+ string gather_dims_to_operand_dims = StrCat(
+ "gather_dims_to_operand_dims={",
+ Join(gather_dimension_numbers_->gather_dims_to_operand_dims(), ","), "}");
+ string index_vector_dim = StrCat(
+ "index_vector_dim=", gather_dimension_numbers_->index_vector_dim());
+
+ return Join<std::initializer_list<string>>(
+ {output_window_dims, elided_window_dims, gather_dims_to_operand_dims,
+ index_vector_dim},
+ ", ");
+}
+
+/* static */ GatherDimensionNumbers HloGatherInstruction::MakeGatherDimNumbers(
+ tensorflow::gtl::ArraySlice<int64> output_window_dims,
+ tensorflow::gtl::ArraySlice<int64> elided_window_dims,
+ tensorflow::gtl::ArraySlice<int64> gather_dims_to_operand_dims,
+ int64 index_vector_dim) {
+ GatherDimensionNumbers gather_dim_numbers;
+ for (int64 output_window_dim : output_window_dims) {
+ gather_dim_numbers.add_output_window_dims(output_window_dim);
+ }
+ for (int64 elided_window_dim : elided_window_dims) {
+ gather_dim_numbers.add_elided_window_dims(elided_window_dim);
+ }
+ for (int64 gather_dim_to_input_dim : gather_dims_to_operand_dims) {
+ gather_dim_numbers.add_gather_dims_to_operand_dims(gather_dim_to_input_dim);
+ }
+
+ gather_dim_numbers.set_index_vector_dim(index_vector_dim);
+ return gather_dim_numbers;
+}
+
+HloInstructionProto HloGatherInstruction::ToProto() const {
+ HloInstructionProto proto = HloInstruction::ToProto();
+ *proto.mutable_gather_dimension_numbers() = gather_dimension_numbers();
+ for (int64 bound : gather_window_bounds()) {
+ proto.add_gather_window_bounds(bound);
+ }
+ return proto;
+}
+
+std::vector<string> HloGatherInstruction::ExtraAttributesToStringImpl(
+ const HloPrintOptions& options) const {
+ return {GatherDimensionNumbersToString(),
+ StrCat("window_bounds={", Join(gather_window_bounds(), ","), "}")};
+}
+
+bool HloGatherInstruction::IdenticalSlowPath(
+ const HloInstruction& other,
+ const std::function<bool(const HloComputation*, const HloComputation*)>&
+ eq_computations) const {
+ const auto& casted_other = static_cast<const HloGatherInstruction&>(other);
+ return protobuf_util::ProtobufEquals(
+ gather_dimension_numbers(),
+ casted_other.gather_dimension_numbers()) &&
+ gather_window_bounds() == casted_other.gather_window_bounds();
+}
+
+std::unique_ptr<HloInstruction> HloGatherInstruction::CloneWithNewOperandsImpl(
+ const Shape& shape,
+ tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ HloCloneContext* context) const {
+ CHECK_EQ(new_operands.size(), 2);
+ return MakeUnique<HloGatherInstruction>(
+ shape, new_operands[0], new_operands[1], gather_dimension_numbers(),
+ gather_window_bounds());
+}
+
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_instructions.h b/tensorflow/compiler/xla/service/hlo_instructions.h
index e922d94234..e4031f04d5 100644
--- a/tensorflow/compiler/xla/service/hlo_instructions.h
+++ b/tensorflow/compiler/xla/service/hlo_instructions.h
@@ -141,12 +141,15 @@ class HloSendRecvInstruction : public HloInstruction {
// channel.
int64 channel_id() const { return channel_id_; }
+ // Returns whether this send/recv instruction sends data to/from the host.
+ bool is_host_transfer() const { return is_host_transfer_; }
+
// Returns a serialized representation of this instruction.
HloInstructionProto ToProto() const override;
protected:
explicit HloSendRecvInstruction(HloOpcode opcode, const Shape& shape,
- int64 channel_id);
+ int64 channel_id, bool is_host_transfer);
private:
std::vector<string> ExtraAttributesToStringImpl(
@@ -157,12 +160,15 @@ class HloSendRecvInstruction : public HloInstruction {
eq_computations) const override;
// Represents a unique identifier for each Send/Recv instruction pair.
int64 channel_id_;
+
+ // Whether this send/recv instruction sends data to/from the host.
+ bool is_host_transfer_;
};
class HloSendInstruction : public HloSendRecvInstruction {
public:
explicit HloSendInstruction(HloInstruction* operand, HloInstruction* token,
- int64 channel_id);
+ int64 channel_id, bool is_host_transfer);
private:
// Implementation for non-common logic of CloneWithNewOperands.
@@ -174,7 +180,8 @@ class HloSendInstruction : public HloSendRecvInstruction {
class HloSendDoneInstruction : public HloSendRecvInstruction {
public:
- explicit HloSendDoneInstruction(HloSendInstruction* operand);
+ explicit HloSendDoneInstruction(HloSendInstruction* operand,
+ bool is_host_transfer);
private:
// Implementation for non-common logic of CloneWithNewOperands.
@@ -187,7 +194,7 @@ class HloSendDoneInstruction : public HloSendRecvInstruction {
class HloRecvInstruction : public HloSendRecvInstruction {
public:
explicit HloRecvInstruction(const Shape& shape, HloInstruction* token,
- int64 channel_id);
+ int64 channel_id, bool is_host_transfer);
private:
// Implementation for non-common logic of CloneWithNewOperands.
@@ -199,7 +206,8 @@ class HloRecvInstruction : public HloSendRecvInstruction {
class HloRecvDoneInstruction : public HloSendRecvInstruction {
public:
- explicit HloRecvDoneInstruction(HloRecvInstruction* operand);
+ explicit HloRecvDoneInstruction(HloRecvInstruction* operand,
+ bool is_host_transfer);
private:
// Implementation for non-common logic of CloneWithNewOperands.
@@ -1148,6 +1156,49 @@ class HloDynamicSliceInstruction : public HloInstruction {
// ('start' is specified dynamically in the second operand of the operation).
std::vector<int64> dynamic_slice_sizes_;
};
+
+class HloGatherInstruction : public HloInstruction {
+ public:
+ explicit HloGatherInstruction(
+ const Shape& shape, HloInstruction* operand,
+ HloInstruction* gather_indices,
+ const GatherDimensionNumbers& gather_dim_numbers,
+ tensorflow::gtl::ArraySlice<int64> window_bounds);
+ const GatherDimensionNumbers& gather_dimension_numbers() const {
+ CHECK(gather_dimension_numbers_ != nullptr);
+ return *gather_dimension_numbers_;
+ }
+ tensorflow::gtl::ArraySlice<int64> gather_window_bounds() const {
+ return gather_window_bounds_;
+ }
+ // Returns the dump string of the gather dimension numbers.
+ string GatherDimensionNumbersToString() const;
+ // Returns a serialized representation of this instruction.
+ HloInstructionProto ToProto() const override;
+
+ // Creates an instance of GatherDimensionNumbers.
+ static GatherDimensionNumbers MakeGatherDimNumbers(
+ tensorflow::gtl::ArraySlice<int64> output_window_dims,
+ tensorflow::gtl::ArraySlice<int64> elided_window_dims,
+ tensorflow::gtl::ArraySlice<int64> gather_dims_to_operand_dims,
+ int64 index_vector_dim);
+
+ private:
+ std::vector<string> ExtraAttributesToStringImpl(
+ const HloPrintOptions& options) const override;
+ bool IdenticalSlowPath(
+ const HloInstruction& other,
+ const std::function<bool(const HloComputation*, const HloComputation*)>&
+ eq_computations) const override;
+ std::unique_ptr<HloInstruction> CloneWithNewOperandsImpl(
+ const Shape& shape,
+ tensorflow::gtl::ArraySlice<HloInstruction*> new_operands,
+ HloCloneContext* context) const override;
+
+ std::unique_ptr<GatherDimensionNumbers> gather_dimension_numbers_;
+ std::vector<int64> gather_window_bounds_;
+};
+
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_HLO_INSTRUCTIONS_H_
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc b/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
index 6bcd7b042d..10bf9ffd6c 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
+++ b/tensorflow/compiler/xla/service/hlo_module_group_metadata.cc
@@ -20,6 +20,8 @@ limitations under the License.
#include <utility>
#include "tensorflow/compiler/xla/ptr_util.h"
+#include "tensorflow/compiler/xla/service/hlo_casting_utils.h"
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/util.h"
@@ -75,10 +77,23 @@ Status HloModuleGroupMetadata::Build() {
if (tracked == nullptr) {
return Status::OK();
}
- // Add the parent computation of this channel instruction and its peer
- // computation (both must be while computations) as companions.
+
+ std::vector<HloComputation*> peers;
if (IsChannelInstruction(hlo)) {
- HloComputation* peer_computation = PeerComputation(hlo);
+ peers.push_back(PeerComputation(hlo));
+ } else if (hlo->IsCrossModuleAllReduce()) {
+ for (HloInstruction* instr : GetAllReduceGroup(*hlo->all_reduce_id())) {
+ if (instr == hlo) {
+ continue;
+ }
+ peers.push_back(instr->parent());
+ }
+ }
+
+ // Add the parent computation of this channel (or all-reduce) instruction
+ // and its peer computation(s) (each must be a while computation) as
+ // companions.
+ for (HloComputation* peer_computation : peers) {
const TrackedInstruction* peer_tracked =
GetTrackedInstruction(peer_computation);
TF_RET_CHECK(peer_tracked != nullptr)
@@ -162,8 +177,12 @@ bool HloModuleGroupMetadata::IsChannelInstruction(
case HloOpcode::kSend:
case HloOpcode::kRecv:
case HloOpcode::kSendDone:
- case HloOpcode::kRecvDone:
- return true;
+ case HloOpcode::kRecvDone: {
+ const HloSendRecvInstruction* send_recv_instr =
+ DynCast<HloSendRecvInstruction>(instruction);
+ CHECK(send_recv_instr != nullptr);
+ return !send_recv_instr->is_host_transfer();
+ }
default:
return false;
}
@@ -175,7 +194,8 @@ bool HloModuleGroupMetadata::IsCompanionInstruction(HloInstruction* hlo) const {
bool HloModuleGroupMetadata::InstructionCommunicates(
HloInstruction* hlo) const {
- return IsChannelInstruction(hlo) || IsCompanionInstruction(hlo);
+ return IsChannelInstruction(hlo) || IsCompanionInstruction(hlo) ||
+ hlo->IsCrossModuleAllReduce();
}
const HloModuleGroupMetadata::Channel& HloModuleGroupMetadata::GetChannel(
@@ -200,6 +220,13 @@ HloComputation* HloModuleGroupMetadata::PeerComputation(
}
}
+const std::vector<HloInstruction*>& HloModuleGroupMetadata::GetAllReduceGroup(
+ int64 all_reduce_id) const {
+ auto it = all_reduce_map_.find(all_reduce_id);
+ CHECK(it != all_reduce_map_.end());
+ return it->second;
+}
+
std::vector<HloModuleGroupMetadata::TrackedInstruction>
HloModuleGroupMetadata::GetCompanionsPath(const HloInstruction* hlo) const {
std::vector<TrackedInstruction> path;
@@ -278,10 +305,27 @@ Status HloModuleGroupMetadata::RecordInstructions() {
tracked_instructions_[hlo->to_apply()] =
TrackedInstruction(hlo, ComputationKind::kCallFunction);
}
+
+ // Group cross module all-reduce instructions by the all_reduce id.
+ if (hlo->IsCrossModuleAllReduce()) {
+ TF_RET_CHECK(channel_id_map_.find(*hlo->all_reduce_id()) ==
+ channel_id_map_.end())
+ << "all_reduce_id " << *hlo->all_reduce_id()
+ << " is already used by a send/recv instruction";
+ all_reduce_map_[*hlo->all_reduce_id()].push_back(hlo);
+ max_channel_id_ = std::max(max_channel_id_, *hlo->all_reduce_id());
+ return Status::OK();
+ }
+
if (!IsChannelInstruction(hlo)) {
return Status::OK();
}
+ TF_RET_CHECK(all_reduce_map_.find(hlo->channel_id()) ==
+ all_reduce_map_.end())
+ << "channel id " << hlo->channel_id()
+ << " is already used by an all-reduce instruction";
+
// Add a new channel if needed.
if (channel_id_map_.find(hlo->channel_id()) == channel_id_map_.end()) {
channels_.emplace_back();
@@ -324,6 +368,7 @@ Status HloModuleGroupMetadata::RecordInstructions() {
}
}
VLOG(2) << "Created " << channels_.size() << " channels";
+ VLOG(2) << "Created " << all_reduce_map_.size() << " all-reduce groups";
return Status::OK();
}
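
RecordInstructions above buckets every cross-module all-reduce by its all_reduce_id so that all members of a group can later be walked together, and it also checks that the same id is not reused by a send/recv channel. The grouping step on its own, over a hypothetical Instr type with plain standard containers (std::optional needs C++17):

    #include <cstdint>
    #include <optional>
    #include <unordered_map>
    #include <vector>

    // Hypothetical stand-in for an HLO instruction that may carry an
    // all_reduce_id when it is a cross-module all-reduce.
    struct Instr {
      std::optional<std::int64_t> all_reduce_id;
    };

    // Buckets cross-module all-reduces by id; instructions that share an id
    // form one group and are treated as a single communicating unit.
    std::unordered_map<std::int64_t, std::vector<Instr*>> GroupAllReduces(
        const std::vector<Instr*>& instructions) {
      std::unordered_map<std::int64_t, std::vector<Instr*>> groups;
      for (Instr* instr : instructions) {
        if (instr->all_reduce_id.has_value()) {
          groups[*instr->all_reduce_id].push_back(instr);
        }
      }
      return groups;
    }

    int main() {
      Instr a{std::int64_t{7}}, b{std::int64_t{7}}, c{std::nullopt};
      std::vector<Instr*> instructions = {&a, &b, &c};
      auto groups = GroupAllReduces(instructions);
      // groups[7] now holds {&a, &b}; c has no all_reduce_id and is skipped.
      return 0;
    }
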
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_metadata.h b/tensorflow/compiler/xla/service/hlo_module_group_metadata.h
index ffde3a332d..84f2d3f5fb 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_metadata.h
+++ b/tensorflow/compiler/xla/service/hlo_module_group_metadata.h
@@ -92,7 +92,7 @@ class HloModuleGroupMetadata {
ComputationKind kind_ = ComputationKind::kInvalid;
};
- // Represents a channel and the 4 instructions that form the channel.
+ // Represents a channel and the instructions that form the channel.
struct Channel {
int64 id = -1;
HloInstruction* send = nullptr;
@@ -118,13 +118,17 @@ class HloModuleGroupMetadata {
// comment above on companion instructions.
bool IsCompanionInstruction(HloInstruction* hlo) const;
- // Returns true if the instruction is either a channel instruction or a
- // companion instruction.
+ // Returns true if the instruction is either a channel instruction, a
+ // cross-module all-reduce instruction, or a companion instruction.
bool InstructionCommunicates(HloInstruction* hlo) const;
// Returns the Channel instance for the given channel id.
const Channel& GetChannel(int64 channel_id) const;
+ // Returns the all-reduce instructions with the same all_reduce_id.
+ const std::vector<HloInstruction*>& GetAllReduceGroup(
+ int64 all_reduce_id) const;
+
// Returns the computation that contains the peer channel instructions for
// the given instruction.
//
@@ -187,13 +191,14 @@ class HloModuleGroupMetadata {
// Returns all channels in the module group.
const std::vector<Channel>& channels() const { return channels_; }
- // Returns the maximum channel id used in the module group.
+ // Returns the maximum channel id or all_reduce_id used in the module group.
int64 max_channel_id() const { return max_channel_id_; }
private:
Status Build();
- // Record all channel instructions and While instructions.
+ // Record all channel instructions, cross-module AllReduce instructions, and
+ // While/Conditional/Call instructions.
Status RecordInstructions();
// Verifies the given HloModules are well-formed and follow the specification,
@@ -255,6 +260,9 @@ class HloModuleGroupMetadata {
// Map from channel ids to the index in channels_.
tensorflow::gtl::FlatMap<int64, int64> channel_id_map_;
+ // Map from all-reduce ids to the all reduce instructions.
+ tensorflow::gtl::FlatMap<int64, std::vector<HloInstruction*>> all_reduce_map_;
+
// The maximum channel id used in the module group.
int64 max_channel_id_ = -1;
diff --git a/tensorflow/compiler/xla/service/hlo_module_group_util.cc b/tensorflow/compiler/xla/service/hlo_module_group_util.cc
index df1d562048..9fd0ade153 100644
--- a/tensorflow/compiler/xla/service/hlo_module_group_util.cc
+++ b/tensorflow/compiler/xla/service/hlo_module_group_util.cc
@@ -56,12 +56,17 @@ std::vector<HloInstruction*> HloModuleGroupUtil::GlobalPredecessors(
};
// If the given instruction is a companion instruction, we need to find the
- // predecessors of all of its companion instructions.
+ // predecessors of all of its companion instructions. If the instruction is an
+ // all-reduce, we need to find the predecessors of all the peer all-reduce
+ // instructions.
std::vector<HloInstruction*> instruction_group;
if (metadata_.IsCompanionInstruction(instruction)) {
for (HloInstruction* companion : metadata_.Companions(instruction)) {
instruction_group.push_back(companion);
}
+ } else if (instruction->IsCrossModuleAllReduce()) {
+ instruction_group =
+ metadata_.GetAllReduceGroup(*instruction->all_reduce_id());
} else {
instruction_group.push_back(instruction);
}
@@ -112,12 +117,17 @@ std::vector<HloInstruction*> HloModuleGroupUtil::GlobalSuccessors(
};
// If the given instruction is a companion instruction, we need to find the
- // successors of all of its companion instructions.
+ // successors of all of its companion instructions. If the instruction is an
+ // all-reduce, we need to find the successors of all its peer all-reduce
+ // instructions.
std::vector<HloInstruction*> instruction_group;
if (metadata_.IsCompanionInstruction(instruction)) {
for (HloInstruction* companion : metadata_.Companions(instruction)) {
instruction_group.push_back(companion);
}
+ } else if (instruction->IsCrossModuleAllReduce()) {
+ instruction_group =
+ metadata_.GetAllReduceGroup(*instruction->all_reduce_id());
} else {
instruction_group.push_back(instruction);
}
@@ -170,15 +180,17 @@ Status HloModuleGroupUtil::VisitTopologicalOrder(
HloInstruction* hlo = stack.top();
// Find the instruction group of the currently visited instruction. The
- // instruction group represents all companion instructions of the
- // current instruction, and are considered to be a single entity for the
- // purpose of the traversal (i.e., they must always be in the same visit
- // state).
+ // instruction group represents all companion instructions of the current
+ // instruction, or all the all-reduce instructions that belong to the same
+ // group, and are considered to be a single entity for the purpose of the
+ // traversal (i.e., they must always be in the same visit state).
std::vector<HloInstruction*> instruction_group;
if (metadata_.IsCompanionInstruction(hlo)) {
for (HloInstruction* companion : metadata_.Companions(hlo)) {
instruction_group.push_back(companion);
}
+ } else if (hlo->IsCrossModuleAllReduce()) {
+ instruction_group = metadata_.GetAllReduceGroup(*hlo->all_reduce_id());
} else {
instruction_group.push_back(hlo);
}
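
Note: the three call sites above (GlobalPredecessors, GlobalSuccessors, VisitTopologicalOrder) build the same "instruction group" the same way. A small hedged sketch of that selection, with Node and InstructionGroup as hypothetical stand-ins for the HLO types:

#include <cstdint>
#include <unordered_map>
#include <vector>

// Hypothetical stand-in; the real code works on HloInstruction*.
struct Node {
  bool is_companion = false;
  bool is_cross_module_all_reduce = false;
  int64_t all_reduce_id = -1;
  std::vector<Node*> companions;
};

// Companions and peer all-reduces are traversed as one entity (same visit
// state); any other instruction is a singleton group.
std::vector<Node*> InstructionGroup(
    Node* n,
    const std::unordered_map<int64_t, std::vector<Node*>>& all_reduce_groups) {
  if (n->is_companion) return n->companions;
  if (n->is_cross_module_all_reduce) {
    return all_reduce_groups.at(n->all_reduce_id);
  }
  return {n};
}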
diff --git a/tensorflow/compiler/xla/service/hlo_opcode.h b/tensorflow/compiler/xla/service/hlo_opcode.h
index 39e12c4815..59e9a5a94a 100644
--- a/tensorflow/compiler/xla/service/hlo_opcode.h
+++ b/tensorflow/compiler/xla/service/hlo_opcode.h
@@ -87,6 +87,7 @@ namespace xla {
V(kHostCompute, "host-compute") \
V(kImag, "imag") \
V(kInfeed, "infeed") \
+ V(kIota, "iota") \
V(kIsFinite, "is-finite") \
V(kLe, "less-than-or-equal-to", kHloOpcodeIsComparison) \
V(kLog, "log") \
diff --git a/tensorflow/compiler/xla/service/hlo_parser.cc b/tensorflow/compiler/xla/service/hlo_parser.cc
index f162d52d3c..e8eaf54949 100644
--- a/tensorflow/compiler/xla/service/hlo_parser.cc
+++ b/tensorflow/compiler/xla/service/hlo_parser.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/hlo_domain_metadata.h"
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/service/hlo_sharding_metadata.h"
#include "tensorflow/compiler/xla/shape_util.h"
@@ -118,6 +119,7 @@ class HloParser {
// Types of attributes.
enum class AttrTy {
+ kBool,
kInt64,
kInt32,
kFloat,
@@ -490,6 +492,14 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
HloInstruction::CreateConstant(std::move(literal)));
break;
}
+ case HloOpcode::kIota: {
+ if (!ParseOperands(&operands, /*expected_size=*/0) ||
+ !ParseAttributes(attrs)) {
+ return false;
+ }
+ instruction = builder->AddInstruction(HloInstruction::CreateIota(shape));
+ break;
+ }
// Unary ops.
case HloOpcode::kAbs:
case HloOpcode::kRoundNearestAfz:
@@ -680,18 +690,27 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
}
case HloOpcode::kRecv: {
optional<tensorflow::int64> channel_id;
+ // If the is_host_transfer attribute is not present then default to false.
+ optional<bool> is_host_transfer = false;
attrs["channel_id"] = {/*required=*/true, AttrTy::kInt64, &channel_id};
+ attrs["is_host_transfer"] = {/*required=*/false, AttrTy::kBool,
+ &is_host_transfer};
if (!ParseOperands(&operands, /*expected_size=*/1) ||
!ParseAttributes(attrs)) {
return false;
}
+ // If the is_host_transfer attribute is not present then default to false.
instruction = builder->AddInstruction(HloInstruction::CreateRecv(
- shape.tuple_shapes(0), operands[0], *channel_id));
+ shape.tuple_shapes(0), operands[0], *channel_id, *is_host_transfer));
break;
}
case HloOpcode::kRecvDone: {
optional<tensorflow::int64> channel_id;
+ // If the is_host_transfer attribute is not present then default to false.
+ optional<bool> is_host_transfer = false;
attrs["channel_id"] = {/*required=*/true, AttrTy::kInt64, &channel_id};
+ attrs["is_host_transfer"] = {/*required=*/false, AttrTy::kBool,
+ &is_host_transfer};
if (!ParseOperands(&operands, /*expected_size=*/1) ||
!ParseAttributes(attrs)) {
return false;
@@ -699,24 +718,32 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
if (channel_id != operands[0]->channel_id()) {
return false;
}
- instruction =
- builder->AddInstruction(HloInstruction::CreateRecvDone(operands[0]));
+ instruction = builder->AddInstruction(
+ HloInstruction::CreateRecvDone(operands[0], *is_host_transfer));
break;
}
case HloOpcode::kSend: {
optional<tensorflow::int64> channel_id;
+ // If the is_host_transfer attribute is not present then default to false.
+ optional<bool> is_host_transfer = false;
attrs["channel_id"] = {/*required=*/true, AttrTy::kInt64, &channel_id};
+ attrs["is_host_transfer"] = {/*required=*/false, AttrTy::kBool,
+ &is_host_transfer};
if (!ParseOperands(&operands, /*expected_size=*/2) ||
!ParseAttributes(attrs)) {
return false;
}
- instruction = builder->AddInstruction(
- HloInstruction::CreateSend(operands[0], operands[1], *channel_id));
+ instruction = builder->AddInstruction(HloInstruction::CreateSend(
+ operands[0], operands[1], *channel_id, *is_host_transfer));
break;
}
case HloOpcode::kSendDone: {
optional<tensorflow::int64> channel_id;
+ // If the is_host_transfer attribute is not present then default to false.
+ optional<bool> is_host_transfer = false;
attrs["channel_id"] = {/*required=*/true, AttrTy::kInt64, &channel_id};
+ attrs["is_host_transfer"] = {/*required=*/false, AttrTy::kBool,
+ &is_host_transfer};
if (!ParseOperands(&operands, /*expected_size=*/1) ||
!ParseAttributes(attrs)) {
return false;
@@ -724,8 +751,8 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
if (channel_id != operands[0]->channel_id()) {
return false;
}
- instruction =
- builder->AddInstruction(HloInstruction::CreateSendDone(operands[0]));
+ instruction = builder->AddInstruction(
+ HloInstruction::CreateSendDone(operands[0], *is_host_transfer));
break;
}
case HloOpcode::kGetTupleElement: {
@@ -1192,11 +1219,12 @@ bool HloParser::ParseInstruction(HloComputation::Builder* builder,
return false;
}
- GatherDimensionNumbers dim_numbers = HloInstruction::MakeGatherDimNumbers(
- /*output_window_dims=*/*output_window_dims,
- /*elided_window_dims=*/*elided_window_dims,
- /*gather_dims_to_operand_dims=*/*gather_dims_to_operand_dims,
- /*index_vector_dim=*/*index_vector_dim);
+ GatherDimensionNumbers dim_numbers =
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/*output_window_dims,
+ /*elided_window_dims=*/*elided_window_dims,
+ /*gather_dims_to_operand_dims=*/*gather_dims_to_operand_dims,
+ /*index_vector_dim=*/*index_vector_dim);
instruction = builder->AddInstruction(HloInstruction::CreateGather(
shape, /*operand=*/operands[0], /*gather_indices=*/operands[1],
@@ -2041,6 +2069,14 @@ bool HloParser::ParseAttributeHelper(
bool success = [&] {
LocTy attr_loc = lexer_.GetLoc();
switch (attr_type) {
+ case AttrTy::kBool: {
+ bool result;
+ if (!ParseBool(&result)) {
+ return false;
+ }
+ static_cast<optional<bool>*>(attr_out_ptr)->emplace(result);
+ return true;
+ }
case AttrTy::kInt64: {
tensorflow::int64 result;
if (!ParseInt64(&result)) {
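
Note: because the optional<bool> above is initialized to false before parsing, *is_host_transfer is well-defined whether or not the attribute appears in the HLO text. A standalone sketch of that default-when-absent pattern; the map-based "parser" is purely illustrative and not the HloParser API:

#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>

int main() {
  // Parsed attributes of one instruction; is_host_transfer is absent here.
  std::unordered_map<std::string, std::string> attrs = {{"channel_id", "15"}};

  std::optional<bool> is_host_transfer = false;  // default when absent
  if (auto it = attrs.find("is_host_transfer"); it != attrs.end()) {
    is_host_transfer = (it->second == "true");
  }
  // Safe to dereference either way; prints 0 for this input.
  std::cout << "is_host_transfer=" << *is_host_transfer << "\n";
}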
diff --git a/tensorflow/compiler/xla/service/hlo_parser_test.cc b/tensorflow/compiler/xla/service/hlo_parser_test.cc
index f06c705c42..1f0572c576 100644
--- a/tensorflow/compiler/xla/service/hlo_parser_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_parser_test.cc
@@ -288,6 +288,21 @@ ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> (f32[], token[]) {
)"
},
+{
+"SendRecvWithHostTransfer",
+R"(HloModule HostTransferSendRecv_module
+
+ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> (f32[], token[]) {
+ %token = token[] after-all()
+ %recv = (f32[], u32[], token[]) recv(token[] %token), channel_id=15, is_host_transfer=true
+ ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, is_host_transfer=true
+ %constant = f32[] constant(2.1), sharding={maximal device=0}
+ %send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token), channel_id=16, is_host_transfer=true
+ %send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, is_host_transfer=true
+}
+
+)"
+},
// get-tuple-element
{
"GetTupleElement",
@@ -990,6 +1005,17 @@ ENTRY CrossReplicaSumWithSubgroups {
}
)"
+},
+// Iota
+{
+"Iota",
+R"(HloModule iota
+
+ENTRY Iota {
+ ROOT iota = f32[100]{0} iota()
+}
+
+)"
}
});
// clang-format on
diff --git a/tensorflow/compiler/xla/service/hlo_rematerialization.cc b/tensorflow/compiler/xla/service/hlo_rematerialization.cc
index 59a8800a7d..cf0be30c7a 100644
--- a/tensorflow/compiler/xla/service/hlo_rematerialization.cc
+++ b/tensorflow/compiler/xla/service/hlo_rematerialization.cc
@@ -1203,7 +1203,7 @@ StatusOr<bool> HloRematerialization::RematerializeComputation(
StatusOr<bool> HloRematerialization::Run(
HloModule* module, SequentialHloOrdering::HloModuleSequence* sequence,
int64 memory_limit_bytes, RematerializationSizes* sizes,
- bool run_copy_elision) {
+ CopyInsertion* copy_insertion) {
// The sequence is constructed entirely by this method.
TF_RET_CHECK(sequence->empty());
@@ -1238,13 +1238,14 @@ StatusOr<bool> HloRematerialization::Run(
return size_function_(buffer.shape());
},
scheduler_algorithm_));
- if (run_copy_elision) {
+ if (copy_insertion) {
// We run a separate pass of copy elision here because the sequential
// ordering from the HLO schedule allows for more copies to be eliminated.
// TODO(b/80249101): Instead of a separate copy elision pass, use the
// ordering from the HLO schedule directly for copy insertion.
SequentialHloOrdering ordering(module, *sequence);
- TF_RETURN_IF_ERROR(RemoveUnnecessaryCopies(ordering, module));
+ TF_RETURN_IF_ERROR(
+ copy_insertion->RemoveUnnecessaryCopies(ordering, module));
}
// Compute peak memory usage of all computations in the module called in a
@@ -1349,10 +1350,10 @@ StatusOr<bool> HloRematerialization::Run(
int64 memory_limit_bytes, HloModule* hlo_module,
MemorySchedulerAlgorithm scheduler_algorithm,
SequentialHloOrdering::HloModuleSequence* sequence,
- RematerializationSizes* sizes, bool run_copy_elision) {
+ RematerializationSizes* sizes, CopyInsertion* copy_insertion) {
HloRematerialization remat(scheduler_algorithm, size_function);
return remat.Run(hlo_module, sequence, memory_limit_bytes, sizes,
- run_copy_elision);
+ copy_insertion);
}
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/hlo_rematerialization.h b/tensorflow/compiler/xla/service/hlo_rematerialization.h
index 59b4cf5dcc..2ec004350a 100644
--- a/tensorflow/compiler/xla/service/hlo_rematerialization.h
+++ b/tensorflow/compiler/xla/service/hlo_rematerialization.h
@@ -17,6 +17,7 @@
#include "tensorflow/compiler/xla/service/buffer_liveness.h"
#include "tensorflow/compiler/xla/service/call_graph.h"
+#include "tensorflow/compiler/xla/service/copy_insertion.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
@@ -57,8 +58,9 @@ class HloRematerialization {
// sizes: Optional outparam that indicates the peak memory usage of the HLO
// module before/after rematerialization.
//
- // run_copy_elision: Enable copy elision. This pass is used to eliminate
- // copies that were inserted before HLO scheduling.
+ // copy_insertion: If non-null, run copy elision after scheduling. This
+ // pass is used to eliminate copies that were inserted by copy insertion
+ // before HLO scheduling.
//
// TODO(b/80249101): Remove the 'run_copy_elision' parameter when copy
// insertion is integrated with HLO scheduling.
@@ -74,7 +76,7 @@ class HloRematerialization {
const ShapeSizeFunction& size_function, int64 memory_limit_bytes,
HloModule* hlo_module, MemorySchedulerAlgorithm scheduler_algorithm,
SequentialHloOrdering::HloModuleSequence* sequence,
- RematerializationSizes* sizes, bool run_copy_elision = true);
+ RematerializationSizes* sizes, CopyInsertion* copy_insertion = nullptr);
protected:
HloRematerialization(MemorySchedulerAlgorithm scheduler_algorithm,
@@ -90,7 +92,7 @@ class HloRematerialization {
StatusOr<bool> Run(HloModule* module,
SequentialHloOrdering::HloModuleSequence* sequence,
int64 memory_limit, RematerializationSizes* sizes,
- bool run_copy_elision);
+ CopyInsertion* copy_insertion);
// Rematerializes instructions within the given computation. 'order' is the
// order in which the computation's instructions will be emitted in the
diff --git a/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc b/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc
index cd131147e6..ac8c97d380 100644
--- a/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_rematerialization_test.cc
@@ -147,7 +147,7 @@ class HloRematerializationTest : public HloTestBase {
TF_EXPECT_OK(verifier().Run(module).status());
return HloRematerialization::RematerializeAndSchedule(
ByteSizeOf, memory_limit_bytes, module, DefaultMemoryScheduler,
- sequence, /*sizes=*/nullptr, /*run_copy_elision=*/false);
+ sequence, /*sizes=*/nullptr);
}
// Various shapes used in the canned computations.
diff --git a/tensorflow/compiler/xla/service/hlo_runner.cc b/tensorflow/compiler/xla/service/hlo_runner.cc
index 4f0569f405..b2725e2918 100644
--- a/tensorflow/compiler/xla/service/hlo_runner.cc
+++ b/tensorflow/compiler/xla/service/hlo_runner.cc
@@ -180,8 +180,12 @@ StatusOr<ScopedShapedBuffer> HloRunner::ExecuteWithDeviceBuffers(
TF_ASSIGN_OR_RETURN(std::unique_ptr<Executable> executable,
CreateExecutable(std::move(module), run_hlo_passes));
- return executable->ExecuteOnStreamWrapper(&service_run_options,
- /*profile=*/profile, arguments);
+ TF_ASSIGN_OR_RETURN(
+ ScopedShapedBuffer retval,
+ executable->ExecuteOnStreamWrapper(&service_run_options,
+ /*profile=*/profile, arguments));
+ TF_RETURN_IF_ERROR(stream.BlockHostUntilDone());
+ return std::move(retval);
}
StatusOr<ScopedShapedBuffer> HloRunner::ExecuteWithDeviceBuffers(
@@ -309,6 +313,7 @@ StatusOr<std::vector<std::unique_ptr<Literal>>> HloRunner::ExecuteReplicated(
std::vector<std::unique_ptr<Literal>> exec_results;
for (int64 i = 0; i < options.num_replicas; ++i) {
+ TF_RETURN_IF_ERROR(streams[i]->BlockHostUntilDone());
TF_ASSIGN_OR_RETURN(std::unique_ptr<Literal> literal,
backend().transfer_manager()->TransferLiteralFromDevice(
streams[i].get(), results[i]));
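
Note: the runner now blocks on the stream before returning or reading device buffers, so callers never observe results whose producing work is still in flight. A rough host-side analogue of that wait-before-return discipline using std::async (no XLA or stream APIs involved):

#include <future>
#include <iostream>

int main() {
  // Launch asynchronous "device" work.
  std::future<int> result = std::async(std::launch::async, [] { return 42; });
  // Analogue of stream.BlockHostUntilDone(): wait before handing the value
  // back to the caller.
  result.wait();
  std::cout << result.get() << "\n";
}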
diff --git a/tensorflow/compiler/xla/service/hlo_scheduling.cc b/tensorflow/compiler/xla/service/hlo_scheduling.cc
index c6d3909af6..27cc5361cd 100644
--- a/tensorflow/compiler/xla/service/hlo_scheduling.cc
+++ b/tensorflow/compiler/xla/service/hlo_scheduling.cc
@@ -567,6 +567,7 @@ StatusOr<SequentialHloOrdering::HloModuleSequence> ScheduleComputationsInModule(
sequence[computation] = std::move(one_computation_sequence);
}
}
+ VLOG(1) << "Module schedule:\n" << sequence;
return sequence;
}
diff --git a/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc b/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc
index 4f91d619ef..94f5a3b273 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc
+++ b/tensorflow/compiler/xla/service/hlo_sharding_metadata.cc
@@ -245,21 +245,6 @@ StatusOr<int64> ApplyDomainShardingPass(const DomainMetadata::Domain& domain,
Status ApplyDomainSharding(const DomainMetadata::Domain& domain,
const HloSharding& sharding) {
- // Here is the place to call external sharding normalizers, which are
- // implemented in other modules (ie, spatial partitioning).
- // The signature of the external normalizer function should be something
- // like:
- //
- // StatusOr<bool> Normalizer(const DomainMetadata::Domain&,
- // const HloSharding& sharding);
- //
- // The function should return true if it has processed the domain
- // normalization, false if domain was not one recognized by it, or an error.
- // We will call the functions in order below, and fall back to local code if
- // none of the external normalizers acted on the domain.
- // External normalizers should not handle the cases that are already handled
- // locally.
-
// None of the external normalizers handled the domain sharding, try to see
// whether this is a single sharding first.
auto single_sharding = sharding.ExtractSingleSharding();
@@ -390,25 +375,36 @@ string ShardingMetadata::ToString() const {
return sharding_ != nullptr ? sharding_->ToString() : "{}";
}
-Status ShardingMetadata::NormalizeInstructions(
- const DomainMetadata::Domain& domain) const {
- if (sharding_ != nullptr) {
- VLOG(4) << "Normalizing sharding to " << sharding_->ToString() << ":";
- TF_RETURN_IF_ERROR(ApplyDomainSharding(domain, *sharding_));
- TF_RETURN_IF_ERROR(FixupPassThroughDomainLinks(domain, *sharding_));
+/*static*/ StatusOr<const ShardingMetadata*>
+ShardingMetadata::ToShardingMetadata(const DomainMetadata* metadata) {
+ if (metadata->Kind() != ShardingMetadata::KindName()) {
+ return Status(
+ tensorflow::error::INVALID_ARGUMENT,
+ "ShardingMetadata normalizer called with incorrect domain metadata");
}
- return Status::OK();
+ return static_cast<const ShardingMetadata*>(metadata);
}
-Status NormalizeShardingDomain(const DomainMetadata::Domain& domain) {
- TF_ASSIGN_OR_RETURN(std::unique_ptr<HloSharding> sharding,
- ExtractOriginalCommonSharding(domain.instructions));
- if (sharding != nullptr) {
- VLOG(4) << "Normalizing sharding-less domain to " << sharding->ToString()
- << ":";
- TF_RETURN_IF_ERROR(ApplyDomainSharding(domain, *sharding));
+Status ShardingMetadata::NormalizeShardingDomain(
+ const DomainMetadata::Domain& domain, const DomainMetadata* metadata) {
+ if (metadata != nullptr) {
+ TF_ASSIGN_OR_RETURN(const auto& sharding_metadata,
+ ToShardingMetadata(metadata));
+ const HloSharding* sharding = sharding_metadata->sharding();
+ if (sharding != nullptr) {
+ VLOG(4) << "Normalizing sharding to " << sharding->ToString() << ":";
+ TF_RETURN_IF_ERROR(ApplyDomainSharding(domain, *sharding));
+ TF_RETURN_IF_ERROR(FixupPassThroughDomainLinks(domain, *sharding));
+ }
} else {
- VLOG(1) << "Unable to find common sharding";
+ TF_ASSIGN_OR_RETURN(std::unique_ptr<HloSharding> sharding,
+ ExtractOriginalCommonSharding(domain.instructions));
+ if (sharding != nullptr) {
+ VLOG(4) << "Normalizing sharding-less domain to " << sharding->ToString();
+ TF_RETURN_IF_ERROR(ApplyDomainSharding(domain, *sharding));
+ } else {
+ VLOG(1) << "Unable to find common sharding";
+ }
}
return Status::OK();
}
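
Note: NormalizeShardingDomain above now takes the domain metadata explicitly and only falls back to inferring a common sharding when none is supplied. A simplified standalone sketch of that dispatch; Sharding, ExtractCommonSharding and Normalize are hypothetical reductions of the XLA types:

#include <iostream>
#include <memory>
#include <vector>

struct Sharding { int device = 0; };  // hypothetical stand-in for HloSharding

// Sketch of ExtractOriginalCommonSharding: succeeds only if all agree.
std::unique_ptr<Sharding> ExtractCommonSharding(const std::vector<int>& devs) {
  if (devs.empty()) return nullptr;
  for (int d : devs) {
    if (d != devs.front()) return nullptr;
  }
  auto s = std::make_unique<Sharding>();
  s->device = devs.front();
  return s;
}

// Prefer the sharding carried by the metadata; otherwise try to infer one
// from the domain's instructions, and give up if none is common.
void Normalize(const Sharding* metadata_sharding, const std::vector<int>& devs) {
  if (metadata_sharding != nullptr) {
    std::cout << "normalize to metadata sharding " << metadata_sharding->device << "\n";
    return;
  }
  if (auto inferred = ExtractCommonSharding(devs)) {
    std::cout << "normalize to inferred sharding " << inferred->device << "\n";
  } else {
    std::cout << "unable to find common sharding\n";
  }
}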
diff --git a/tensorflow/compiler/xla/service/hlo_sharding_metadata.h b/tensorflow/compiler/xla/service/hlo_sharding_metadata.h
index ec162c3490..5e01fc0e22 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding_metadata.h
+++ b/tensorflow/compiler/xla/service/hlo_sharding_metadata.h
@@ -38,23 +38,26 @@ class ShardingMetadata : public DomainMetadata {
string ToString() const override;
- Status NormalizeInstructions(
- const DomainMetadata::Domain& domain) const override;
+ const HloSharding* sharding() const { return sharding_.get(); }
static tensorflow::StringPiece KindName() { return "sharding"; }
+ static StatusOr<const ShardingMetadata*> ToShardingMetadata(
+ const DomainMetadata* metadata);
+
+ // Apply the specified domain metadata onto the specified domain. If no
+ // metadata is specified then apply sharding heuristics and normalize the
+ // instructions whose sharding deviates from the one which is inferred to be
+ // the original one. Policy-wise, HLO passes are allowed to create new
+ // unassigned instructions, but if they do create assigned ones, they have to
+ // conform to the ones around.
+ static Status NormalizeShardingDomain(const DomainMetadata::Domain& domain,
+ const DomainMetadata* metadata);
+
private:
std::unique_ptr<HloSharding> sharding_;
};
-// Within a set of instructions which had common sharding attributes before
-// entring the HLO passes pipeline, apply sharding heuristics and normalize the
-// instructions whose sharding deviates from the one which is inferred as to be
-// the original one.
-// Policy wise, HLO passes are allowed to create new unassigned instructions,
-// but if they do create assigned ones, they have to conform to the ones around.
-Status NormalizeShardingDomain(const DomainMetadata::Domain& domain);
-
// Given an HLO graph edge between instruction and one of its operands, creates
// a ShardingMetadata based kDomain instruction if the sharding between
// instruction and operand changes. Returns nullptr if there is no need for a
diff --git a/tensorflow/compiler/xla/service/hlo_verifier.cc b/tensorflow/compiler/xla/service/hlo_verifier.cc
index 48eeba6afd..c80c1e0e7d 100644
--- a/tensorflow/compiler/xla/service/hlo_verifier.cc
+++ b/tensorflow/compiler/xla/service/hlo_verifier.cc
@@ -210,6 +210,12 @@ Status ShapeVerifier::HandleConstant(HloInstruction* constant) {
return CheckShape(constant, constant->literal().shape());
}
+Status ShapeVerifier::HandleIota(HloInstruction* iota) {
+ return ShapeUtil::Rank(iota->shape()) == 1
+ ? Status::OK()
+ : InternalError("Iota only supports arrays of rank 1.");
+}
+
Status ShapeVerifier::HandleGetTupleElement(HloInstruction* get_tuple_element) {
return CheckShape(get_tuple_element,
ShapeInference::InferGetTupleElementShape(
@@ -382,11 +388,6 @@ Status ShapeVerifier::HandlePad(HloInstruction* pad) {
}
Status ShapeVerifier::HandleSend(HloInstruction* send) {
- TF_RET_CHECK(send->users().size() == 1);
- const HloInstruction* send_done = send->users().front();
- TF_RET_CHECK(send_done->opcode() == HloOpcode::kSendDone);
- TF_RETURN_IF_ERROR(CheckSameChannel(send, send_done));
- TF_RETURN_IF_ERROR(CheckIsTokenOperand(send, 1));
return CheckShape(send,
ShapeUtil::MakeTupleShape({send->operand(0)->shape(),
ShapeUtil::MakeShape(U32, {}),
@@ -394,34 +395,22 @@ Status ShapeVerifier::HandleSend(HloInstruction* send) {
}
Status ShapeVerifier::HandleSendDone(HloInstruction* send_done) {
- TF_RET_CHECK(send_done->operands().size() == 1);
- const HloInstruction* send = send_done->operand(0);
- TF_RET_CHECK(send->opcode() == HloOpcode::kSend);
- TF_RETURN_IF_ERROR(CheckSameChannel(send, send_done));
-
return CheckShape(send_done, ShapeUtil::MakeTokenShape());
}
Status ShapeVerifier::HandleRecv(HloInstruction* recv) {
- TF_RET_CHECK(recv->users().size() == 1);
- const HloInstruction* recv_done = recv->users().front();
- TF_RET_CHECK(recv_done->opcode() == HloOpcode::kRecvDone);
- TF_RETURN_IF_ERROR(CheckSameChannel(recv, recv_done));
- TF_RETURN_IF_ERROR(CheckIsTokenOperand(recv, 0));
return CheckShape(
recv, ShapeUtil::MakeTupleShape(
- {ShapeUtil::GetTupleElementShape(recv_done->shape(), 0),
+ {ShapeUtil::GetTupleElementShape(recv->shape(), 0),
ShapeUtil::MakeShape(U32, {}), ShapeUtil::MakeTokenShape()}));
}
Status ShapeVerifier::HandleRecvDone(HloInstruction* recv_done) {
- TF_RET_CHECK(recv_done->operands().size() == 1);
- const HloInstruction* recv = recv_done->operand(0);
- TF_RET_CHECK(recv->opcode() == HloOpcode::kRecv);
- TF_RETURN_IF_ERROR(CheckSameChannel(recv, recv_done));
- return CheckShape(recv_done,
- ShapeUtil::MakeTupleShape({recv->shape().tuple_shapes(0),
- ShapeUtil::MakeTokenShape()}));
+ return CheckShape(
+ recv_done,
+ ShapeUtil::MakeTupleShape(
+ {ShapeUtil::GetTupleElementShape(recv_done->operand(0)->shape(), 0),
+ ShapeUtil::MakeTokenShape()}));
}
Status ShapeVerifier::HandleBatchNormTraining(
@@ -627,19 +616,6 @@ Status ShapeVerifier::CheckVariadicShape(const HloInstruction* instruction) {
instruction->opcode(), instruction->operands()));
}
-// Checks if the given two instructions shares the same channel id.
-Status ShapeVerifier::CheckSameChannel(const HloInstruction* instr1,
- const HloInstruction* instr2) {
- if (instr1->channel_id() != instr2->channel_id()) {
- return InternalError(
- "Expected to have the same channel id, actual channel ids are: %s "
- "(%lld), %s (%lld)",
- instr1->ToString().c_str(), instr1->channel_id(),
- instr2->ToString().c_str(), instr2->channel_id());
- }
- return Status::OK();
-}
-
string ComputationsToString(
tensorflow::gtl::ArraySlice<HloComputation*> computations) {
return tensorflow::str_util::Join(
@@ -908,10 +884,105 @@ Status VerifyEntryAndExitShapes(const HloModule& module) {
return Status::OK();
}
+// Checks if the given two instructions share the same channel id.
+Status CheckSameChannel(const HloInstruction* instr1,
+ const HloInstruction* instr2) {
+ if (instr1->channel_id() != instr2->channel_id()) {
+ return InternalError(
+ "Expected to have the same channel id, actual channel ids are: %s "
+ "(%lld), %s (%lld)",
+ instr1->ToString().c_str(), instr1->channel_id(),
+ instr2->ToString().c_str(), instr2->channel_id());
+ }
+ return Status::OK();
+}
+
+// Checks if the given two instructions have the same is_host_transfer attribute
+// value. Instructions must be send/recv instructions or their 'done' variant.
+Status CheckSameIsHostTransfer(const HloInstruction* instr1,
+ const HloInstruction* instr2) {
+ const HloSendRecvInstruction* send_recv1 =
+ DynCast<const HloSendRecvInstruction>(instr1);
+ const HloSendRecvInstruction* send_recv2 =
+ DynCast<const HloSendRecvInstruction>(instr2);
+ TF_RET_CHECK(send_recv1 != nullptr);
+ TF_RET_CHECK(send_recv2 != nullptr);
+ if (send_recv1->is_host_transfer() != send_recv2->is_host_transfer()) {
+ return InternalError(
+ "Expected instructions to have the same is-host-transfer property: %s, "
+ "%s ",
+ instr1->ToString().c_str(), instr2->ToString().c_str());
+ }
+ return Status::OK();
+}
+
+// Checks various invariants of send and recv instructions.
+Status VerifySendsAndRecvs(const HloModule& module) {
+ tensorflow::gtl::FlatMap<int64, const HloInstruction*> host_channels;
+ // Host send/recv instructions must have their own unique channel.
+ auto check_unique_host_channel = [&](const HloInstruction* instruction) {
+ const HloSendRecvInstruction* sendrecv =
+ DynCast<const HloSendRecvInstruction>(instruction);
+ if (sendrecv->is_host_transfer()) {
+ auto it_inserted =
+ host_channels.insert({sendrecv->channel_id(), sendrecv});
+ if (!it_inserted.second) {
+ return FailedPrecondition(
+ "Channel %lld is used for multiple host send/recv instructions: %s "
+ "and "
+ "%s",
+ sendrecv->channel_id(), sendrecv->ToString().c_str(),
+ it_inserted.first->second->ToString().c_str());
+ }
+ }
+
+ return Status::OK();
+ };
+
+ // Send/Recv instructions must have a single user: the corresponding
+ // SendDone/RecvDone with a matching channel.
+ for (const HloComputation* computation : module.computations()) {
+ for (const HloInstruction* instruction : computation->instructions()) {
+ switch (instruction->opcode()) {
+ case HloOpcode::kSend: {
+ TF_RETURN_IF_ERROR(check_unique_host_channel(instruction));
+ TF_RET_CHECK(instruction->users().size() == 1);
+ const HloInstruction* send_done = instruction->users().front();
+ TF_RET_CHECK(send_done->opcode() == HloOpcode::kSendDone);
+ TF_RETURN_IF_ERROR(CheckSameChannel(instruction, send_done));
+ TF_RETURN_IF_ERROR(CheckSameIsHostTransfer(instruction, send_done));
+ break;
+ }
+ case HloOpcode::kRecv: {
+ TF_RETURN_IF_ERROR(check_unique_host_channel(instruction));
+ TF_RET_CHECK(instruction->users().size() == 1);
+ const HloInstruction* recv_done = instruction->users().front();
+ TF_RET_CHECK(recv_done->opcode() == HloOpcode::kRecvDone);
+ TF_RETURN_IF_ERROR(CheckSameChannel(instruction, recv_done));
+ TF_RETURN_IF_ERROR(CheckSameIsHostTransfer(instruction, recv_done));
+ break;
+ }
+ case HloOpcode::kSendDone:
+ TF_RET_CHECK(instruction->operands().size() == 1);
+ TF_RET_CHECK(instruction->operand(0)->opcode() == HloOpcode::kSend);
+ break;
+ case HloOpcode::kRecvDone:
+ TF_RET_CHECK(instruction->operands().size() == 1);
+ TF_RET_CHECK(instruction->operand(0)->opcode() == HloOpcode::kRecv);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ return Status::OK();
+}
+
} // namespace
StatusOr<bool> HloVerifier::Run(HloModule* module) {
TF_RETURN_IF_ERROR(VerifyHloStructure(module));
+ TF_RETURN_IF_ERROR(VerifySendsAndRecvs(*module));
tensorflow::gtl::FlatMap<string, const HloInstruction*> instructions;
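
Note: the send/recv invariants moved out of the per-shape handlers into a module-level pass. A compact sketch of what that pass checks (single user, matching channel id, matching host flag, unique host channels); Op and Verify are hypothetical stand-ins, not the HloVerifier API:

#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

struct Op {
  std::string kind;  // "send", "recv", "send-done", "recv-done", ...
  int64_t channel_id = -1;
  bool is_host_transfer = false;
  std::vector<Op*> users;
};

bool Verify(const std::vector<Op*>& ops) {
  std::unordered_map<int64_t, Op*> host_channels;
  for (Op* op : ops) {
    if (op->kind != "send" && op->kind != "recv") continue;
    // A host channel may be used by only one send/recv.
    if (op->is_host_transfer &&
        !host_channels.emplace(op->channel_id, op).second) {
      return false;
    }
    // Exactly one user: the matching *-done op, on the same channel, with
    // the same is_host_transfer value.
    if (op->users.size() != 1) return false;
    Op* done = op->users.front();
    if (done->kind != op->kind + "-done") return false;
    if (done->channel_id != op->channel_id) return false;
    if (done->is_host_transfer != op->is_host_transfer) return false;
  }
  return true;
}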
diff --git a/tensorflow/compiler/xla/service/hlo_verifier.h b/tensorflow/compiler/xla/service/hlo_verifier.h
index 9e62bdc8a9..79f7aa9f4c 100644
--- a/tensorflow/compiler/xla/service/hlo_verifier.h
+++ b/tensorflow/compiler/xla/service/hlo_verifier.h
@@ -37,6 +37,7 @@ class ShapeVerifier : public DfsHloVisitor {
Status HandleSelect(HloInstruction* select) override;
Status HandleTupleSelect(HloInstruction* tuple_select) override;
Status HandleConcatenate(HloInstruction* concatenate) override;
+ Status HandleIota(HloInstruction* iota) override;
Status HandleConvert(HloInstruction* convert) override;
Status HandleBitcastConvert(HloInstruction* convert) override;
Status HandleCopy(HloInstruction* copy) override;
@@ -102,10 +103,6 @@ class ShapeVerifier : public DfsHloVisitor {
Status CheckTernaryShape(const HloInstruction* instruction);
Status CheckVariadicShape(const HloInstruction* instruction);
- // Checks if the given two instructions share the same channel id.
- Status CheckSameChannel(const HloInstruction* instr1,
- const HloInstruction* instr2);
-
private:
// Whether the inputs and output of an instruction can contain both F32s and
// BF16s. Tuples that include both F32s and BF16s are allowed regardless of
diff --git a/tensorflow/compiler/xla/service/instruction_fusion.cc b/tensorflow/compiler/xla/service/instruction_fusion.cc
index da91262130..af07370135 100644
--- a/tensorflow/compiler/xla/service/instruction_fusion.cc
+++ b/tensorflow/compiler/xla/service/instruction_fusion.cc
@@ -73,6 +73,7 @@ bool IsAlwaysDuplicable(const HloInstruction& instruction) {
case HloOpcode::kGt:
case HloOpcode::kImag:
case HloOpcode::kInfeed:
+ case HloOpcode::kIota:
case HloOpcode::kIsFinite:
case HloOpcode::kLe:
case HloOpcode::kLt:
diff --git a/tensorflow/compiler/xla/service/layout_assignment.cc b/tensorflow/compiler/xla/service/layout_assignment.cc
index fedc83c8f8..9705687b00 100644
--- a/tensorflow/compiler/xla/service/layout_assignment.cc
+++ b/tensorflow/compiler/xla/service/layout_assignment.cc
@@ -30,10 +30,12 @@ limitations under the License.
#include "tensorflow/compiler/xla/map_util.h"
#include "tensorflow/compiler/xla/ptr_util.h"
#include "tensorflow/compiler/xla/service/computation_layout.h"
+#include "tensorflow/compiler/xla/service/hlo_casting_utils.h"
#include "tensorflow/compiler/xla/service/hlo_computation.h"
#include "tensorflow/compiler/xla/service/hlo_dce.h"
#include "tensorflow/compiler/xla/service/hlo_graph_dumper.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/service/hlo_opcode.h"
#include "tensorflow/compiler/xla/service/logical_buffer.h"
#include "tensorflow/compiler/xla/service/tuple_simplifier.h"
@@ -59,7 +61,6 @@ namespace xla {
// anonymous namespace, instead of three or four spread all over this file.
namespace {
-
} // namespace
std::ostream& operator<<(std::ostream& out,
@@ -113,14 +114,18 @@ LayoutConstraints::LayoutConstraints(
HloComputation* computation)
: points_to_analysis_(points_to_analysis), computation_(computation) {
// Gather all array-shaped logical buffers into unconstrained_buffer_ids.
- for (LogicalBuffer::Id id = 0; id < points_to_analysis_.num_logical_buffers();
- id++) {
- auto& buffer = points_to_analysis_.logical_buffer(id);
- // The points to analysis is computed per module, restrict constraints to
- // array buffers in this computation.
- if (buffer.IsArray() && buffer.instruction()->parent() == computation) {
- unconstrained_buffer_ids_.insert(buffer.id());
- }
+ for (HloInstruction* inst : computation_->instructions()) {
+ points_to_analysis_.GetPointsToSet(inst).ForEachElement(
+ [&](const ShapeIndex&, const PointsToSet::BufferList& buffers) {
+ for (const LogicalBuffer* buffer : buffers) {
+ // The points to analysis is computed per module, restrict
+ // constraints to array buffers in this computation.
+ if (buffer->IsArray() &&
+ buffer->instruction()->parent() == computation) {
+ unconstrained_buffer_ids_.insert(buffer->id());
+ }
+ }
+ });
}
}
@@ -392,6 +397,43 @@ string LayoutConstraints::ToString() const {
return output;
}
+namespace {
+
+bool IsHostSendRecv(const HloInstruction* instruction) {
+ const HloSendRecvInstruction* send_recv_instr =
+ DynCast<HloSendRecvInstruction>(instruction);
+ return send_recv_instr != nullptr && send_recv_instr->is_host_transfer();
+}
+
+} // namespace
+
+Status LayoutAssignment::BuildHostChannelConstraints(
+ HloComputation* computation) {
+ for (auto* instruction : computation->instructions()) {
+ const HloSendRecvInstruction* send_recv_instr =
+ DynCast<HloSendRecvInstruction>(instruction);
+ if (send_recv_instr == nullptr || !send_recv_instr->is_host_transfer()) {
+ continue;
+ }
+
+ // For host transfers the Send and Recv instruction carry the layout.
+ if (instruction->opcode() == HloOpcode::kSend ||
+ instruction->opcode() == HloOpcode::kRecv) {
+ const Shape& data_shape =
+ ShapeUtil::GetTupleElementShape(send_recv_instr->shape(), 0);
+ TF_RET_CHECK(ShapeUtil::IsArray(data_shape));
+ TF_RET_CHECK(LayoutUtil::HasLayout(data_shape));
+ const Layout* prev_layout = host_channel_constraints_.ConstrainChannel(
+ send_recv_instr->channel_id(), data_shape.layout());
+ TF_RET_CHECK(prev_layout == nullptr)
+ << "Cannot constrain host transfer layout as it was set to "
+ << LayoutUtil::HumanString(*prev_layout) << ": "
+ << send_recv_instr->ToString();
+ }
+ }
+ return Status::OK();
+}
+
Status LayoutAssignment::AddMandatoryConstraints(
const ComputationLayout* computation_layout,
ChannelLayoutConstraints* channel_constraints, HloComputation* computation,
@@ -399,6 +441,11 @@ Status LayoutAssignment::AddMandatoryConstraints(
VLOG(3) << "Adding mandatory layout constraints to computation "
<< computation->name();
+ auto get_channel_constraints = [&](const HloInstruction* instruction) {
+ return IsHostSendRecv(instruction) ? &host_channel_constraints_
+ : channel_constraints;
+ };
+
// Constrain layouts of instructions which define values with pre-existing
// layouts.
for (auto* instruction : computation->instructions()) {
@@ -435,18 +482,21 @@ Status LayoutAssignment::AddMandatoryConstraints(
if (instruction->opcode() == HloOpcode::kSend ||
instruction->opcode() == HloOpcode::kRecv) {
- CHECK(channel_constraints)
+ CHECK(get_channel_constraints(instruction))
<< "Multi-module layout assignment requires ChannelLayoutConstraints";
int64 channel_id = instruction->channel_id();
- if (!channel_constraints->IsChannelConstrained(channel_id)) {
+ if (!get_channel_constraints(instruction)
+ ->IsChannelConstrained(channel_id)) {
continue;
}
if (instruction->opcode() == HloOpcode::kSend) {
// TODO(b/68493863): Change to use SetOperandLayout().
const Shape send_buffer_shape = instruction->operand(0)->shape();
TF_RET_CHECK(ShapeUtil::IsArray(send_buffer_shape));
- Shape new_buffer_shape = channel_constraints->LayoutShapeForChannel(
- send_buffer_shape, instruction->channel_id());
+ Shape new_buffer_shape =
+ get_channel_constraints(instruction)
+ ->LayoutShapeForChannel(send_buffer_shape,
+ instruction->channel_id());
TF_RETURN_IF_ERROR(constraints->SetInstructionLayout(
new_buffer_shape, instruction->operand(0)));
} else {
@@ -457,8 +507,9 @@ Status LayoutAssignment::AddMandatoryConstraints(
const LogicalBuffer* buffer,
constraints->points_to_analysis().GetBufferDefinedAt(instruction,
{0}));
- Shape new_shape = channel_constraints->LayoutShapeForChannel(
- recv_buffer_shape, instruction->channel_id());
+ Shape new_shape = get_channel_constraints(instruction)
+ ->LayoutShapeForChannel(
+ recv_buffer_shape, instruction->channel_id());
TF_RETURN_IF_ERROR(
constraints->SetBufferLayout(new_shape.layout(), *buffer));
}
@@ -1535,6 +1586,10 @@ Status LayoutAssignment::RunOnComputation(
ChannelLayoutConstraints* channel_constraints) {
VLOG(2) << "LayoutAssignment::RunOnComputation(" << computation->name()
<< ")";
+
+ // Must be run before clearing layouts.
+ TF_RETURN_IF_ERROR(BuildHostChannelConstraints(computation));
+
TF_RETURN_IF_ERROR(ClearComputationLayouts(computation));
if (computation_layout != nullptr) {
auto it = computation_layouts_.find(computation);
@@ -1624,14 +1679,20 @@ Status LayoutAssignment::RunOnComputation(
Status LayoutAssignment::ConstrainChannelLayouts(
HloComputation* computation,
ChannelLayoutConstraints* channel_constraints) {
+ auto get_channel_constraints = [&](const HloInstruction* instruction) {
+ return IsHostSendRecv(instruction) ? &host_channel_constraints_
+ : channel_constraints;
+ };
// We go through the kRecvDone before. These must either impose their layout,
- // of find a matching one already existing (ConstrainChannel() returns
+ // or find a matching one already existing (ConstrainChannel() returns
// nullptr).
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kRecvDone) {
- const Layout* layout = channel_constraints->ConstrainChannel(
- instruction->channel_id(),
- ShapeUtil::GetSubshape(instruction->shape(), {0}).layout());
+ const Layout* layout =
+ get_channel_constraints(instruction)
+ ->ConstrainChannel(
+ instruction->channel_id(),
+ ShapeUtil::GetSubshape(instruction->shape(), {0}).layout());
TF_RET_CHECK(layout == nullptr)
<< instruction->ToString()
<< " cannot constrain layout as it was set to "
@@ -1644,8 +1705,9 @@ Status LayoutAssignment::ConstrainChannelLayouts(
for (HloInstruction* instruction : computation->MakeInstructionPostOrder()) {
if (instruction->opcode() == HloOpcode::kSend) {
HloInstruction* operand = instruction->mutable_operand(0);
- const Layout* layout = channel_constraints->ConstrainChannel(
- instruction->channel_id(), operand->shape().layout());
+ const Layout* layout = get_channel_constraints(instruction)
+ ->ConstrainChannel(instruction->channel_id(),
+ operand->shape().layout());
if (layout != nullptr) {
// We found an already constrained layout which does not match the one
// the kSend wants to impose. Either add a new kCopy, or use the
diff --git a/tensorflow/compiler/xla/service/layout_assignment.h b/tensorflow/compiler/xla/service/layout_assignment.h
index b75ecb311a..f9e8dbea2f 100644
--- a/tensorflow/compiler/xla/service/layout_assignment.h
+++ b/tensorflow/compiler/xla/service/layout_assignment.h
@@ -488,6 +488,9 @@ class LayoutAssignment : public HloPassInterface {
}
}
+ // Adds constraints related to host Send/Recv instructions.
+ Status BuildHostChannelConstraints(HloComputation* computation);
+
// Map containing the layouts of all computations assigned so
// far. Computations are handled in a topological sort where computations are
// handled before their caller instructions so the layouts of caller
@@ -507,6 +510,10 @@ class LayoutAssignment : public HloPassInterface {
// computations/instructions.
ChannelLayoutConstraints channel_constraints_;
+ // Layout constraints for send/recv instructions which communicate with the
+ // host.
+ ChannelLayoutConstraints host_channel_constraints_;
+
// The set of HLO instructions which lacked any layout constraint, thus
// receiving propagated default layouts.
tensorflow::gtl::FlatSet<const HloInstruction*>
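
Note: layout assignment now keeps host-transfer channels in their own constraint table, chosen per instruction by the get_channel_constraints lambda above. A toy sketch of that two-table selection; ChannelConstraints here models a layout as a plain int and is not the real ChannelLayoutConstraints class:

#include <cstdint>
#include <unordered_map>

// Maps a channel id to a layout token; returns the previously recorded
// layout on conflict, or nullptr if the constraint was accepted.
struct ChannelConstraints {
  std::unordered_map<int64_t, int> layouts;
  const int* Constrain(int64_t channel, int layout) {
    auto it = layouts.emplace(channel, layout).first;
    return it->second == layout ? nullptr : &it->second;
  }
};

// Host send/recv channels use their own table, so they never collide with
// the cross-module channel ids used between devices.
struct LayoutAssigner {
  ChannelConstraints cross_module;
  ChannelConstraints host;
  ChannelConstraints* For(bool is_host_send_recv) {
    return is_host_send_recv ? &host : &cross_module;
  }
};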
diff --git a/tensorflow/compiler/xla/service/llvm_ir/BUILD b/tensorflow/compiler/xla/service/llvm_ir/BUILD
index 6f1e04a1c6..0573304912 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/BUILD
+++ b/tensorflow/compiler/xla/service/llvm_ir/BUILD
@@ -164,9 +164,9 @@ cc_library(
)
cc_library(
- name = "ops",
- srcs = ["ops.cc"],
- hdrs = ["ops.h"],
+ name = "dynamic_update_slice_util",
+ srcs = ["dynamic_update_slice_util.cc"],
+ hdrs = ["dynamic_update_slice_util.h"],
deps = [
":fused_ir_emitter",
":ir_array",
@@ -181,6 +181,23 @@ cc_library(
)
cc_library(
+ name = "sort_util",
+ srcs = ["sort_util.cc"],
+ hdrs = ["sort_util.h"],
+ deps = [
+ ":ir_array",
+ ":llvm_loop",
+ ":llvm_util",
+ ":loop_emitter",
+ "//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla/service/gpu:parallel_loop_emitter",
+ "//tensorflow/compiler/xla/service/gpu:partition_assignment",
+ "//tensorflow/core:lib",
+ "@llvm//:core",
+ ],
+)
+
+cc_library(
name = "tuple_ops",
srcs = ["tuple_ops.cc"],
hdrs = ["tuple_ops.h"],
diff --git a/tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc b/tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc
index 93a8c130e1..e5370eca56 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/alias_analysis.cc
@@ -28,8 +28,7 @@ namespace llvm_ir {
// Sentry allocation used to represent parameters of the entry computation in
// alias_scope_metadata_ and noalias_metadata_.
static const BufferAllocation* kParameterAllocation = new BufferAllocation(
- /*index=*/-1, /*size=*/0, /*is_thread_local=*/false, /*is_reusable=*/false,
- LogicalBuffer::Color(0));
+ /*index=*/-1, /*size=*/0, LogicalBuffer::Color(0));
void AliasAnalysis::AddAliasingInformationToIrArray(const HloInstruction& hlo,
llvm_ir::IrArray* array,
diff --git a/tensorflow/compiler/xla/service/llvm_ir/ops.cc b/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.cc
index 3b298f4746..1bd73fc793 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/ops.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.cc
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include "tensorflow/compiler/xla/service/llvm_ir/ops.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h"
#include "tensorflow/compiler/xla/service/gpu/partition_assignment.h"
#include "tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h"
@@ -38,16 +38,16 @@ bool CanUpdateDynamicSliceInPlace(HloInstruction* dynamic_update_slice,
// Emits a sequential loop if launch_dimensions is null.
static Status EmitDynamicUpdateSliceInPlaceImpl(
const Shape& update_shape, const ElementGenerator& start_indices_generator,
- ElementGenerator update_array_generator, const IrArray& output_array,
- const gpu::LaunchDimensions* launch_dimensions,
- tensorflow::StringPiece name, llvm::IRBuilder<>* ir_builder) {
+ bool is_signed, ElementGenerator update_array_generator,
+ const IrArray& output_array, const gpu::LaunchDimensions* launch_dimensions,
+ tensorflow::StringPiece name, llvm::IRBuilder<>* b) {
const Shape& output_shape = output_array.GetShape();
// Read start indices from start_indices_generator.
const int64 rank = ShapeUtil::Rank(output_shape);
- IrArray::Index start_index(ir_builder->getInt64Ty(), rank);
+ IrArray::Index start_index(b->getInt64Ty(), rank);
for (int64 i = 0; i < rank; ++i) {
- IrArray::Index dim_index({ir_builder->getInt64(i)});
+ IrArray::Index dim_index({b->getInt64(i)});
TF_ASSIGN_OR_RETURN(start_index[i], start_indices_generator(dim_index));
llvm::Value* output_dim_size = llvm::ConstantInt::get(
start_index[i]->getType(), output_shape.dimensions(i));
@@ -59,18 +59,20 @@ static Status EmitDynamicUpdateSliceInPlaceImpl(
// TODO(b/74360564): This is implementation defined behavior, but is
// currently respected by all implementations. Change this if we ever decide
- // to oficially document different behavior.
- llvm::Value* max_bound =
- ir_builder->CreateSub(output_dim_size, update_dim_size);
+ // to officially document different behavior.
+ llvm::Value* max_bound = b->CreateSub(output_dim_size, update_dim_size);
llvm::Value* zero = llvm::ConstantInt::get(start_index[i]->getType(), 0);
- start_index[i] = ir_builder->CreateSelect(
- ir_builder->CreateICmp(llvm::ICmpInst::ICMP_SGE, zero, start_index[i]),
- zero, start_index[i]);
-
- start_index[i] = ir_builder->CreateSelect(
- ir_builder->CreateICmp(llvm::ICmpInst::ICMP_SLE, max_bound,
- start_index[i]),
- max_bound, start_index[i]);
+ start_index[i] =
+ b->CreateSelect(b->CreateICmp(is_signed ? llvm::ICmpInst::ICMP_SGE
+ : llvm::ICmpInst::ICMP_UGE,
+ zero, start_index[i]),
+ zero, start_index[i]);
+
+ start_index[i] =
+ b->CreateSelect(b->CreateICmp(is_signed ? llvm::ICmpInst::ICMP_SLE
+ : llvm::ICmpInst::ICMP_ULE,
+ max_bound, start_index[i]),
+ max_bound, start_index[i]);
}
auto loop_body_emitter = [&](const IrArray::Index& update_index) -> Status {
@@ -81,31 +83,30 @@ static Status EmitDynamicUpdateSliceInPlaceImpl(
//
IrArray::Index output_index(start_index.GetType(), rank);
for (int64 i = 0; i < rank; ++i) {
- llvm::Value* start_index0 = ir_builder->CreateSExtOrBitCast(
- start_index[i], update_index[i]->getType());
- output_index[i] = ir_builder->CreateAdd(start_index0, update_index[i]);
+ llvm::Value* start_index0 =
+ b->CreateSExtOrBitCast(start_index[i], update_index[i]->getType());
+ output_index[i] = b->CreateAdd(start_index0, update_index[i]);
}
// Do output[output_index] = update[update_index].
TF_ASSIGN_OR_RETURN(llvm::Value * update_data,
update_array_generator(update_index));
- output_array.EmitWriteArrayElement(output_index, update_data, ir_builder);
+ output_array.EmitWriteArrayElement(output_index, update_data, b);
return Status::OK();
};
if (launch_dimensions != nullptr) {
return gpu::ParallelLoopEmitter(loop_body_emitter, update_shape,
- *launch_dimensions, ir_builder)
+ *launch_dimensions, b)
.EmitLoop(name);
}
- return LoopEmitter(loop_body_emitter, update_shape, ir_builder)
- .EmitLoop(name);
+ return LoopEmitter(loop_body_emitter, update_shape, b).EmitLoop(name);
}
Status EmitDynamicUpdateSliceInPlace(
tensorflow::gtl::ArraySlice<IrArray> operand_arrays,
const IrArray& output_array, tensorflow::StringPiece name,
- llvm::IRBuilder<>* ir_builder) {
+ llvm::IRBuilder<>* b) {
VLOG(2) << "EmitDynamicUpdateSliceInPlace for " << name;
// No need to use operand_arrays[0], the input array of the
@@ -116,15 +117,16 @@ Status EmitDynamicUpdateSliceInPlace(
Shape update_shape = update_array.GetShape();
ElementGenerator start_indices_generator = [&](const IrArray::Index& index) {
- return start_indices_array.EmitReadArrayElement(index, ir_builder);
+ return start_indices_array.EmitReadArrayElement(index, b);
};
ElementGenerator update_array_generator = [&](const IrArray::Index& index) {
- return update_array.EmitReadArrayElement(index, ir_builder);
+ return update_array.EmitReadArrayElement(index, b);
};
+ bool is_signed = ShapeUtil::ElementIsSigned(start_indices_array.GetShape());
return EmitDynamicUpdateSliceInPlaceImpl(
- update_shape, start_indices_generator, update_array_generator,
- output_array, /*launch_dimensions=*/nullptr, name, ir_builder);
+ update_shape, start_indices_generator, is_signed, update_array_generator,
+ output_array, /*launch_dimensions=*/nullptr, name, b);
}
// Shared implementation for EmitFusedDynamicUpdateSliceInPlace and
@@ -135,8 +137,7 @@ static Status EmitFusedDynamicUpdateSliceInPlaceImpl(
HloInstruction* fusion,
tensorflow::gtl::ArraySlice<IrArray> fusion_operand_arrays,
const IrArray& fusion_output_array, ElementalIrEmitter* elemental_emitter,
- const gpu::LaunchDimensions* launch_dimensions,
- llvm::IRBuilder<>* ir_builder) {
+ const gpu::LaunchDimensions* launch_dimensions, llvm::IRBuilder<>* b) {
CHECK_EQ(fusion->opcode(), HloOpcode::kFusion);
VLOG(2) << "EmitFusedDynamicUpdateSliceInPlace for "
<< fusion->ToShortString();
@@ -170,30 +171,30 @@ static Status EmitFusedDynamicUpdateSliceInPlaceImpl(
ElementGenerator start_indices_generator =
fused_emitter.GetGenerator(start_indices);
+ bool is_signed = ShapeUtil::ElementIsSigned(start_indices->shape());
return EmitDynamicUpdateSliceInPlaceImpl(
- update_shape, start_indices_generator, update_array_generator,
- fusion_output_array, launch_dimensions, IrName(fusion), ir_builder);
+ update_shape, start_indices_generator, is_signed, update_array_generator,
+ fusion_output_array, launch_dimensions, IrName(fusion), b);
}
Status EmitFusedDynamicUpdateSliceInPlace(
HloInstruction* fusion,
tensorflow::gtl::ArraySlice<IrArray> fusion_operand_arrays,
const IrArray& fusion_output_array, ElementalIrEmitter* elemental_emitter,
- llvm::IRBuilder<>* ir_builder) {
+ llvm::IRBuilder<>* b) {
return EmitFusedDynamicUpdateSliceInPlaceImpl(
fusion, fusion_operand_arrays, fusion_output_array, elemental_emitter,
- /*launch_dimensions=*/nullptr, ir_builder);
+ /*launch_dimensions=*/nullptr, b);
}
Status EmitParallelFusedDynamicUpdateSliceInPlace(
HloInstruction* fusion,
tensorflow::gtl::ArraySlice<IrArray> fusion_operand_arrays,
const IrArray& fusion_output_array, ElementalIrEmitter* elemental_emitter,
- const gpu::LaunchDimensions& launch_dimensions,
- llvm::IRBuilder<>* ir_builder) {
+ const gpu::LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* b) {
return EmitFusedDynamicUpdateSliceInPlaceImpl(
fusion, fusion_operand_arrays, fusion_output_array, elemental_emitter,
- &launch_dimensions, ir_builder);
+ &launch_dimensions, b);
}
} // namespace llvm_ir
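
Note: the is_signed plumbing above switches the clamp of the start indices between signed and unsigned comparisons, because an unsigned compare against a "negative" start index would clamp to the wrong bound. A host-side sketch of the same clamping rule (plain C++, not the IR emitter):

#include <algorithm>
#include <cstdint>
#include <iostream>

// Clamp a dynamic-update-slice start index to [0, output_dim - update_dim],
// interpreting the index as signed or unsigned per the index element type.
int64_t ClampStart(int64_t start, int64_t output_dim, int64_t update_dim,
                   bool is_signed) {
  const int64_t max_bound = output_dim - update_dim;
  if (is_signed) {
    return std::min(std::max<int64_t>(start, 0), max_bound);
  }
  const uint64_t u = static_cast<uint64_t>(start);
  return static_cast<int64_t>(
      std::min<uint64_t>(u, static_cast<uint64_t>(max_bound)));
}

int main() {
  std::cout << ClampStart(-3, 10, 4, /*is_signed=*/true) << "\n";   // 0
  std::cout << ClampStart(-3, 10, 4, /*is_signed=*/false) << "\n";  // 6 (wraps, clamps to max)
}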
diff --git a/tensorflow/compiler/xla/service/llvm_ir/ops.h b/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.h
index 175b081e84..3502577d23 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/ops.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.h
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_OPS_H_
-#define TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_OPS_H_
+#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_DYNAMIC_UPDATE_SLICE_UTIL_H_
+#define TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_DYNAMIC_UPDATE_SLICE_UTIL_H_
#include "tensorflow/compiler/xla/service/buffer_assignment.h"
#include "tensorflow/compiler/xla/service/elemental_ir_emitter.h"
@@ -66,7 +66,7 @@ inline bool CanEmitFusedDynamicUpdateSliceInPlace(
Status EmitDynamicUpdateSliceInPlace(
tensorflow::gtl::ArraySlice<IrArray> operand_arrays,
const IrArray& output_array, tensorflow::StringPiece name,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
// Given a loop-fusion node whose root is a dynamic-update-slice op whose
// array-to-be-updated and output share the same buffer slice, emits
@@ -76,7 +76,7 @@ Status EmitFusedDynamicUpdateSliceInPlace(
HloInstruction* fusion,
tensorflow::gtl::ArraySlice<IrArray> fusion_operand_arrays,
const IrArray& fusion_output_array, ElementalIrEmitter* elemental_emitter,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
// Same as EmitFusedDynamicUpdateSliceInPlace, except emits a parallel loop with
// the given launch dimensions.
@@ -84,10 +84,9 @@ Status EmitParallelFusedDynamicUpdateSliceInPlace(
HloInstruction* fusion,
tensorflow::gtl::ArraySlice<IrArray> fusion_operand_arrays,
const IrArray& fusion_output_array, ElementalIrEmitter* elemental_emitter,
- const gpu::LaunchDimensions& launch_dimensions,
- llvm::IRBuilder<>* ir_builder);
+ const gpu::LaunchDimensions& launch_dimensions, llvm::IRBuilder<>* b);
} // namespace llvm_ir
} // namespace xla
-#endif // TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_OPS_H_
+#endif // TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_DYNAMIC_UPDATE_SLICE_UTIL_H_
diff --git a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc
index b12ce97e28..72ede377e1 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.cc
@@ -52,7 +52,7 @@ Status FusedIrEmitter::DefaultAction(HloInstruction* hlo) {
// that would be regenerated without caching. But this might increase the
// JIT compilation time.
if (generated_value_bb == nullptr ||
- generated_value_bb == ir_builder_->GetInsertBlock()) {
+ generated_value_bb == b_->GetInsertBlock()) {
VLOG(3) << "The cached generated value is reused.";
return generated_value;
}
@@ -60,8 +60,7 @@ Status FusedIrEmitter::DefaultAction(HloInstruction* hlo) {
"a different BB ("
<< llvm_ir::AsString(generated_value_bb->getName())
<< ") from the current insertion block ("
- << llvm_ir::AsString(ir_builder_->GetInsertBlock()->getName())
- << ").";
+ << llvm_ir::AsString(b_->GetInsertBlock()->getName()) << ").";
}
TF_ASSIGN_OR_RETURN(
@@ -77,14 +76,14 @@ Status FusedIrEmitter::HandleConstant(HloInstruction* constant) {
llvm::Constant* initializer =
llvm_ir::ConvertLiteralToIrConstant(literal, module_);
llvm::GlobalVariable* global = new llvm::GlobalVariable(
- *ir_builder_->GetInsertBlock()->getModule(), initializer->getType(),
+ *b_->GetInsertBlock()->getModule(), initializer->getType(),
/*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, initializer,
/*Name=*/"");
llvm::Constant* shape_constant = llvm::ConstantExpr::getBitCast(
global, llvm_ir::ShapeToIrType(literal.shape(), module_)->getPointerTo());
generators_[constant] = [=](const IrArray::Index& index) {
return IrArray(shape_constant, constant->shape())
- .EmitReadArrayElement(index, ir_builder_);
+ .EmitReadArrayElement(index, b_);
};
return Status::OK();
@@ -104,7 +103,7 @@ Status FusedIrEmitter::HandleGetTupleElement(
// Emit code to lookup tuple element pointer, and store it in 'gte_values_'.
llvm::Value* tuple_element_ptr = llvm_ir::EmitGetTupleElement(
get_tuple_element->shape(), get_tuple_element->tuple_index(),
- /*alignment=*/1, it->second, ir_builder_, module_);
+ /*alignment=*/1, it->second, b_, module_);
gte_values_.insert(std::make_pair(get_tuple_element, tuple_element_ptr));
// Emit code to read base tuple element array (if non-tuple shaped).
if (!ShapeUtil::IsTuple(get_tuple_element->shape())) {
@@ -112,7 +111,7 @@ Status FusedIrEmitter::HandleGetTupleElement(
[=](const IrArray::Index& index) -> StatusOr<llvm::Value*> {
// TODO(b/34080002) Add aliasing information to tuple element IrArray.
return IrArray(tuple_element_ptr, get_tuple_element->shape())
- .EmitReadArrayElement(index, ir_builder_);
+ .EmitReadArrayElement(index, b_);
};
}
return Status::OK();
@@ -129,16 +128,15 @@ Status FusedIrEmitter::HandleParameter(HloInstruction* parameter) {
// want the AA info to be present before address spaces are inferred
// (which is pretty late in the pipeline), so even if we had
// address-space-based AA in LLVM, it wouldn't help us much here.
- return ir_builder_->CreateLoad(
- ir_builder_->CreateGEP(
- param_tile_buffer,
- {index.GetConstantWithIndexType(0), tiled_parameter_info_->x(),
- tiled_parameter_info_->y()}),
+ return b_->CreateLoad(
+ b_->CreateGEP(param_tile_buffer, {index.GetConstantWithIndexType(0),
+ tiled_parameter_info_->x(),
+ tiled_parameter_info_->y()}),
"tiled_buffer");
}
}
return parameter_arrays_[parameter->parameter_number()]
- .EmitReadArrayElement(index, ir_builder_);
+ .EmitReadArrayElement(index, b_);
};
// Store ir value for fusion operand associated with fusion parameter to be
// accessed by subsequent fused GetTupleElement instructions.
@@ -157,11 +155,11 @@ Status FusedIrEmitter::HandleTuple(HloInstruction* tuple) {
}
generators_[tuple] =
[=](const IrArray::Index& index) -> StatusOr<llvm::Value*> {
- llvm::Value* ret = llvm::UndefValue::get(llvm::StructType::get(
- ir_builder_->getContext(), operand_elemental_ir_types));
+ llvm::Value* ret = llvm::UndefValue::get(
+ llvm::StructType::get(b_->getContext(), operand_elemental_ir_types));
for (size_t i = 0; i < ShapeUtil::TupleElementCount(tuple->shape()); ++i) {
TF_ASSIGN_OR_RETURN(llvm::Value * val_i, generators_[operands[i]](index));
- ret = ir_builder_->CreateInsertValue(ret, val_i, i);
+ ret = b_->CreateInsertValue(ret, val_i, i);
}
return ret;
};
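The element generators cached in generators_ above all follow one pattern: capture the shared builder b_ and map an IrArray::Index to the llvm::Value holding that element. A minimal standalone sketch of that pattern (the ElementGenerator alias and MakeReadGenerator helper are illustrative names, not part of this patch):

#include <functional>

#include "llvm/IR/IRBuilder.h"
#include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"
#include "tensorflow/compiler/xla/statusor.h"

namespace xla {

// Hypothetical alias for the generator closures; each one turns a
// multi-dimensional index into the llvm::Value for that element.
using ElementGenerator =
    std::function<StatusOr<llvm::Value*>(const llvm_ir::IrArray::Index&)>;

// Builds a generator that reads one element of `array` through the shared
// builder `b`, mirroring the lambdas created in HandleConstant and
// HandleParameter.
ElementGenerator MakeReadGenerator(llvm_ir::IrArray array,
                                   llvm::IRBuilder<>* b) {
  return [=](const llvm_ir::IrArray::Index& index) -> StatusOr<llvm::Value*> {
    return array.EmitReadArrayElement(index, b);
  };
}

}  // namespace xla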
diff --git a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h
index a6ceec7b23..30471480c4 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/fused_ir_emitter.h
@@ -59,7 +59,7 @@ class FusedIrEmitter : public DfsHloVisitorWithDefault {
: parameter_arrays_(parameter_arrays),
tiled_parameter_info_(nullptr),
elemental_emitter_(elemental_emitter),
- ir_builder_(elemental_emitter->ir_builder()),
+ b_(elemental_emitter->b()),
module_(elemental_emitter->module()) {}
Status DefaultAction(HloInstruction* hlo) override;
@@ -103,7 +103,7 @@ class FusedIrEmitter : public DfsHloVisitorWithDefault {
const HloInstruction* fused_root_ = nullptr;
// Borrowed
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
llvm::Module* module_;
// Map from instruction pointers to functions to generate elements of their
diff --git a/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc b/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc
index dcf9838d80..7a9170f379 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/ir_array.cc
@@ -31,7 +31,7 @@ namespace llvm_ir {
void IrArray::Index::Delinearize(std::vector<llvm::Value*>* multidim,
llvm::Value* linear, const Shape& shape,
- llvm::IRBuilder<>* ir_builder) const {
+ llvm::IRBuilder<>* b) const {
int64 divisor = 1;
const Layout& layout = shape.layout();
for (int64 i = 0; i < layout.minor_to_major_size(); ++i) {
@@ -48,10 +48,9 @@ void IrArray::Index::Delinearize(std::vector<llvm::Value*>* multidim,
// useful because cuda-memcheck can't help us much in XLA: Most of our
// memory lives in one big allocation, so cuda-memcheck can't detect
// out-of-bounds accesses.
- auto* quot =
- ir_builder->CreateUDiv(linear, GetConstantWithIndexType(divisor));
+ auto* quot = b->CreateUDiv(linear, GetConstantWithIndexType(divisor));
if (i < layout.minor_to_major_size() - 1) {
- (*multidim)[dimension] = ir_builder->CreateURem(
+ (*multidim)[dimension] = b->CreateURem(
quot, GetConstantWithIndexType(size_of_current_dimension));
} else {
(*multidim)[dimension] = quot;
@@ -61,7 +60,7 @@ void IrArray::Index::Delinearize(std::vector<llvm::Value*>* multidim,
}
IrArray::Index::Index(llvm::Value* linear, const Shape& shape,
- llvm::IRBuilder<>* ir_builder)
+ llvm::IRBuilder<>* b)
: multidim_(ShapeUtil::Rank(shape)),
linear_(linear),
layout_(shape.layout()),
@@ -71,7 +70,7 @@ IrArray::Index::Index(llvm::Value* linear, const Shape& shape,
CHECK(LayoutUtil::HasLayout(shape))
<< "Shape " << ShapeUtil::HumanStringWithLayout(shape)
<< " should have a layout.";
- Delinearize(&multidim_, linear, shape, ir_builder);
+ Delinearize(&multidim_, linear, shape, b);
}
IrArray::Index::Index(tensorflow::gtl::ArraySlice<llvm::Value*> multidim,
@@ -94,7 +93,7 @@ IrArray::Index::Index(tensorflow::gtl::ArraySlice<llvm::Value*> multidim,
}
IrArray::Index::Index(tensorflow::gtl::ArraySlice<llvm::Value*> multidim,
- const Shape& shape, llvm::IRBuilder<>* ir_builder)
+ const Shape& shape, llvm::IRBuilder<>* b)
: multidim_(multidim.begin(), multidim.end()),
layout_(shape.layout()),
dims_(shape.dimensions().begin(), shape.dimensions().end()) {
@@ -343,7 +342,7 @@ llvm::Value* IrArray::Index::Linearize(
}
llvm::Value* IrArray::EmitArrayElementAddress(
- const IrArray::Index& index, llvm::IRBuilder<>* ir_builder,
+ const IrArray::Index& index, llvm::IRBuilder<>* b,
tensorflow::StringPiece name) const {
if (ShapeUtil::IsScalar(*shape_)) {
// Special handling of scalars: a scalar pretends to have the same value for
@@ -354,12 +353,11 @@ llvm::Value* IrArray::EmitArrayElementAddress(
CHECK_EQ(index.size(), ShapeUtil::Rank(*shape_));
if (index.LinearValidOnShape(*shape_)) {
- llvm::Module* module =
- ir_builder->GetInsertBlock()->getParent()->getParent();
- return ir_builder->CreateInBoundsGEP(
- ir_builder->CreateBitCast(
- base_ptr_, PrimitiveTypeToIrType(shape_->element_type(), module)
- ->getPointerTo()),
+ llvm::Module* module = b->GetInsertBlock()->getParent()->getParent();
+ return b->CreateInBoundsGEP(
+ b->CreateBitCast(base_ptr_,
+ PrimitiveTypeToIrType(shape_->element_type(), module)
+ ->getPointerTo()),
{index.linear()}, llvm_ir::AsStringRef(name));
}
@@ -385,8 +383,8 @@ llvm::Value* IrArray::EmitArrayElementAddress(
int64 dimension = LayoutUtil::Major(shape_->layout(), i);
gep_indices.push_back(actual_index[dimension]);
}
- return ir_builder->CreateInBoundsGEP(base_ptr_, gep_indices,
- llvm_ir::AsStringRef(name));
+ return b->CreateInBoundsGEP(base_ptr_, gep_indices,
+ llvm_ir::AsStringRef(name));
}
void IrArray::AnnotateLoadStoreInstructionWithMetadata(
@@ -402,29 +400,27 @@ void IrArray::AnnotateLoadStoreInstructionWithMetadata(
}
llvm::Value* IrArray::EmitReadArrayElement(const Index& index,
- llvm::IRBuilder<>* ir_builder,
+ llvm::IRBuilder<>* b,
tensorflow::StringPiece name) const {
- llvm::Value* element_address =
- EmitArrayElementAddress(index, ir_builder, name);
- llvm::LoadInst* load = ir_builder->CreateLoad(element_address);
+ llvm::Value* element_address = EmitArrayElementAddress(index, b, name);
+ llvm::LoadInst* load = b->CreateLoad(element_address);
AnnotateLoadStoreInstructionWithMetadata(load);
return load;
}
void IrArray::EmitWriteArrayElement(const Index& index, llvm::Value* value,
- llvm::IRBuilder<>* ir_builder) const {
- llvm::Value* element_address = EmitArrayElementAddress(index, ir_builder);
- llvm::StoreInst* store = ir_builder->CreateStore(value, element_address);
+ llvm::IRBuilder<>* b) const {
+ llvm::Value* element_address = EmitArrayElementAddress(index, b);
+ llvm::StoreInst* store = b->CreateStore(value, element_address);
AnnotateLoadStoreInstructionWithMetadata(store);
}
IrArray IrArray::CastToShape(const Shape& new_shape,
- llvm::IRBuilder<>* ir_builder) const {
- llvm::Module* module = ir_builder->GetInsertBlock()->getParent()->getParent();
+ llvm::IRBuilder<>* b) const {
+ llvm::Module* module = b->GetInsertBlock()->getParent()->getParent();
llvm::Type* new_ir_type = llvm_ir::ShapeToIrType(new_shape, module);
IrArray new_irarray(
- ir_builder->CreatePointerCast(base_ptr_, new_ir_type->getPointerTo()),
- new_shape);
+ b->CreatePointerCast(base_ptr_, new_ir_type->getPointerTo()), new_shape);
new_irarray.metadata_ = metadata_;
return new_irarray;
}
@@ -432,9 +428,9 @@ IrArray IrArray::CastToShape(const Shape& new_shape,
/* static */ IrArray::Index IrArray::BumpIndex(const Index& index,
int64 which_dimension,
int64 addend,
- llvm::IRBuilder<>* ir_builder) {
+ llvm::IRBuilder<>* b) {
Index new_index = index;
- new_index[which_dimension] = ir_builder->CreateAdd(
+ new_index[which_dimension] = b->CreateAdd(
index[which_dimension],
llvm::ConstantInt::get(index[which_dimension]->getType(), addend), "",
/*HasNUW=*/true,
diff --git a/tensorflow/compiler/xla/service/llvm_ir/ir_array.h b/tensorflow/compiler/xla/service/llvm_ir/ir_array.h
index 0777c49923..28ca793e3e 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/ir_array.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/ir_array.h
@@ -87,20 +87,19 @@ class IrArray {
}
// Constructs an index from linear index "linear" and computes the
- // multi-dimensional index from "linear" and "shape". "ir_builder" is the IR
+ // multi-dimensional index from "linear" and "shape". "b" is the IR
// builder to emit the index of each dimension in the multi-dimensional
// index.
//
// Precondition: "shape" has a layout.
- Index(llvm::Value* linear, const Shape& shape,
- llvm::IRBuilder<>* ir_builder);
+ Index(llvm::Value* linear, const Shape& shape, llvm::IRBuilder<>* b);
// Constructs an index from the given multi-dimensional index and the shape
// that it indexes into.
//
// Precondition: "shape" has a layout.
Index(tensorflow::gtl::ArraySlice<llvm::Value*> multidim,
- const Shape& shape, llvm::IRBuilder<>* ir_builder);
+ const Shape& shape, llvm::IRBuilder<>* b);
// Constructs an index from both a multi-dimensional index and a linear
// index. "shape" has the same meaning as that in the constructor that takes
@@ -191,7 +190,7 @@ class IrArray {
}
void Delinearize(std::vector<llvm::Value*>* multidim, llvm::Value* linear,
- const Shape& shape, llvm::IRBuilder<>* ir_builder) const;
+ const Shape& shape, llvm::IRBuilder<>* b) const;
std::vector<llvm::Value*> multidim_;
@@ -240,8 +239,7 @@ class IrArray {
//
// The optional name is useful for debugging when looking at
// the emitted LLVM IR.
- llvm::Value* EmitArrayElementAddress(const Index& index,
- llvm::IRBuilder<>* ir_builder,
+ llvm::Value* EmitArrayElementAddress(const Index& index, llvm::IRBuilder<>* b,
tensorflow::StringPiece name = "") const;
// Attach metadata this IrArray instance knows about to "instruction".
@@ -255,18 +253,16 @@ class IrArray {
//
// The optional name is useful for debugging when looking at
// the emitted LLVM IR.
- llvm::Value* EmitReadArrayElement(const Index& index,
- llvm::IRBuilder<>* ir_builder,
+ llvm::Value* EmitReadArrayElement(const Index& index, llvm::IRBuilder<>* b,
tensorflow::StringPiece name = "") const;
// Emit IR to write the given value to the array element at the given index.
void EmitWriteArrayElement(const Index& index, llvm::Value* value,
- llvm::IRBuilder<>* ir_builder) const;
+ llvm::IRBuilder<>* b) const;
// Returns a new IrArray whose shape is "new_shape" and base pointer is a
// bitcast of the base pointer of "this" IrArray.
- IrArray CastToShape(const Shape& new_shape,
- llvm::IRBuilder<>* ir_builder) const;
+ IrArray CastToShape(const Shape& new_shape, llvm::IRBuilder<>* b) const;
void AddAliasScopeMetadata(llvm::MDNode* alias_scope) {
CHECK_NE(alias_scope, nullptr);
@@ -312,7 +308,7 @@ class IrArray {
// Bumps the "which_dimension" value within the provided index by the provided
// addend.
static Index BumpIndex(const Index& index, int64 which_dimension,
- int64 addend, llvm::IRBuilder<>* ir_builder);
+ int64 addend, llvm::IRBuilder<>* b);
private:
// Add the specified LLVM IR metadata to loads/stores associated with this
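For orientation, a minimal sketch of how the renamed builder parameter b threads through the IrArray API: delinearize a linear offset into an Index, read the element, and write it back one step along dimension 0. The wrapper function and its buffer/shape/linear arguments are hypothetical stand-ins for values produced by the surrounding emitter:

#include "llvm/IR/IRBuilder.h"
#include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"

namespace xla {
namespace llvm_ir {

// Reads the element at `linear` from `buffer` (shaped as `shape`) and writes
// it back at the index bumped by one along dimension 0.
void CopyElementShiftedByOne(llvm::Value* buffer, const Shape& shape,
                             llvm::Value* linear, llvm::IRBuilder<>* b) {
  IrArray array(buffer, shape);
  // Delinearization requires `shape` to have a layout.
  IrArray::Index index(linear, shape, b);
  llvm::Value* element = array.EmitReadArrayElement(index, b, "src_elem");
  IrArray::Index shifted =
      IrArray::BumpIndex(index, /*which_dimension=*/0, /*addend=*/1, b);
  array.EmitWriteArrayElement(shifted, element, b);
}

}  // namespace llvm_ir
}  // namespace xla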
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc
index 98d0ceb3e2..b79567369a 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc
@@ -22,9 +22,9 @@ Status KernelSupportLibrary::For(
tensorflow::StringPiece name, llvm::Value* start, llvm::Value* end,
llvm::Value* step,
const std::function<Status(llvm::Value*, bool)>& for_body_generator) {
- return If(ir_builder_->CreateICmpSLT(start, end), [&]() -> Status {
+ return If(b_->CreateICmpSLT(start, end), [&]() -> Status {
TF_RETURN_IF_ERROR(for_body_generator(start, /*is_first_iteration=*/true));
- return For(name, ir_builder_->CreateAdd(start, step), end, step,
+ return For(name, b_->CreateAdd(start, step), end, step,
[&](llvm::Value* iv) { return for_body_generator(iv, false); });
});
}
@@ -37,20 +37,20 @@ Status KernelSupportLibrary::For(
if (peel_first_iteration) {
return For(name, start, end, step, true,
[&](llvm::Value* indvar, bool is_first_iteration) -> Status {
- return for_body_generator(
- indvar, ir_builder_->getInt1(is_first_iteration));
+ return for_body_generator(indvar,
+ b_->getInt1(is_first_iteration));
});
} else {
std::unique_ptr<llvm_ir::ForLoop> loop = llvm_ir::ForLoop::EmitForLoop(
- name, start, end, step, ir_builder_,
+ name, start, end, step, b_,
/*unroll_mode=*/unroll_mode_,
/*prevent_vectorization=*/prevent_vectorization_);
- ir_builder_->SetInsertPoint(&loop->GetBodyBasicBlock()->back());
+ b_->SetInsertPoint(&loop->GetBodyBasicBlock()->back());
TF_RETURN_IF_ERROR(
for_body_generator(loop->GetIndVarValue(),
- /*is_first_iteration=*/ir_builder_->CreateICmpEQ(
+ /*is_first_iteration=*/b_->CreateICmpEQ(
loop->GetIndVarValue(), start)));
- llvm_ir::SetToLastInsertPoint(loop->GetExitBasicBlock(), ir_builder_);
+ llvm_ir::SetToLastInsertPoint(loop->GetExitBasicBlock(), b_);
return Status::OK();
}
}
@@ -59,23 +59,22 @@ Status KernelSupportLibrary::If(
tensorflow::StringPiece name, llvm::Value* condition,
const std::function<Status()>& true_block_generator,
const std::function<Status()>& false_block_generator) {
- llvm_ir::LlvmIfData if_data =
- llvm_ir::EmitIfThenElse(condition, name, ir_builder_);
- ir_builder_->SetInsertPoint(&if_data.true_block->back());
+ llvm_ir::LlvmIfData if_data = llvm_ir::EmitIfThenElse(condition, name, b_);
+ b_->SetInsertPoint(&if_data.true_block->back());
TF_RETURN_IF_ERROR(true_block_generator());
- ir_builder_->SetInsertPoint(&if_data.false_block->back());
+ b_->SetInsertPoint(&if_data.false_block->back());
TF_RETURN_IF_ERROR(false_block_generator());
- llvm_ir::SetToLastInsertPoint(if_data.after_block, ir_builder_);
+ llvm_ir::SetToLastInsertPoint(if_data.after_block, b_);
return Status::OK();
}
void KernelSupportLibrary::EmitAndCallOutlinedKernel(
- bool enable_fast_math, bool optimize_for_size,
- llvm::IRBuilder<>* ir_builder, tensorflow::StringPiece kernel_name,
+ bool enable_fast_math, bool optimize_for_size, llvm::IRBuilder<>* b,
+ tensorflow::StringPiece kernel_name,
KernelSupportLibrary::ArgumentVector arguments,
const std::function<void(KernelSupportLibrary::ArgumentVector)>&
kernel_body_generator) {
- llvm::Module* module = ir_builder->GetInsertBlock()->getModule();
+ llvm::Module* module = b->GetInsertBlock()->getModule();
llvm::Function* function =
module->getFunction(llvm_ir::AsStringRef(kernel_name));
@@ -98,22 +97,22 @@ void KernelSupportLibrary::EmitAndCallOutlinedKernel(
std::back_inserter(arg_types),
[](llvm::Value* arg) { return arg->getType(); });
- auto* function_type = llvm::FunctionType::get(
- ir_builder->getVoidTy(), arg_types, /*isVarArg=*/false);
+ auto* function_type =
+ llvm::FunctionType::get(b->getVoidTy(), arg_types, /*isVarArg=*/false);
function = llvm_ir::CreateFunction(
function_type, llvm::GlobalValue::InternalLinkage,
/*enable_fast_math=*/enable_fast_math,
/*optimize_for_size=*/optimize_for_size, kernel_name, module);
- llvm::IRBuilder<>::InsertPointGuard guard(*ir_builder);
+ llvm::IRBuilder<>::InsertPointGuard guard(*b);
auto* entry_bb =
- llvm::BasicBlock::Create(ir_builder->getContext(), "entry", function);
- auto* return_inst = llvm::ReturnInst::Create(ir_builder->getContext(),
+ llvm::BasicBlock::Create(b->getContext(), "entry", function);
+ auto* return_inst = llvm::ReturnInst::Create(b->getContext(),
/*retVal=*/nullptr, entry_bb);
// Set the insert point to before return_inst.
- ir_builder->SetInsertPoint(return_inst);
+ b->SetInsertPoint(return_inst);
std::vector<llvm::Value*> arg_values;
/*
@@ -133,7 +132,7 @@ void KernelSupportLibrary::EmitAndCallOutlinedKernel(
VLOG(3) << "Re-using kernel for " << kernel_name;
}
- ir_builder->CreateCall(function, llvm_ir::AsArrayRef(sanitized_args));
+ b->CreateCall(function, llvm_ir::AsArrayRef(sanitized_args));
}
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h
index 9d770cc4c3..b00f903d56 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h
@@ -30,14 +30,14 @@ namespace xla {
// flow more readable.
class KernelSupportLibrary {
public:
- // `ir_builder` is the llvm::IRBuilder instance used to generate LLVM IR.
+ // `b` is the llvm::IRBuilder instance used to generate LLVM IR.
// `unroll_mode` specifies the desired LLVM unrolling behavior for every loop
// generated by this instance of KernelSupportLibrary.
explicit KernelSupportLibrary(
- llvm::IRBuilder<>* ir_builder,
+ llvm::IRBuilder<>* b,
llvm_ir::UnrollMode unroll_mode = llvm_ir::UnrollMode::kNoUnroll,
bool prevent_vectorization = true)
- : ir_builder_(ir_builder),
+ : b_(b),
unroll_mode_(unroll_mode),
prevent_vectorization_(prevent_vectorization) {}
@@ -71,18 +71,18 @@ class KernelSupportLibrary {
const std::function<Status(llvm::Value* ind_var,
bool is_first_iteration)>&
for_body_generator) {
- return For(name, /*start=*/ir_builder_->getInt64(start),
- /*end=*/ir_builder_->getInt64(end),
- /*step=*/ir_builder_->getInt64(step), for_body_generator);
+ return For(name, /*start=*/b_->getInt64(start),
+ /*end=*/b_->getInt64(end),
+ /*step=*/b_->getInt64(step), for_body_generator);
}
void ForReturnVoid(
tensorflow::StringPiece name, int64 start, int64 end, int64 step,
const std::function<void(llvm::Value* ind_var, bool is_first_iteration)>&
for_body_generator) {
- ForReturnVoid(name, /*start=*/ir_builder_->getInt64(start),
- /*end=*/ir_builder_->getInt64(end),
- /*step=*/ir_builder_->getInt64(step), for_body_generator);
+ ForReturnVoid(name, /*start=*/b_->getInt64(start),
+ /*end=*/b_->getInt64(end),
+ /*step=*/b_->getInt64(step), for_body_generator);
}
// Generates the following control flow structure if `peel_first_iteration` is
@@ -184,17 +184,17 @@ class KernelSupportLibrary {
Status For(
tensorflow::StringPiece name, int64 start, int64 end, int64 step,
const std::function<Status(llvm::Value* ind_var)>& for_body_generator) {
- return For(name, /*start=*/ir_builder_->getInt64(start),
- /*end=*/ir_builder_->getInt64(end),
- /*step=*/ir_builder_->getInt64(step), for_body_generator);
+ return For(name, /*start=*/b_->getInt64(start),
+ /*end=*/b_->getInt64(end),
+ /*step=*/b_->getInt64(step), for_body_generator);
}
void ForReturnVoid(
tensorflow::StringPiece name, int64 start, int64 end, int64 step,
const std::function<void(llvm::Value* ind_var)>& for_body_generator) {
- ForReturnVoid(name, /*start=*/ir_builder_->getInt64(start),
- /*end=*/ir_builder_->getInt64(end),
- /*step=*/ir_builder_->getInt64(step), for_body_generator);
+ ForReturnVoid(name, /*start=*/b_->getInt64(start),
+ /*end=*/b_->getInt64(end),
+ /*step=*/b_->getInt64(step), for_body_generator);
}
// Generates the following control flow structure:
@@ -258,41 +258,39 @@ class KernelSupportLibrary {
// in a nullptr llvm::Value* in its position to `kernel_body_generator`.
// Currently we only support at most one nullptr value in `arguments`.
static void EmitAndCallOutlinedKernel(
- bool enable_fast_math, bool optimize_for_size,
- llvm::IRBuilder<>* ir_builder, tensorflow::StringPiece kernel_name,
- ArgumentVector arguments,
+ bool enable_fast_math, bool optimize_for_size, llvm::IRBuilder<>* b,
+ tensorflow::StringPiece kernel_name, ArgumentVector arguments,
const std::function<void(ArgumentVector)>& kernel_body_generator);
// Thin wrappers around the more general EmitAndCallOutlinedKernel above.
static void EmitAndCallOutlinedKernel(
- bool enable_fast_math, bool optimize_for_size,
- llvm::IRBuilder<>* ir_builder, tensorflow::StringPiece kernel_name,
- llvm::Value* arg0, llvm::Value* arg1, llvm::Value* arg2,
+ bool enable_fast_math, bool optimize_for_size, llvm::IRBuilder<>* b,
+ tensorflow::StringPiece kernel_name, llvm::Value* arg0, llvm::Value* arg1,
+ llvm::Value* arg2,
const std::function<void(llvm::Value*, llvm::Value*, llvm::Value*)>&
kernel_body_generator) {
EmitAndCallOutlinedKernel(
- enable_fast_math, optimize_for_size, ir_builder, kernel_name,
- {arg0, arg1, arg2}, [&](ArgumentVector args) {
+ enable_fast_math, optimize_for_size, b, kernel_name, {arg0, arg1, arg2},
+ [&](ArgumentVector args) {
kernel_body_generator(args[0], args[1], args[2]);
});
}
static void EmitAndCallOutlinedKernel(
- bool enable_fast_math, bool optimize_for_size,
- llvm::IRBuilder<>* ir_builder, tensorflow::StringPiece kernel_name,
- llvm::Value* arg0, llvm::Value* arg1, llvm::Value* arg2,
- llvm::Value* arg3,
+ bool enable_fast_math, bool optimize_for_size, llvm::IRBuilder<>* b,
+ tensorflow::StringPiece kernel_name, llvm::Value* arg0, llvm::Value* arg1,
+ llvm::Value* arg2, llvm::Value* arg3,
const std::function<void(llvm::Value*, llvm::Value*, llvm::Value*,
llvm::Value*)>& kernel_body_generator) {
EmitAndCallOutlinedKernel(
- enable_fast_math, optimize_for_size, ir_builder, kernel_name,
+ enable_fast_math, optimize_for_size, b, kernel_name,
{arg0, arg1, arg2, arg3}, [&](ArgumentVector args) {
kernel_body_generator(args[0], args[1], args[2], args[3]);
});
}
private:
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
llvm_ir::UnrollMode unroll_mode_;
bool prevent_vectorization_;
};
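A small usage sketch of KernelSupportLibrary after the rename, combining the int64 For overload above with the two-argument If overload used in the .cc. EmitGuardedLoop and its arguments are hypothetical; only the KernelSupportLibrary calls come from this header:

#include "llvm/IR/IRBuilder.h"
#include "tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h"

namespace xla {

// Emits: for (i = 0; i < trip_count; ++i) { if (predicate) { ...body... } }
// `predicate` is assumed to be an i1 value from the surrounding code.
Status EmitGuardedLoop(llvm::IRBuilder<>* b, llvm::Value* predicate,
                       int64 trip_count) {
  KernelSupportLibrary ksl(b, llvm_ir::UnrollMode::kNoUnroll);
  return ksl.For("guarded_loop", /*start=*/0, /*end=*/trip_count, /*step=*/1,
                 [&](llvm::Value* indvar) -> Status {
                   return ksl.If(predicate, [&]() -> Status {
                     // ... emit the loop body using `indvar` here ...
                     return Status::OK();
                   });
                 });
}

}  // namespace xla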
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc b/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc
index 533b75cdae..35b3941272 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.cc
@@ -94,24 +94,24 @@ tensorflow::gtl::optional<std::vector<int64> > FindTranspose021(
IrArray::Index GetUnreducedOutputIndex(
const IrArray::Index& reduced_output_index,
const Shape& reduced_output_shape, const Shape& unreduced_output_shape,
- llvm::IRBuilder<>* ir_builder) {
+ llvm::IRBuilder<>* b) {
auto bounds = reduced_output_shape.dimensions();
auto minor_to_major = reduced_output_shape.layout().minor_to_major();
llvm::Value* linear_index = reduced_output_index.GetConstantWithIndexType(0);
int64 multiplier = 1;
for (int i = 0; i < reduced_output_index.size(); ++i) {
int64 dim = minor_to_major[i];
- llvm::Value* addend = ir_builder->CreateMul(
- reduced_output_index[dim],
- reduced_output_index.GetConstantWithIndexType(multiplier),
- "linearizing",
- /*HasNUW=*/true, /*HasNSW=*/true);
- linear_index = ir_builder->CreateAdd(linear_index, addend, "",
- /*HasNUW=*/true, /*HasNSW=*/true);
+ llvm::Value* addend =
+ b->CreateMul(reduced_output_index[dim],
+ reduced_output_index.GetConstantWithIndexType(multiplier),
+ "linearizing",
+ /*HasNUW=*/true, /*HasNSW=*/true);
+ linear_index = b->CreateAdd(linear_index, addend, "",
+ /*HasNUW=*/true, /*HasNSW=*/true);
multiplier *= bounds[dim];
}
- return IrArray::Index(linear_index, unreduced_output_shape, ir_builder);
+ return IrArray::Index(linear_index, unreduced_output_shape, b);
}
} // namespace llvm_ir
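The IR emitted above simply linearizes the reduced index minor-to-major before re-delinearizing it against the unreduced shape. A host-side illustration of the same arithmetic (plain C++ for exposition, not emitted IR):

#include <cstdint>
#include <vector>

// linear = sum_i index[dim_i] * (product of the bounds of all more-minor
// dimensions), walking dimensions from most-minor to most-major.
int64_t LinearizeMinorToMajor(const std::vector<int64_t>& index,
                              const std::vector<int64_t>& bounds,
                              const std::vector<int64_t>& minor_to_major) {
  int64_t linear = 0;
  int64_t multiplier = 1;
  for (size_t i = 0; i < index.size(); ++i) {
    const int64_t dim = minor_to_major[i];
    linear += index[dim] * multiplier;
    multiplier *= bounds[dim];
  }
  return linear;
}

For a row-major {2, 3} shape (minor_to_major = {1, 0}), index {1, 2} linearizes to 2 * 1 + 1 * 3 = 5, matching the usual row-major offset.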
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h b/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h
index 6f1268fffb..ccb9b8ba3e 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_tiling.h
@@ -44,7 +44,7 @@ tensorflow::gtl::optional<std::vector<int64> > FindTranspose021(const Shape& a,
IrArray::Index GetUnreducedOutputIndex(
const IrArray::Index& reduced_output_index,
const Shape& reduced_output_shape, const Shape& unreduced_output_shape,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
// A class to represent information for tiled parameters to support IR emission
// for 021 transpose.
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc b/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc
index c9ae7d3afd..ba7f94834c 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.cc
@@ -47,27 +47,27 @@ ForLoop::ForLoop(tensorflow::StringPiece prefix, tensorflow::StringPiece suffix,
/* static */ std::unique_ptr<ForLoop> ForLoop::EmitForLoop(
tensorflow::StringPiece prefix, llvm::Value* start_index,
- llvm::Value* end_index, llvm::Value* step, llvm::IRBuilder<>* ir_builder,
+ llvm::Value* end_index, llvm::Value* step, llvm::IRBuilder<>* b,
UnrollMode unroll_mode, bool prevent_vectorization) {
std::unique_ptr<ForLoop> loop(new ForLoop(prefix, /*suffix=*/"", start_index,
end_index, step, unroll_mode,
prevent_vectorization));
- loop->Emit(ir_builder);
+ loop->Emit(b);
return loop;
}
-void ForLoop::Emit(llvm::IRBuilder<>* ir_builder) {
+void ForLoop::Emit(llvm::IRBuilder<>* b) {
// The preheader block is the block the builder is currently emitting
// code into.
- preheader_bb_ = ir_builder->GetInsertBlock();
+ preheader_bb_ = b->GetInsertBlock();
- llvm::BasicBlock::iterator insert_point = ir_builder->GetInsertPoint();
+ llvm::BasicBlock::iterator insert_point = b->GetInsertPoint();
if (insert_point == preheader_bb_->end()) {
// We're emitting the loop at the end of a basic block. Verify there is no
// terminator (eg, branch) in the basic block.
CHECK_EQ(nullptr, preheader_bb_->getTerminator());
- exit_bb_ = CreateLoopBB("loop_exit", ir_builder);
+ exit_bb_ = CreateLoopBB("loop_exit", b);
} else {
// We're emitting the loop into the middle of a basic block. splitBasicBlock
// requires that this basic block be well-formed (have a terminator).
@@ -86,51 +86,50 @@ void ForLoop::Emit(llvm::IRBuilder<>* ir_builder) {
insert_before_bb_ = exit_bb_;
// Create remaining basic block which form the inside of the loop.
- header_bb_ = CreateLoopBB("loop_header", ir_builder);
- body_bb_ = CreateLoopBB("loop_body", ir_builder);
+ header_bb_ = CreateLoopBB("loop_header", b);
+ body_bb_ = CreateLoopBB("loop_body", b);
// Function entry basic block.
// Emit alloca for the induction variable. We do this at the entry to the
// basic block to ensure the alloc only executes once per function (we could
// be emitting a nested loop).
llvm::Function* func = preheader_bb_->getParent();
- ir_builder->SetInsertPoint(&func->getEntryBlock(),
- func->getEntryBlock().getFirstInsertionPt());
+ b->SetInsertPoint(&func->getEntryBlock(),
+ func->getEntryBlock().getFirstInsertionPt());
llvm::Value* indvar_address =
- ir_builder->CreateAlloca(start_index_->getType(), nullptr,
- AsStringRef(GetQualifiedName("invar_address")));
+ b->CreateAlloca(start_index_->getType(), nullptr,
+ AsStringRef(GetQualifiedName("invar_address")));
// Preheader basic block.
// Initialize induction variable starting index. Create branch to the header.
- ir_builder->SetInsertPoint(preheader_bb_);
- ir_builder->CreateStore(start_index_, indvar_address);
+ b->SetInsertPoint(preheader_bb_);
+ b->CreateStore(start_index_, indvar_address);
// The preheader should not have a branch yet.
CHECK_EQ(preheader_bb_->getTerminator(), nullptr);
- ir_builder->CreateBr(header_bb_);
+ b->CreateBr(header_bb_);
// Header basic block.
// Emit the loop conditional branch. Load and compare indvar with ending
// index and jump to loop exit if equal. Jump to body otherwise.
- ir_builder->SetInsertPoint(header_bb_);
- indvar_ = ir_builder->CreateLoad(indvar_address,
- AsStringRef(GetQualifiedName("indvar")));
- llvm::Value* exit_cond = ir_builder->CreateICmpUGE(indvar_, end_index_);
- ir_builder->CreateCondBr(/*Cond=*/exit_cond,
- /*True=*/exit_bb_, /*False=*/body_bb_);
+ b->SetInsertPoint(header_bb_);
+ indvar_ =
+ b->CreateLoad(indvar_address, AsStringRef(GetQualifiedName("indvar")));
+ llvm::Value* exit_cond = b->CreateICmpUGE(indvar_, end_index_);
+ b->CreateCondBr(/*Cond=*/exit_cond,
+ /*True=*/exit_bb_, /*False=*/body_bb_);
// Body basic block.
// Increment indvar, store indvar, and jump to header.
- ir_builder->SetInsertPoint(body_bb_);
+ b->SetInsertPoint(body_bb_);
llvm::Value* step = step_;
llvm::Value* indvar = indvar_;
- llvm::Value* indvar_inc =
- ir_builder->CreateAdd(indvar, step, "invar.inc",
- /*HasNUW=*/true, /*HasNSW=*/true);
- ir_builder->CreateStore(indvar_inc, indvar_address);
- llvm::BranchInst* back_branch = ir_builder->CreateBr(header_bb_);
+ llvm::Value* indvar_inc = b->CreateAdd(indvar, step, "invar.inc",
+ /*HasNUW=*/true, /*HasNSW=*/true);
+ b->CreateStore(indvar_inc, indvar_address);
+ llvm::BranchInst* back_branch = b->CreateBr(header_bb_);
- std::vector<llvm::Metadata*> loop_metadata = GetLoopMetadata(ir_builder);
+ std::vector<llvm::Metadata*> loop_metadata = GetLoopMetadata(b);
if (!loop_metadata.empty()) {
llvm::LLVMContext* ctx = &start_index_->getContext();
auto temp_node = llvm::MDNode::getTemporary(*ctx, llvm::None);
@@ -141,11 +140,10 @@ void ForLoop::Emit(llvm::IRBuilder<>* ir_builder) {
}
// Re-point the IR builder to the loop exit block.
- ir_builder->SetInsertPoint(exit_bb_);
+ b->SetInsertPoint(exit_bb_);
}
-std::vector<llvm::Metadata*> ForLoop::GetLoopMetadata(
- llvm::IRBuilder<>* ir_builder) {
+std::vector<llvm::Metadata*> ForLoop::GetLoopMetadata(llvm::IRBuilder<>* b) {
const char* const kLlvmLoopUnrollDisableMDName = "llvm.loop.unroll.disable";
const char* const kLlvmLoopUnrollFullMDName = "llvm.loop.unroll.full";
const char* const kLlvmLoopVectorizeMDName = "llvm.loop.vectorize.enable";
@@ -160,7 +158,7 @@ std::vector<llvm::Metadata*> ForLoop::GetLoopMetadata(
if (prevent_vectorization_) {
result.push_back(llvm::MDNode::get(
*ctx, {llvm::MDString::get(*ctx, kLlvmLoopVectorizeMDName),
- llvm::ConstantAsMetadata::get(ir_builder->getFalse())}));
+ llvm::ConstantAsMetadata::get(b->getFalse())}));
}
if (unroll_mode_ == xla::llvm_ir::UnrollMode::kFullyUnroll) {
@@ -175,9 +173,8 @@ string ForLoop::GetQualifiedName(tensorflow::StringPiece name) {
}
llvm::BasicBlock* ForLoop::CreateLoopBB(tensorflow::StringPiece name,
- llvm::IRBuilder<>* ir_builder) {
- return CreateBasicBlock(insert_before_bb_, GetQualifiedName(name),
- ir_builder);
+ llvm::IRBuilder<>* b) {
+ return CreateBasicBlock(insert_before_bb_, GetQualifiedName(name), b);
}
std::unique_ptr<ForLoop> ForLoopNest::AddLoop(tensorflow::StringPiece suffix,
@@ -197,12 +194,12 @@ std::unique_ptr<ForLoop> ForLoopNest::AddLoop(tensorflow::StringPiece suffix,
bool prevent_vectorization) {
if (inner_loop_body_bb_ != nullptr) {
// Create this loop inside the previous one.
- ir_builder_->SetInsertPoint(&*inner_loop_body_bb_->getFirstInsertionPt());
+ b_->SetInsertPoint(&*inner_loop_body_bb_->getFirstInsertionPt());
}
std::unique_ptr<ForLoop> loop(new ForLoop(
/*prefix=*/name_, suffix, start_index, end_index, stride, unroll_mode,
prevent_vectorization));
- loop->Emit(ir_builder_);
+ loop->Emit(b_);
if (outer_loop_preheader_bb_ == nullptr) {
outer_loop_preheader_bb_ = loop->GetPreheaderBasicBlock();
@@ -262,5 +259,35 @@ IrArray::Index ForLoopNest::AddLoopsForShapeOnDimensions(
return index;
}
+IrArray::Index ForLoopNest::EmitOperandArrayLoopNest(
+ const llvm_ir::IrArray& operand_array, int64 dimension_to_skip,
+ tensorflow::StringPiece name_suffix) {
+ // Prepares the dimension list we will use to emit the loop nest. Outermost
+ // loops are added first. Add loops in major-to-minor order, and skip the
+ // 'dimension_to_skip' dimension.
+ std::vector<int64> dimensions;
+ const Shape& shape = operand_array.GetShape();
+ for (int64 dimension : LayoutUtil::MinorToMajor(shape)) {
+ if (dimension != dimension_to_skip) {
+ dimensions.push_back(dimension);
+ }
+ }
+
+ // Create loop nest with one for-loop for each dimension of the
+ // output.
+ llvm_ir::IrArray::Index index =
+ AddLoopsForShapeOnDimensions(shape, dimensions, name_suffix);
+ // Verify every dimension except the 'dimension_to_skip' dimension was set in
+ // the index.
+ for (size_t dimension = 0; dimension < index.size(); ++dimension) {
+ if (dimension == dimension_to_skip) {
+ DCHECK_EQ(nullptr, index[dimension]);
+ } else {
+ DCHECK_NE(nullptr, index[dimension]);
+ }
+ }
+ return index;
+}
+
} // namespace llvm_ir
} // namespace xla
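A sketch of driving ForLoop::EmitForLoop directly, mirroring the pattern used by KernelSupportLibrary::For earlier in this patch: emit the loop skeleton, point the builder at the body block, emit the body, then leave the builder at the loop exit. EmitSimpleLoop and its emit_body callback are hypothetical:

#include <functional>
#include <memory>

#include "llvm/IR/IRBuilder.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"

namespace xla {

void EmitSimpleLoop(llvm::IRBuilder<>* b, int64 trip_count,
                    const std::function<void(llvm::Value*)>& emit_body) {
  std::unique_ptr<llvm_ir::ForLoop> loop = llvm_ir::ForLoop::EmitForLoop(
      "simple", /*start_index=*/b->getInt64(0),
      /*end_index=*/b->getInt64(trip_count), /*step=*/b->getInt64(1), b);
  // Emit the body at the end of the body block, using the induction variable.
  b->SetInsertPoint(&loop->GetBodyBasicBlock()->back());
  emit_body(loop->GetIndVarValue());
  // Leave the builder at the loop exit so later code lands after the loop.
  llvm_ir::SetToLastInsertPoint(loop->GetExitBasicBlock(), b);
}

}  // namespace xla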
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h b/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h
index 0dd5b9d3b2..a4fed5c8dc 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h
@@ -79,7 +79,7 @@ class ForLoop {
// loop.
static std::unique_ptr<ForLoop> EmitForLoop(
tensorflow::StringPiece prefix, llvm::Value* start_index,
- llvm::Value* end_index, llvm::Value* step, llvm::IRBuilder<>* ir_builder,
+ llvm::Value* end_index, llvm::Value* step, llvm::IRBuilder<>* b,
UnrollMode unroll_mode = llvm_ir::UnrollMode::kDefaultUnroll,
bool prevent_vectorization = false);
@@ -138,10 +138,10 @@ class ForLoop {
UnrollMode unroll_mode, bool prevent_vectorization);
// Emit the loop at the insert point of the builder.
- void Emit(llvm::IRBuilder<>* ir_builder);
+ void Emit(llvm::IRBuilder<>* b);
llvm::BasicBlock* CreateLoopBB(tensorflow::StringPiece name,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
// Creates a name for an LLVM construct, appending prefix_ and suffix_, if
// they are set.
@@ -149,7 +149,7 @@ class ForLoop {
// Return a list of metadata nodes that should be associated with the
// llvm::Loop for this `ForLoop`.
- std::vector<llvm::Metadata*> GetLoopMetadata(llvm::IRBuilder<>* ir_builder);
+ std::vector<llvm::Metadata*> GetLoopMetadata(llvm::IRBuilder<>* b);
string prefix_;
string suffix_;
@@ -177,19 +177,18 @@ class ForLoop {
// A simple class for constructing nested for-loops.
class ForLoopNest {
public:
- explicit ForLoopNest(llvm::IRBuilder<>* ir_builder,
- llvm::Type* index_ty = nullptr)
- : ForLoopNest(/*name=*/"", ir_builder) {
+ explicit ForLoopNest(llvm::IRBuilder<>* b, llvm::Type* index_ty = nullptr)
+ : ForLoopNest(/*name=*/"", b) {
SetIndexType(index_ty);
}
- ForLoopNest(tensorflow::StringPiece name, llvm::IRBuilder<>* ir_builder,
+ ForLoopNest(tensorflow::StringPiece name, llvm::IRBuilder<>* b,
llvm::Type* index_ty = nullptr)
: name_(std::string(name)),
outer_loop_preheader_bb_(nullptr),
outer_loop_exit_bb_(nullptr),
inner_loop_body_bb_(nullptr),
- ir_builder_(ir_builder) {
+ b_(b) {
SetIndexType(index_ty);
}
@@ -248,6 +247,17 @@ class ForLoopNest {
const Shape& shape, tensorflow::gtl::ArraySlice<int64> dimensions,
tensorflow::StringPiece suffix);
+ // Emits a series of nested loops for iterating over an operand array. Loops
+ // are constructed in major to minor dimension layout order. No loop is
+ // emitted for the given 'dimension_to_skip'. The function returns an IrArray
+ // index for the given operand_array containing the indvars of the loops. All
+ // dimensions of the index are filled except for 'dimension_to_skip'.
+ // name_suffix is the string to append to the names of LLVM constructs (eg,
+ // basic blocks) constructed by this method.
+ IrArray::Index EmitOperandArrayLoopNest(const llvm_ir::IrArray& operand_array,
+ int64 dimension_to_skip,
+ tensorflow::StringPiece name_suffix);
+
// Convenience methods which return particular basic blocks of the outermost
// or innermost loops. These methods return nullptr if no loops have been
// added yet.
@@ -259,7 +269,7 @@ class ForLoopNest {
private:
void SetIndexType(llvm::Type* index_ty) {
- index_type_ = index_ty == nullptr ? ir_builder_->getInt64Ty() : index_ty;
+ index_type_ = index_ty == nullptr ? b_->getInt64Ty() : index_ty;
}
llvm::Constant* GetConstantWithIndexType(int64 c) const {
@@ -278,7 +288,7 @@ class ForLoopNest {
// has been added yet.
llvm::BasicBlock* inner_loop_body_bb_;
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
llvm::Type* index_type_;
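An illustrative sketch of the newly added ForLoopNest::EmitOperandArrayLoopNest: build loops over every dimension of an operand except one and position the builder in the innermost body. The wrapper function is hypothetical; the skipped dimension stays nullptr in the returned index and must be filled in by the caller:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"
#include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h"

namespace xla {

void EmitLoopsOverAllButOneDim(const llvm_ir::IrArray& operand_array,
                               int64 dimension_to_skip,
                               llvm::IRBuilder<>* b) {
  llvm_ir::ForLoopNest loop_nest("operand_nest", b);
  llvm_ir::IrArray::Index index = loop_nest.EmitOperandArrayLoopNest(
      operand_array, dimension_to_skip, /*name_suffix=*/"operand");
  // Point the builder at the innermost body before emitting per-element IR.
  llvm::BasicBlock* body_bb = loop_nest.GetInnerLoopBodyBasicBlock();
  b->SetInsertPoint(body_bb, body_bb->getFirstInsertionPt());
  // index[dimension_to_skip] is nullptr here and must be supplied by the
  // caller before `index` can address operand_array.
}

}  // namespace xla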
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
index 6c55361b44..e4f65bd427 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
@@ -48,8 +48,8 @@ namespace {
// Note, this function is only useful in an insertion context; in a global
// (e.g. constants) context it will CHECK fail.
-llvm::Module* ModuleFromIRBuilder(llvm::IRBuilder<>* ir_builder) {
- auto block = CHECK_NOTNULL(ir_builder->GetInsertBlock());
+llvm::Module* ModuleFromIRBuilder(llvm::IRBuilder<>* b) {
+ auto block = CHECK_NOTNULL(b->GetInsertBlock());
auto fn = CHECK_NOTNULL(block->getParent());
auto module = CHECK_NOTNULL(fn->getParent());
return module;
@@ -87,41 +87,41 @@ llvm::Value* EmitCallToIntrinsic(
llvm::Intrinsic::ID intrinsic_id,
tensorflow::gtl::ArraySlice<llvm::Value*> operands,
tensorflow::gtl::ArraySlice<llvm::Type*> overloaded_types,
- llvm::IRBuilder<>* ir_builder) {
- llvm::Module* module = ModuleFromIRBuilder(ir_builder);
+ llvm::IRBuilder<>* b) {
+ llvm::Module* module = ModuleFromIRBuilder(b);
llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration(
module, intrinsic_id, AsArrayRef(overloaded_types));
- return ir_builder->CreateCall(intrinsic, AsArrayRef(operands));
+ return b->CreateCall(intrinsic, AsArrayRef(operands));
}
llvm::Value* EmitFloatMax(llvm::Value* lhs_value, llvm::Value* rhs_value,
- llvm::IRBuilder<>* ir_builder) {
- if (ir_builder->getFastMathFlags().noNaNs()) {
- auto cmp = ir_builder->CreateFCmpUGE(lhs_value, rhs_value);
- return ir_builder->CreateSelect(cmp, lhs_value, rhs_value);
+ llvm::IRBuilder<>* b) {
+ if (b->getFastMathFlags().noNaNs()) {
+ auto cmp = b->CreateFCmpUGE(lhs_value, rhs_value);
+ return b->CreateSelect(cmp, lhs_value, rhs_value);
} else {
- auto cmp_ge = ir_builder->CreateFCmpOGE(lhs_value, rhs_value);
- auto lhs_is_nan = ir_builder->CreateFCmpUNE(lhs_value, lhs_value);
- auto sel_lhs = ir_builder->CreateOr(cmp_ge, lhs_is_nan);
- return ir_builder->CreateSelect(sel_lhs, lhs_value, rhs_value);
+ auto cmp_ge = b->CreateFCmpOGE(lhs_value, rhs_value);
+ auto lhs_is_nan = b->CreateFCmpUNE(lhs_value, lhs_value);
+ auto sel_lhs = b->CreateOr(cmp_ge, lhs_is_nan);
+ return b->CreateSelect(sel_lhs, lhs_value, rhs_value);
}
}
llvm::Value* EmitFloatMin(llvm::Value* lhs_value, llvm::Value* rhs_value,
- llvm::IRBuilder<>* ir_builder) {
- if (ir_builder->getFastMathFlags().noNaNs()) {
- auto cmp = ir_builder->CreateFCmpULE(lhs_value, rhs_value);
- return ir_builder->CreateSelect(cmp, lhs_value, rhs_value);
+ llvm::IRBuilder<>* b) {
+ if (b->getFastMathFlags().noNaNs()) {
+ auto cmp = b->CreateFCmpULE(lhs_value, rhs_value);
+ return b->CreateSelect(cmp, lhs_value, rhs_value);
} else {
- auto cmp_le = ir_builder->CreateFCmpOLE(lhs_value, rhs_value);
- auto lhs_is_nan = ir_builder->CreateFCmpUNE(lhs_value, lhs_value);
- auto sel_lhs = ir_builder->CreateOr(cmp_le, lhs_is_nan);
- return ir_builder->CreateSelect(sel_lhs, lhs_value, rhs_value);
+ auto cmp_le = b->CreateFCmpOLE(lhs_value, rhs_value);
+ auto lhs_is_nan = b->CreateFCmpUNE(lhs_value, lhs_value);
+ auto sel_lhs = b->CreateOr(cmp_le, lhs_is_nan);
+ return b->CreateSelect(sel_lhs, lhs_value, rhs_value);
}
}
llvm::Value* EmitBufferIndexingGEP(llvm::Value* array, llvm::Value* index,
- llvm::IRBuilder<>* ir_builder) {
+ llvm::IRBuilder<>* b) {
llvm::Type* array_type = array->getType();
CHECK(array_type->isPointerTy());
llvm::PointerType* array_type_as_pointer =
@@ -131,16 +131,16 @@ llvm::Value* EmitBufferIndexingGEP(llvm::Value* array, llvm::Value* index,
<< " array=" << llvm_ir::DumpToString(*array)
<< " index=" << llvm_ir::DumpToString(*index);
- return ir_builder->CreateInBoundsGEP(
+ return b->CreateInBoundsGEP(
array_type_as_pointer->getElementType(), array,
llvm::isa<llvm::GlobalVariable>(array)
- ? llvm::ArrayRef<llvm::Value*>({ir_builder->getInt64(0), index})
+ ? llvm::ArrayRef<llvm::Value*>({b->getInt64(0), index})
: index);
}
llvm::Value* EmitBufferIndexingGEP(llvm::Value* array, int64 index,
- llvm::IRBuilder<>* ir_builder) {
- return EmitBufferIndexingGEP(array, ir_builder->getInt64(index), ir_builder);
+ llvm::IRBuilder<>* b) {
+ return EmitBufferIndexingGEP(array, b->getInt64(index), b);
}
llvm::Type* PrimitiveTypeToIrType(PrimitiveType element_type,
@@ -232,14 +232,15 @@ llvm::Type* ShapeToIrType(const Shape& shape, llvm::Module* module) {
return result_type;
}
-StatusOr<llvm::Value*> EncodeSelfDescribingShapeConstant(
- const Shape& shape, int32* shape_size, llvm::IRBuilder<>* ir_builder) {
+StatusOr<llvm::Value*> EncodeSelfDescribingShapeConstant(const Shape& shape,
+ int32* shape_size,
+ llvm::IRBuilder<>* b) {
string encoded_shape = shape.SerializeAsString();
if (encoded_shape.size() > std::numeric_limits<int32>::max()) {
return InternalError("Encoded shape size exceeded int32 size limit.");
}
*shape_size = static_cast<int32>(encoded_shape.size());
- return ir_builder->CreateGlobalStringPtr(llvm_ir::AsStringRef(encoded_shape));
+ return b->CreateGlobalStringPtr(llvm_ir::AsStringRef(encoded_shape));
}
StatusOr<Shape> DecodeSelfDescribingShapeConstant(const void* shape_ptr,
@@ -262,59 +263,57 @@ llvm::Constant* ConvertLiteralToIrConstant(const Literal& literal,
llvm::AllocaInst* EmitAllocaAtFunctionEntry(llvm::Type* type,
tensorflow::StringPiece name,
- llvm::IRBuilder<>* ir_builder,
+ llvm::IRBuilder<>* b,
int alignment) {
- return EmitAllocaAtFunctionEntryWithCount(type, nullptr, name, ir_builder,
- alignment);
+ return EmitAllocaAtFunctionEntryWithCount(type, nullptr, name, b, alignment);
}
llvm::AllocaInst* EmitAllocaAtFunctionEntryWithCount(
llvm::Type* type, llvm::Value* element_count, tensorflow::StringPiece name,
- llvm::IRBuilder<>* ir_builder, int alignment) {
- llvm::IRBuilder<>::InsertPoint insert_point = ir_builder->saveIP();
- llvm::Function* function = ir_builder->GetInsertBlock()->getParent();
- ir_builder->SetInsertPoint(&function->getEntryBlock(),
- function->getEntryBlock().getFirstInsertionPt());
+ llvm::IRBuilder<>* b, int alignment) {
+ llvm::IRBuilder<>::InsertPoint insert_point = b->saveIP();
+ llvm::Function* function = b->GetInsertBlock()->getParent();
+ b->SetInsertPoint(&function->getEntryBlock(),
+ function->getEntryBlock().getFirstInsertionPt());
llvm::AllocaInst* alloca =
- ir_builder->CreateAlloca(type, element_count, AsStringRef(name));
+ b->CreateAlloca(type, element_count, AsStringRef(name));
if (alignment != 0) {
alloca->setAlignment(alignment);
}
- ir_builder->restoreIP(insert_point);
+ b->restoreIP(insert_point);
return alloca;
}
llvm::BasicBlock* CreateBasicBlock(llvm::BasicBlock* insert_before,
tensorflow::StringPiece name,
- llvm::IRBuilder<>* ir_builder) {
+ llvm::IRBuilder<>* b) {
return llvm::BasicBlock::Create(
- /*Context=*/ir_builder->getContext(),
+ /*Context=*/b->getContext(),
/*Name=*/AsStringRef(name),
- /*Parent=*/ir_builder->GetInsertBlock()->getParent(),
+ /*Parent=*/b->GetInsertBlock()->getParent(),
/*InsertBefore*/ insert_before);
}
LlvmIfData EmitIfThenElse(llvm::Value* condition, tensorflow::StringPiece name,
- llvm::IRBuilder<>* ir_builder, bool emit_else) {
+ llvm::IRBuilder<>* b, bool emit_else) {
llvm_ir::LlvmIfData if_data;
- if_data.if_block = ir_builder->GetInsertBlock();
- if_data.true_block = CreateBasicBlock(
- nullptr, tensorflow::strings::StrCat(name, "-true"), ir_builder);
+ if_data.if_block = b->GetInsertBlock();
+ if_data.true_block =
+ CreateBasicBlock(nullptr, tensorflow::strings::StrCat(name, "-true"), b);
if_data.false_block =
- emit_else ? CreateBasicBlock(nullptr,
- tensorflow::strings::StrCat(name, "-false"),
- ir_builder)
+ emit_else ? CreateBasicBlock(
+ nullptr, tensorflow::strings::StrCat(name, "-false"), b)
: nullptr;
// Add a terminator to the if block, if necessary.
if (if_data.if_block->getTerminator() == nullptr) {
- ir_builder->SetInsertPoint(if_data.if_block);
+ b->SetInsertPoint(if_data.if_block);
if_data.after_block = CreateBasicBlock(
- nullptr, tensorflow::strings::StrCat(name, "-after"), ir_builder);
- ir_builder->CreateBr(if_data.after_block);
+ nullptr, tensorflow::strings::StrCat(name, "-after"), b);
+ b->CreateBr(if_data.after_block);
} else {
if_data.after_block = if_data.if_block->splitBasicBlock(
- ir_builder->GetInsertPoint(),
+ b->GetInsertPoint(),
AsStringRef(tensorflow::strings::StrCat(name, "-after")));
}
@@ -322,39 +321,37 @@ LlvmIfData EmitIfThenElse(llvm::Value* condition, tensorflow::StringPiece name,
// we're going to replace it with a conditional branch.
if_data.if_block->getTerminator()->eraseFromParent();
- ir_builder->SetInsertPoint(if_data.if_block);
- ir_builder->CreateCondBr(
- condition, if_data.true_block,
- emit_else ? if_data.false_block : if_data.after_block);
+ b->SetInsertPoint(if_data.if_block);
+ b->CreateCondBr(condition, if_data.true_block,
+ emit_else ? if_data.false_block : if_data.after_block);
- ir_builder->SetInsertPoint(if_data.true_block);
- ir_builder->CreateBr(if_data.after_block);
+ b->SetInsertPoint(if_data.true_block);
+ b->CreateBr(if_data.after_block);
if (emit_else) {
- ir_builder->SetInsertPoint(if_data.false_block);
- ir_builder->CreateBr(if_data.after_block);
+ b->SetInsertPoint(if_data.false_block);
+ b->CreateBr(if_data.after_block);
}
- ir_builder->SetInsertPoint(if_data.after_block,
- if_data.after_block->getFirstInsertionPt());
+ b->SetInsertPoint(if_data.after_block,
+ if_data.after_block->getFirstInsertionPt());
return if_data;
}
llvm::Value* EmitComparison(llvm::CmpInst::Predicate predicate,
llvm::Value* lhs_value, llvm::Value* rhs_value,
- llvm::IRBuilder<>* ir_builder) {
+ llvm::IRBuilder<>* b) {
llvm::Value* comparison_result;
if (lhs_value->getType()->isIntegerTy()) {
- comparison_result = ir_builder->CreateICmp(predicate, lhs_value, rhs_value);
+ comparison_result = b->CreateICmp(predicate, lhs_value, rhs_value);
} else {
- comparison_result = ir_builder->CreateFCmp(predicate, lhs_value, rhs_value);
+ comparison_result = b->CreateFCmp(predicate, lhs_value, rhs_value);
}
// comparison_result is i1, but the NVPTX codegen incorrectly lowers i1
// arrays. So we extend it to i8 so that it's addressable.
- return ir_builder->CreateZExt(
- comparison_result,
- llvm_ir::PrimitiveTypeToIrType(PRED, ModuleFromIRBuilder(ir_builder)));
+ return b->CreateZExt(comparison_result, llvm_ir::PrimitiveTypeToIrType(
+ PRED, ModuleFromIRBuilder(b)));
}
// Internal helper that is called from emitted code to log an int64 value with a
@@ -363,17 +360,14 @@ static void LogS64(const char* tag, int64 value) {
LOG(INFO) << tag << " (int64): " << value;
}
-void EmitLogging(const char* tag, llvm::Value* value,
- llvm::IRBuilder<>* ir_builder) {
+void EmitLogging(const char* tag, llvm::Value* value, llvm::IRBuilder<>* b) {
llvm::FunctionType* log_function_type = llvm::FunctionType::get(
- ir_builder->getVoidTy(),
- {ir_builder->getInt64Ty(), ir_builder->getInt64Ty()}, /*isVarArg=*/false);
- ir_builder->CreateCall(
+ b->getVoidTy(), {b->getInt64Ty(), b->getInt64Ty()}, /*isVarArg=*/false);
+ b->CreateCall(
log_function_type,
- ir_builder->CreateIntToPtr(
- ir_builder->getInt64(tensorflow::bit_cast<int64>(&LogS64)),
- log_function_type->getPointerTo()),
- {ir_builder->getInt64(tensorflow::bit_cast<int64>(tag)), value});
+ b->CreateIntToPtr(b->getInt64(tensorflow::bit_cast<int64>(&LogS64)),
+ log_function_type->getPointerTo()),
+ {b->getInt64(tensorflow::bit_cast<int64>(tag)), value});
}
void SetAlignmentMetadataForLoad(llvm::LoadInst* load, uint64_t alignment) {
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.h b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.h
index 9c51861eac..d8746ffe01 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.h
@@ -105,26 +105,26 @@ llvm::Value* EmitCallToIntrinsic(
llvm::Intrinsic::ID intrinsic_id,
tensorflow::gtl::ArraySlice<llvm::Value*> operands,
tensorflow::gtl::ArraySlice<llvm::Type*> overloaded_types,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
// Emit float max. Emit maxnum intrinsic if fast math is disabled, or
// fcmp+select otherwise
llvm::Value* EmitFloatMax(llvm::Value* lhs_value, llvm::Value* rhs_value,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
// Emit float min. Emit minnum intrinsic if fast math is disabled, or
// fcmp+select otherwise
llvm::Value* EmitFloatMin(llvm::Value* lhs_value, llvm::Value* rhs_value,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
// Convenience methods for emitting a GEP instruction that indexes into a buffer
// (1-dimensional array), equivalent to array[index]. The type is automatically
// determined from the element type of the array. The int64 index overload
// wraps the index in an i64 llvm::Value.
llvm::Value* EmitBufferIndexingGEP(llvm::Value* array, llvm::Value* index,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
llvm::Value* EmitBufferIndexingGEP(llvm::Value* array, int64 index,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
// Returns the LLVM type which represents the given XLA primitive type.
llvm::Type* PrimitiveTypeToIrType(PrimitiveType element_type,
@@ -139,8 +139,9 @@ llvm::Type* ShapeToIrType(const Shape& shape, llvm::Module* module);
// Returns a value that represents a pointer to a global string constant that
// encodes the shape as a serialized protobuf.
-StatusOr<llvm::Value*> EncodeSelfDescribingShapeConstant(
- const Shape& shape, int32* shape_size, llvm::IRBuilder<>* ir_builder);
+StatusOr<llvm::Value*> EncodeSelfDescribingShapeConstant(const Shape& shape,
+ int32* shape_size,
+ llvm::IRBuilder<>* b);
// Inverts the encoding of a Shape protobuf into an LLVM global variable.
//
@@ -164,21 +165,21 @@ llvm::Constant* ConvertLiteralToIrConstant(const Literal& literal,
// through a loop.
llvm::AllocaInst* EmitAllocaAtFunctionEntry(llvm::Type* type,
tensorflow::StringPiece name,
- llvm::IRBuilder<>* ir_builder,
+ llvm::IRBuilder<>* b,
int alignment = 0);
// As EmitAllocaAtFunctionEntry, but allocates element_count entries
// instead of a single element.
llvm::AllocaInst* EmitAllocaAtFunctionEntryWithCount(
llvm::Type* type, llvm::Value* element_count, tensorflow::StringPiece name,
- llvm::IRBuilder<>* ir_builder, int alignment = 0);
+ llvm::IRBuilder<>* b, int alignment = 0);
// Creates a basic block with the same context and function as for the
// builder. Inserts at the end of the function if insert_before is
// null.
llvm::BasicBlock* CreateBasicBlock(llvm::BasicBlock* insert_before,
tensorflow::StringPiece name,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
// Struct with data on a conditional branch in a diamond shape created
// via EmitIfThenElse.
@@ -210,13 +211,13 @@ struct LlvmIfData {
// block with a terminator. If you need to use this for a
// non-terminated block, just make the function able to do that too.
LlvmIfData EmitIfThenElse(llvm::Value* condition, tensorflow::StringPiece name,
- llvm::IRBuilder<>* ir_builder, bool emit_else = true);
+ llvm::IRBuilder<>* b, bool emit_else = true);
// Emits a compare operation between "lhs" and "rhs" with the given predicate,
// and then converts the result to i8 so that it is addressable.
llvm::Value* EmitComparison(llvm::CmpInst::Predicate predicate,
llvm::Value* lhs, llvm::Value* rhs,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
// Emits a call that logs the given value with the given tag as a prefix.
// The provided tag and value are passed to a runtime logging call that is
@@ -228,8 +229,7 @@ llvm::Value* EmitComparison(llvm::CmpInst::Predicate predicate,
// Precondition: value must be an int64.
// Precondition: tag must be a stable pointer for the lifetime of the generated
// program (the constant pointer is burned in to the program).
-void EmitLogging(const char* tag, llvm::Value* value,
- llvm::IRBuilder<>* ir_builder);
+void EmitLogging(const char* tag, llvm::Value* value, llvm::IRBuilder<>* b);
// Adds alignment metadata to a load instruction using the given alignment.
// The alignment refers to the result of the load, not the load itself.
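A short sketch combining two of the helpers above, EmitComparison and EmitIfThenElse: compare, truncate the i8 result back to i1, and branch. The wrapper function is hypothetical and the two arms are left as placeholders:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"

namespace xla {

void EmitLessThanBranch(llvm::Value* lhs, llvm::Value* rhs,
                        llvm::IRBuilder<>* b) {
  // EmitComparison returns an i8 (zero-extended i1), so truncate before
  // branching on it.
  llvm::Value* is_less =
      llvm_ir::EmitComparison(llvm::CmpInst::ICMP_SLT, lhs, rhs, b);
  llvm_ir::LlvmIfData if_data = llvm_ir::EmitIfThenElse(
      b->CreateTrunc(is_less, b->getInt1Ty()), "is_less", b);
  b->SetInsertPoint(&if_data.true_block->back());
  // ... emit the lhs < rhs arm here ...
  b->SetInsertPoint(&if_data.false_block->back());
  // ... emit the lhs >= rhs arm here ...
  b->SetInsertPoint(if_data.after_block,
                    if_data.after_block->getFirstInsertionPt());
}

}  // namespace xla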
diff --git a/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc b/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
index e8b0605b9d..36f5fa1952 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
@@ -33,26 +33,24 @@ namespace xla {
namespace llvm_ir {
LoopEmitter::LoopEmitter(const BodyEmitter& body_emitter, const Shape& shape,
- llvm::IRBuilder<>* ir_builder)
- : body_emitter_(body_emitter), shape_(shape), ir_builder_(ir_builder) {}
+ llvm::IRBuilder<>* b)
+ : body_emitter_(body_emitter), shape_(shape), b_(b) {}
LoopEmitter::LoopEmitter(const ElementGenerator& target_element_generator,
- const IrArray& target_array,
- llvm::IRBuilder<>* ir_builder)
+ const IrArray& target_array, llvm::IRBuilder<>* b)
: body_emitter_([=](const llvm_ir::IrArray::Index array_index) -> Status {
// Convert target_element_generator to a BodyEmitter.
TF_ASSIGN_OR_RETURN(llvm::Value * target_element,
target_element_generator(array_index));
- target_array.EmitWriteArrayElement(array_index, target_element,
- ir_builder);
+ target_array.EmitWriteArrayElement(array_index, target_element, b);
return Status::OK();
}),
shape_(target_array.GetShape()),
- ir_builder_(ir_builder) {}
+ b_(b) {}
static LoopEmitter::BodyEmitter MakeBodyEmitterForMultiOutputFusion(
const ElementGenerator& target_element_generator,
- const std::vector<IrArray>& target_arrays, llvm::IRBuilder<>* ir_builder) {
+ const std::vector<IrArray>& target_arrays, llvm::IRBuilder<>* b) {
return [=](const llvm_ir::IrArray::Index array_index) {
TF_ASSIGN_OR_RETURN(llvm::Value * target_element,
target_element_generator(array_index));
@@ -64,8 +62,7 @@ static LoopEmitter::BodyEmitter MakeBodyEmitterForMultiOutputFusion(
for (int64 i = 0; i < target_arrays.size(); ++i) {
target_arrays[i].EmitWriteArrayElement(
- array_index, ir_builder->CreateExtractValue(target_element, i),
- ir_builder);
+ array_index, b->CreateExtractValue(target_element, i), b);
}
return Status::OK();
};
@@ -73,13 +70,12 @@ static LoopEmitter::BodyEmitter MakeBodyEmitterForMultiOutputFusion(
LoopEmitter::LoopEmitter(const ElementGenerator& target_element_generator,
tensorflow::gtl::ArraySlice<IrArray> target_arrays,
- llvm::IRBuilder<>* ir_builder)
+ llvm::IRBuilder<>* b)
: body_emitter_(MakeBodyEmitterForMultiOutputFusion(
target_element_generator,
- std::vector<IrArray>(target_arrays.begin(), target_arrays.end()),
- ir_builder)),
+ std::vector<IrArray>(target_arrays.begin(), target_arrays.end()), b)),
shape_(target_arrays[0].GetShape()),
- ir_builder_(ir_builder) {
+ b_(b) {
// Sanity check: In multi-output fusion, all shapes produced must have the
// same dimensions.
for (const IrArray& array : target_arrays) {
@@ -102,7 +98,7 @@ std::vector<IrArray::Index> LoopEmitter::EmitIndexAndSetExitBasicBlock(
// Loops are added from outermost to innermost order with the ForLoopNest
// class so emit loops in order from most-major dimension down to most-minor
// dimension (of the target shape).
- ForLoopNest loop_nest(loop_name, ir_builder_);
+ ForLoopNest loop_nest(loop_name, b_);
IrArray::Index array_index(index_type, shape_.dimensions_size());
for (int i = 0; i < LayoutUtil::MinorToMajor(shape_).size(); ++i) {
int64 dimension = LayoutUtil::Major(shape_.layout(), i);
@@ -116,8 +112,8 @@ std::vector<IrArray::Index> LoopEmitter::EmitIndexAndSetExitBasicBlock(
// Set IR builder insertion point to the loop body basic block of the
// innermost loop.
llvm::BasicBlock* innermost_body_bb = loop_nest.GetInnerLoopBodyBasicBlock();
- ir_builder_->SetInsertPoint(innermost_body_bb,
- innermost_body_bb->getFirstInsertionPt());
+ b_->SetInsertPoint(innermost_body_bb,
+ innermost_body_bb->getFirstInsertionPt());
// Set exit_bb_ to the exit block of the loop nest.
exit_bb_ = loop_nest.GetOuterLoopExitBasicBlock();
@@ -129,7 +125,7 @@ std::vector<IrArray::Index> LoopEmitter::EmitIndexAndSetExitBasicBlock(
Status LoopEmitter::EmitLoop(tensorflow::StringPiece loop_name,
llvm::Type* index_type) {
if (index_type == nullptr) {
- index_type = ir_builder_->getInt64Ty();
+ index_type = b_->getInt64Ty();
}
for (const IrArray::Index& array_index :
@@ -137,10 +133,10 @@ Status LoopEmitter::EmitLoop(tensorflow::StringPiece loop_name,
TF_RETURN_IF_ERROR(body_emitter_(array_index));
}
- // Set the insertion point of ir_builder_ to the loop exit, so that
+ // Set the insertion point of b_ to the loop exit, so that
// code emitted for later instructions will be correctly placed.
if (exit_bb_ != nullptr) {
- ir_builder_->SetInsertPoint(exit_bb_);
+ b_->SetInsertPoint(exit_bb_);
}
return Status::OK();
}
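The multi-output-fusion body emitter above relies on the element generator returning one LLVM struct per index; the loop then peels the struct apart with one CreateExtractValue/EmitWriteArrayElement pair per target array. Below is a standalone sketch of that extract-then-store pattern using the plain LLVM C++ API. It is not XLA code: the function name write_fused_outputs and the two-output struct are invented for illustration.

// Standalone sketch: build IR for
//   void write_fused_outputs(float* out0, float* out1, {float, float} packed)
// that unpacks the struct and stores one field per output, mirroring the
// per-array extract/store loop in MakeBodyEmitterForMultiOutputFusion.
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

#include <vector>

int main() {
  llvm::LLVMContext ctx;
  llvm::Module module("multi_output_sketch", ctx);
  llvm::IRBuilder<> b(ctx);

  llvm::Type* f32 = b.getFloatTy();
  llvm::StructType* packed_ty = llvm::StructType::get(ctx, {f32, f32});
  std::vector<llvm::Type*> params = {f32->getPointerTo(), f32->getPointerTo(),
                                     packed_ty};
  llvm::Function* fn = llvm::Function::Create(
      llvm::FunctionType::get(b.getVoidTy(), params, /*isVarArg=*/false),
      llvm::Function::ExternalLinkage, "write_fused_outputs", &module);
  b.SetInsertPoint(llvm::BasicBlock::Create(ctx, "entry", fn));

  auto arg = fn->arg_begin();
  llvm::Value* out0 = &*arg++;
  llvm::Value* out1 = &*arg++;
  llvm::Value* packed = &*arg;

  std::vector<llvm::Value*> outputs = {out0, out1};
  for (unsigned i = 0; i < outputs.size(); ++i) {
    // One extract + store per fused output, as in the emitter's loop.
    b.CreateStore(b.CreateExtractValue(packed, i), outputs[i]);
  }
  b.CreateRetVoid();

  llvm::verifyFunction(*fn, &llvm::errs());
  module.print(llvm::outs(), nullptr);
  return 0;
}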
diff --git a/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.h b/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.h
index 6be1c2fba2..c4f5c82086 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.h
@@ -41,11 +41,11 @@ class LoopEmitter {
using BodyEmitter = std::function<Status(const IrArray::Index& index)>;
LoopEmitter(const BodyEmitter& body_emitter, const Shape& shape,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
// Constructs a LoopEmitter from an element generator that generates each
// element of the given target array.
LoopEmitter(const ElementGenerator& target_element_generator,
- const IrArray& target_array, llvm::IRBuilder<>* ir_builder);
+ const IrArray& target_array, llvm::IRBuilder<>* b);
// Constructs a LoopEmitter that emits one element into each of N separate
// arrays on each iteration of the loop.
@@ -54,7 +54,7 @@ class LoopEmitter {
// produce an LLVM struct with N elements.
LoopEmitter(const ElementGenerator& target_element_generator,
tensorflow::gtl::ArraySlice<IrArray> target_arrays,
- llvm::IRBuilder<>* ir_builder);
+ llvm::IRBuilder<>* b);
LoopEmitter(const LoopEmitter&) = delete;
LoopEmitter& operator=(const LoopEmitter&) = delete;
@@ -65,8 +65,7 @@ class LoopEmitter {
// specifies the element, will return multiple indices if the loop is
// unrolled.
std::vector<IrArray::Index> EmitIndexAndSetExitBasicBlock() {
- return EmitIndexAndSetExitBasicBlock(/*loop_name=*/"",
- ir_builder_->getInt64Ty());
+ return EmitIndexAndSetExitBasicBlock(/*loop_name=*/"", b_->getInt64Ty());
}
virtual std::vector<IrArray::Index> EmitIndexAndSetExitBasicBlock(
@@ -87,7 +86,7 @@ class LoopEmitter {
// scalar, no loops are emitted and exit_bb_ is nullptr in that case.
llvm::BasicBlock* exit_bb_;
- llvm::IRBuilder<>* ir_builder_;
+ llvm::IRBuilder<>* b_;
};
} // namespace llvm_ir
diff --git a/tensorflow/compiler/xla/service/llvm_ir/sort_util.cc b/tensorflow/compiler/xla/service/llvm_ir/sort_util.cc
new file mode 100644
index 0000000000..6f261c32f4
--- /dev/null
+++ b/tensorflow/compiler/xla/service/llvm_ir/sort_util.cc
@@ -0,0 +1,155 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/compiler/xla/service/llvm_ir/sort_util.h"
+
+// IWYU pragma: no_include "llvm/IR/Intrinsics.gen.inc"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Value.h"
+#include "tensorflow/compiler/xla/primitive_util.h"
+#include "tensorflow/compiler/xla/service/gpu/parallel_loop_emitter.h"
+#include "tensorflow/compiler/xla/service/gpu/partition_assignment.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/llvm_util.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/loop_emitter.h"
+#include "tensorflow/compiler/xla/shape_util.h"
+#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
+#include "tensorflow/core/platform/types.h"
+
+namespace xla {
+namespace llvm_ir {
+
+namespace {
+// Adds the inner comparison loop where we compare elements pointed to by
+// 'keys_index' and 'compare_keys_index'.
+void EmitCompareLoop(int64 dimension_to_sort,
+ const llvm_ir::IrArray::Index& keys_index,
+ const llvm_ir::IrArray::Index& compare_keys_index,
+ const llvm_ir::IrArray& keys_array, llvm::IRBuilder<>* b) {
+ // TODO(b/26783907): parallelize this loop.
+
+ // if (is_smaller_index &&
+ // compare_keys[dimension_to_sort] < dimension_to_sort_bound)
+ llvm::Value* is_smaller_index = b->CreateICmpSLT(
+ keys_index[dimension_to_sort], compare_keys_index[dimension_to_sort]);
+ int64 dimension_to_sort_bound =
+ keys_array.GetShape().dimensions(dimension_to_sort);
+ auto if_data = llvm_ir::EmitIfThenElse(
+ b->CreateAnd(is_smaller_index,
+ b->CreateICmpSLT(compare_keys_index[dimension_to_sort],
+ keys_index.GetConstantWithIndexType(
+ dimension_to_sort_bound))),
+ "smaller_comparison_index", b, /*emit_else=*/false);
+ SetToFirstInsertPoint(if_data.true_block, b);
+ auto key1 = keys_array.EmitReadArrayElement(keys_index, b);
+ auto key2 = keys_array.EmitReadArrayElement(compare_keys_index, b);
+ auto key_type = keys_array.GetShape().element_type();
+ auto comparison =
+ primitive_util::IsFloatingPointType(key_type)
+ // TODO(b/26783907): Figure out how to handle NaNs.
+ ? b->CreateFCmp(llvm::FCmpInst::FCMP_ULT, key1, key2)
+ : b->CreateICmp(primitive_util::IsSignedIntegralType(key_type)
+ ? llvm::ICmpInst::ICMP_SLT
+ : llvm::ICmpInst::ICMP_ULT,
+ key1, key2);
+ auto min_key = b->CreateSelect(comparison, key1, key2);
+ auto max_key = b->CreateSelect(comparison, key2, key1);
+ keys_array.EmitWriteArrayElement(keys_index, min_key, b);
+ keys_array.EmitWriteArrayElement(compare_keys_index, max_key, b);
+}
+} // namespace
+
+Status EmitSortInPlace(int64 dimension_to_sort, const IrArray& keys_array,
+ tensorflow::StringPiece name, llvm::Value* xor_mask,
+ llvm::IRBuilder<>* b,
+ const gpu::LaunchDimensions* launch_dimensions) {
+ const Shape& keys_shape = keys_array.GetShape();
+
+ // TODO(b/26783907): This case can probably be avoided with the Algebraic
+ // Simplifier.
+ if (ShapeUtil::IsScalar(keys_shape)) {
+ return Status::OK();
+ }
+
+ // Create loop nests which loop through the operand dimensions. The sort
+ // dimension is handled in the innermost loop which performs the sorting.
+ ForLoopNest loop_nest(name, b);
+ IrArray::Index keys_index =
+ loop_nest.EmitOperandArrayLoopNest(keys_array, dimension_to_sort, "keys");
+ if (loop_nest.GetInnerLoopBodyBasicBlock() != nullptr) {
+ SetToFirstInsertPoint(loop_nest.GetInnerLoopBodyBasicBlock(), b);
+ }
+
+ // 'compare_keys_index' is the index of the element that 'keys_index' should
+ // be compared to.
+ IrArray::Index compare_keys_index(keys_index.GetType());
+ for (size_t dimension = 0; dimension < keys_index.size(); ++dimension) {
+ if (dimension != dimension_to_sort) {
+ compare_keys_index.push_back(keys_index[dimension]);
+ } else {
+ compare_keys_index.push_back(nullptr);
+ }
+ }
+
+ // Naive C++ code for the inner compare loop:
+ //
+ // for (int64 i = 0; i < dimension_to_sort_bound; ++i) {
+ // int64 j = i ^ xor_mask;
+ // if (i < j && j < dimension_to_sort_bound) {
+ // int64 min_key = std::min(keys[i], keys[j]);
+ // keys[j] = std::max(keys[i], keys[j]);
+ // keys[i] = min_key;
+ // }
+ // }
+ //
+ // This follows the algorithm described on Wikipedia:
+ // https://en.wikipedia.org/wiki/Bitonic_sorter
+
+ int64 dimension_to_sort_bound =
+ keys_array.GetShape().dimensions(dimension_to_sort);
+ Shape compare_shape = ShapeUtil::MakeShape(keys_shape.element_type(),
+ {dimension_to_sort_bound});
+ auto compare_loop_body_emitter =
+ [&](const IrArray::Index& compare_index) -> Status {
+ keys_index[dimension_to_sort] = compare_index[0];
+ compare_keys_index[dimension_to_sort] =
+ b->CreateXor(compare_index[0], xor_mask);
+ EmitCompareLoop(dimension_to_sort, keys_index, compare_keys_index,
+ keys_array, b);
+ return Status::OK();
+ };
+ if (launch_dimensions != nullptr) {
+ TF_RETURN_IF_ERROR(gpu::ParallelLoopEmitter(compare_loop_body_emitter,
+ compare_shape,
+ *launch_dimensions, b)
+ .EmitLoop(name));
+ } else {
+ TF_RETURN_IF_ERROR(LoopEmitter(compare_loop_body_emitter, compare_shape, b)
+ .EmitLoop(name));
+ }
+
+ // Set the IR builder insert point to the exit basic block of the outermost
+ // loop. This ensures later instructions are inserted after this loop nest.
+ b->SetInsertPoint(loop_nest.GetOuterLoopExitBasicBlock());
+
+ return Status::OK();
+}
+
+} // namespace llvm_ir
+} // namespace xla
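EmitSortInPlace emits only the compare/swap pass for one xor_mask; sorting requires the caller to invoke it once per pass of the bitonic network. The sketch below is plain C++ with no XLA types: CompareSwapPass mirrors the naive loop from the comment above, and BitonicSort drives it with one common ascending-only mask schedule for power-of-two lengths. The schedule actually used by the GPU emitter is chosen by its caller and is not part of this diff.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// One compare/swap pass, exactly as in the "naive C++" comment above:
// always writes the min to the smaller index and the max to the larger one.
void CompareSwapPass(std::vector<int64_t>& keys, int64_t xor_mask) {
  const int64_t bound = static_cast<int64_t>(keys.size());
  for (int64_t i = 0; i < bound; ++i) {
    int64_t j = i ^ xor_mask;
    if (i < j && j < bound) {
      int64_t min_key = std::min(keys[i], keys[j]);
      keys[j] = std::max(keys[i], keys[j]);
      keys[i] = min_key;
    }
  }
}

// Drives the passes with an ascending-only bitonic schedule: the first pass of
// each stage uses the mask 2^(stage+1)-1, the remaining passes single-bit masks.
void BitonicSort(std::vector<int64_t>& keys) {  // keys.size() must be a power of two
  int64_t num_stages = 0;
  while ((int64_t{1} << num_stages) < static_cast<int64_t>(keys.size())) ++num_stages;
  for (int64_t stage = 0; stage < num_stages; ++stage) {
    CompareSwapPass(keys, (int64_t{1} << (stage + 1)) - 1);
    for (int64_t mask = stage - 1; mask >= 0; --mask) {
      CompareSwapPass(keys, int64_t{1} << mask);
    }
  }
}

int main() {
  std::vector<int64_t> keys = {7, 3, 6, 0, 5, 1, 4, 2};
  BitonicSort(keys);
  for (int64_t k : keys) std::printf("%lld ", static_cast<long long>(k));
  std::printf("\n");  // prints: 0 1 2 3 4 5 6 7
  return 0;
}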
diff --git a/tensorflow/compiler/xla/service/llvm_ir/sort_util.h b/tensorflow/compiler/xla/service/llvm_ir/sort_util.h
new file mode 100644
index 0000000000..e75f9b08fb
--- /dev/null
+++ b/tensorflow/compiler/xla/service/llvm_ir/sort_util.h
@@ -0,0 +1,39 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_SORT_UTIL_H_
+#define TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_SORT_UTIL_H_
+
+#include "llvm/IR/Value.h"
+#include "tensorflow/compiler/xla/service/gpu/partition_assignment.h"
+#include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"
+#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
+#include "tensorflow/core/platform/types.h"
+
+namespace xla {
+namespace llvm_ir {
+// Emits LLVM IR to do pairwise comparisons/swaps in the 'dimension_to_sort'
+// dimension of 'keys_array'. All other dimensions are kept as-is. This
+// implements the inner loop of BitonicSort. If 'launch_dimensions' is nullptr,
+// the inner compare loop will not be parallelized.
+Status EmitSortInPlace(int64 dimension_to_sort, const IrArray& keys_array,
+ tensorflow::StringPiece name, llvm::Value* xor_mask,
+ llvm::IRBuilder<>* b,
+ const gpu::LaunchDimensions* launch_dimensions);
+} // namespace llvm_ir
+} // namespace xla
+
+#endif // TENSORFLOW_COMPILER_XLA_SERVICE_LLVM_IR_SORT_UTIL_H_
diff --git a/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.cc b/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.cc
index 5fc08aab91..11ed6ee59f 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.cc
@@ -31,12 +31,12 @@ namespace llvm_ir {
void EmitTupleSelect(const IrArray& select, const IrArray& pred,
llvm::Value* on_true, llvm::Value* on_false,
- llvm::IRBuilder<>* ir_builder, llvm::Module* module) {
+ llvm::IRBuilder<>* b, llvm::Module* module) {
CHECK(ShapeUtil::IsScalar(pred.GetShape()));
llvm::LoadInst* pred_value =
- ir_builder->CreateLoad(pred.GetBasePointer(), "load_predicate_value");
- llvm::Value* pred_cond = ir_builder->CreateICmpNE(
+ b->CreateLoad(pred.GetBasePointer(), "load_predicate_value");
+ llvm::Value* pred_cond = b->CreateICmpNE(
pred_value,
llvm::ConstantInt::get(PrimitiveTypeToIrType(PRED, module), 0),
"boolean_predicate");
@@ -46,47 +46,42 @@ void EmitTupleSelect(const IrArray& select, const IrArray& pred,
VLOG(2) << " pred_cond: " << DumpToString(*pred_cond);
for (int i = 0; i < ShapeUtil::TupleElementCount(select.GetShape()); ++i) {
- llvm::Value* const element_index[] = {ir_builder->getInt64(0),
- ir_builder->getInt64(i)};
+ llvm::Value* const element_index[] = {b->getInt64(0), b->getInt64(i)};
llvm::Value* on_true_element_address =
- ir_builder->CreateInBoundsGEP(on_true, element_index);
- llvm::Value* on_true_element = ir_builder->CreateLoad(
+ b->CreateInBoundsGEP(on_true, element_index);
+ llvm::Value* on_true_element = b->CreateLoad(
on_true_element_address, "on_true_element_" + llvm::Twine(i));
llvm::Value* on_false_element_address =
- ir_builder->CreateInBoundsGEP(on_false, element_index);
- llvm::Value* on_false_element = ir_builder->CreateLoad(
+ b->CreateInBoundsGEP(on_false, element_index);
+ llvm::Value* on_false_element = b->CreateLoad(
on_false_element_address, "on_false_element_" + llvm::Twine(i));
llvm::Value* output_element_address =
- ir_builder->CreateInBoundsGEP(select.GetBasePointer(), element_index);
- ir_builder->CreateStore(
- ir_builder->CreateSelect(pred_cond, on_true_element, on_false_element,
- "select_output_element_" + llvm::Twine(i)),
- output_element_address);
+ b->CreateInBoundsGEP(select.GetBasePointer(), element_index);
+ b->CreateStore(b->CreateSelect(pred_cond, on_true_element, on_false_element,
+ "select_output_element_" + llvm::Twine(i)),
+ output_element_address);
}
}
void EmitTuple(const IrArray& tuple,
tensorflow::gtl::ArraySlice<llvm::Value*> operands,
- llvm::IRBuilder<>* ir_builder, llvm::Module* module) {
+ llvm::IRBuilder<>* b, llvm::Module* module) {
for (size_t i = 0; i < operands.size(); ++i) {
- auto* store = ir_builder->CreateStore(
- ir_builder->CreatePointerCast(operands[i],
- PrimitiveTypeToIrType(TUPLE, module)),
- ir_builder->CreateInBoundsGEP(
- tuple.GetBasePointer(),
- {ir_builder->getInt64(0), ir_builder->getInt64(i)}));
+ auto* store = b->CreateStore(
+ b->CreatePointerCast(operands[i], PrimitiveTypeToIrType(TUPLE, module)),
+ b->CreateInBoundsGEP(tuple.GetBasePointer(),
+ {b->getInt64(0), b->getInt64(i)}));
tuple.AnnotateLoadStoreInstructionWithMetadata(store);
}
}
llvm::Value* EmitGetTupleElement(const Shape& target_shape, int64 index,
int alignment, llvm::Value* operand,
- llvm::IRBuilder<>* ir_builder,
- llvm::Module* module) {
- llvm::Value* element_ptr = ir_builder->CreateInBoundsGEP(
- operand, {ir_builder->getInt64(0), ir_builder->getInt64(index)});
- llvm::LoadInst* src_buffer = ir_builder->CreateLoad(element_ptr);
+ llvm::IRBuilder<>* b, llvm::Module* module) {
+ llvm::Value* element_ptr =
+ b->CreateInBoundsGEP(operand, {b->getInt64(0), b->getInt64(index)});
+ llvm::LoadInst* src_buffer = b->CreateLoad(element_ptr);
// Mark the loaded pointer as dereferenceable if we know its shape.
if (!ShapeUtil::IsOpaque(target_shape)) {
@@ -98,7 +93,7 @@ llvm::Value* EmitGetTupleElement(const Shape& target_shape, int64 index,
llvm::Type* element_type = ShapeToIrType(target_shape, module);
llvm::Value* ret_val =
- ir_builder->CreateBitCast(src_buffer, element_type->getPointerTo());
+ b->CreateBitCast(src_buffer, element_type->getPointerTo());
return ret_val;
}
diff --git a/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.h b/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.h
index 352d34ebf8..cf6bf5d0b1 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/tuple_ops.h
@@ -61,13 +61,13 @@ namespace llvm_ir {
// output[i] = pred ? tuple_on_true[i] : tuple_on_false[i]
void EmitTupleSelect(const IrArray& select, const IrArray& pred,
llvm::Value* on_true, llvm::Value* on_false,
- llvm::IRBuilder<>* ir_builder, llvm::Module* module);
+ llvm::IRBuilder<>* b, llvm::Module* module);
// A tuple is an array of pointers, one for each operand. Each pointer points to
// the output buffer of its corresponding operand.
void EmitTuple(const IrArray& tuple,
tensorflow::gtl::ArraySlice<llvm::Value*> operands,
- llvm::IRBuilder<>* ir_builder, llvm::Module* module);
+ llvm::IRBuilder<>* b, llvm::Module* module);
// A tuple is an array of pointers, one for each operand. Each pointer points to
// the output buffer of its corresponding operand. A GetTupleElement instruction
@@ -75,8 +75,7 @@ void EmitTuple(const IrArray& tuple,
// Returns an llvm value representing a pointer to the tuple element buffer.
llvm::Value* EmitGetTupleElement(const Shape& target_shape, int64 index,
int alignment, llvm::Value* operand,
- llvm::IRBuilder<>* ir_builder,
- llvm::Module* module);
+ llvm::IRBuilder<>* b, llvm::Module* module);
} // namespace llvm_ir
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/platform_util.cc b/tensorflow/compiler/xla/service/platform_util.cc
index 7c63c0acc7..39fe3c7835 100644
--- a/tensorflow/compiler/xla/service/platform_util.cc
+++ b/tensorflow/compiler/xla/service/platform_util.cc
@@ -75,19 +75,6 @@ PlatformUtil::GetSupportedPlatforms() {
auto* platform = platform_pair.second;
auto compiler_status = Compiler::GetForPlatform(platform);
if (compiler_status.ok()) {
- if (platform->VisibleDeviceCount() > 0) {
- LOG(INFO) << "platform " << platform->Name() << " present with "
- << platform->VisibleDeviceCount() << " visible devices";
- } else {
- LOG(WARNING) << "platform " << platform->Name() << " present but no "
- << "visible devices found";
- }
- // Note: currently we call zero device platforms "supported" on the basis
- // that, if the platform support was linked in, it was probably intended
- // to be used for execution, and this way we can flag an error.
- //
- // TODO(b/33730287) If we want an alternative version of this behavior we
- // could add an --xla_fallback_to_host flag.
platforms.push_back(platform);
} else {
LOG(INFO) << "platform " << platform->Name() << " present but no "
diff --git a/tensorflow/compiler/xla/service/service.cc b/tensorflow/compiler/xla/service/service.cc
index da3b622bfa..636013cbb5 100644
--- a/tensorflow/compiler/xla/service/service.cc
+++ b/tensorflow/compiler/xla/service/service.cc
@@ -169,7 +169,8 @@ Service::Service(const ServiceOptions& options,
Status Service::CreateChannelHandle(const CreateChannelHandleRequest* arg,
CreateChannelHandleResponse* result) {
- *result->mutable_channel() = channel_tracker_.NewChannel();
+ TF_ASSIGN_OR_RETURN(*result->mutable_channel(),
+ channel_tracker_.NewChannel(arg->channel_type()));
return Status::OK();
}
diff --git a/tensorflow/compiler/xla/service/shape_inference.cc b/tensorflow/compiler/xla/service/shape_inference.cc
index 70edf7883f..35df792b07 100644
--- a/tensorflow/compiler/xla/service/shape_inference.cc
+++ b/tensorflow/compiler/xla/service/shape_inference.cc
@@ -222,13 +222,16 @@ StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape,
return shape;
case HloOpcode::kReal:
case HloOpcode::kImag:
- if (!ShapeUtil::ElementIsComplex(shape)) {
+ if (ShapeUtil::ElementIsComplex(shape)) {
+ return ShapeUtil::ComplexComponentShape(shape);
+ } else if (ShapeUtil::ElementIsFloating(shape)) {
+ return shape;
+ } else {
return InvalidArgument(
- "Expected element type in shape to be complex for real/imag "
- "operation; got %s.",
+ "Expected element type in shape to be floating or complex for "
+ "real/imag operation; got %s.",
PrimitiveType_Name(shape.element_type()).c_str());
}
- return ShapeUtil::ChangeElementType(shape, F32);
case HloOpcode::kAbs:
if (ShapeUtil::ElementIsComplex(shape)) {
return ShapeUtil::ChangeElementType(
@@ -967,6 +970,14 @@ ShapeInference::InferDegenerateDimensionBroadcastShape(HloOpcode operation,
if (operand_shapes.size() == 1) {
return *operand_shapes[0];
} else if (operand_shapes.size() == 2) {
+ if (!ShapeUtil::SameDimensions(*operand_shapes[0],
+ *operand_shapes[1])) {
+ return InvalidArgument(
+ "Sort keys and values dimensions must match. "
+ "Keys shape is: %s\n, Values shape is: %s",
+ ShapeUtil::HumanString(*operand_shapes[0]).c_str(),
+ ShapeUtil::HumanString(*operand_shapes[1]).c_str());
+ }
return ShapeUtil::MakeTupleShape(
{*operand_shapes[0], *operand_shapes[1]});
}
diff --git a/tensorflow/compiler/xla/service/shape_inference_test.cc b/tensorflow/compiler/xla/service/shape_inference_test.cc
index bafe14d6f4..6046d50c6d 100644
--- a/tensorflow/compiler/xla/service/shape_inference_test.cc
+++ b/tensorflow/compiler/xla/service/shape_inference_test.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include <string>
+#include "tensorflow/compiler/xla/service/hlo_instructions.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/test_helpers.h"
@@ -1523,6 +1524,18 @@ TEST_F(ShapeInferenceTest, BadSlice) {
<< statusor.status();
}
+TEST_F(ShapeInferenceTest, BadSort) {
+ auto keys = ShapeUtil::MakeShape(F32, {4});
+ auto values = ShapeUtil::MakeShape(F32, {5});
+ StatusOr<Shape> statusor =
+ ShapeInference::InferVariadicOpShape(HloOpcode::kSort, {&keys, &values});
+ ASSERT_FALSE(statusor.ok());
+
+ EXPECT_THAT(statusor.status().error_message(),
+ HasSubstr("dimensions must match"))
+ << statusor.status();
+}
+
class GatherShapeInferenceTest : public ShapeInferenceTest {
protected:
const Shape s64_scalar_ = ShapeUtil::MakeShape(S64, {});
@@ -1543,45 +1556,45 @@ class GatherShapeInferenceTest : public ShapeInferenceTest {
};
TEST_F(GatherShapeInferenceTest, TensorFlowGather) {
- TF_ASSERT_OK_AND_ASSIGN(
- Shape gather_shape,
- ShapeInference::InferGatherShape(matrix_64_48_, s64_vector_32_,
- HloInstruction::MakeGatherDimNumbers(
- /*output_window_dims=*/{0},
- /*elided_window_dims=*/{1},
- /*gather_dims_to_operand_dims=*/{1},
- /*index_vector_dim=*/1),
- /*window_bounds=*/{64, 1}));
+ TF_ASSERT_OK_AND_ASSIGN(Shape gather_shape,
+ ShapeInference::InferGatherShape(
+ matrix_64_48_, s64_vector_32_,
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/{0},
+ /*elided_window_dims=*/{1},
+ /*gather_dims_to_operand_dims=*/{1},
+ /*index_vector_dim=*/1),
+ /*window_bounds=*/{64, 1}));
EXPECT_TRUE(
ShapeUtil::Equal(gather_shape, ShapeUtil::MakeShape(F32, {64, 32})))
<< ShapeUtil::HumanString(gather_shape);
}
TEST_F(GatherShapeInferenceTest, TensorFlowGatherV2) {
- TF_ASSERT_OK_AND_ASSIGN(
- Shape gather_shape,
- ShapeInference::InferGatherShape(matrix_64_48_, s64_vector_32_,
- HloInstruction::MakeGatherDimNumbers(
- /*output_window_dims=*/{1},
- /*elided_window_dims=*/{0},
- /*gather_dims_to_operand_dims=*/{0},
- /*index_vector_dim=*/1),
- /*window_bounds=*/{1, 48}));
+ TF_ASSERT_OK_AND_ASSIGN(Shape gather_shape,
+ ShapeInference::InferGatherShape(
+ matrix_64_48_, s64_vector_32_,
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/{1},
+ /*elided_window_dims=*/{0},
+ /*gather_dims_to_operand_dims=*/{0},
+ /*index_vector_dim=*/1),
+ /*window_bounds=*/{1, 48}));
EXPECT_TRUE(
ShapeUtil::Equal(gather_shape, ShapeUtil::MakeShape(F32, {32, 48})))
<< ShapeUtil::HumanString(gather_shape);
}
TEST_F(GatherShapeInferenceTest, TensorFlowGatherNd) {
- TF_ASSERT_OK_AND_ASSIGN(
- Shape gather_shape,
- ShapeInference::InferGatherShape(matrix_64_48_, s64_4d_tensor_10_9_8_7_1_,
- HloInstruction::MakeGatherDimNumbers(
- /*output_window_dims=*/{4},
- /*elided_window_dims=*/{0},
- /*gather_dims_to_operand_dims=*/{0},
- /*index_vector_dim=*/4),
- /*window_bounds=*/{1, 48}));
+ TF_ASSERT_OK_AND_ASSIGN(Shape gather_shape,
+ ShapeInference::InferGatherShape(
+ matrix_64_48_, s64_4d_tensor_10_9_8_7_1_,
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/{4},
+ /*elided_window_dims=*/{0},
+ /*gather_dims_to_operand_dims=*/{0},
+ /*index_vector_dim=*/4),
+ /*window_bounds=*/{1, 48}));
EXPECT_TRUE(ShapeUtil::Equal(gather_shape,
ShapeUtil::MakeShape(F32, {10, 9, 8, 7, 48})))
<< ShapeUtil::HumanString(gather_shape);
@@ -1592,7 +1605,7 @@ TEST_F(GatherShapeInferenceTest, TensorFlowBatchDynamicSlice) {
Shape gather_shape,
ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1609,7 +1622,7 @@ TEST_F(GatherShapeInferenceTest, NonDefaultGatherIndicesLeafDim_A) {
Shape gather_shape,
ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_5_7_6_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1627,7 +1640,7 @@ TEST_F(GatherShapeInferenceTest, NonDefaultGatherIndicesLeafDim_B) {
Shape gather_shape,
ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_5_10_9_7_6_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1646,7 +1659,7 @@ TEST_F(GatherShapeInferenceTest, NoOutputGatherDims) {
Shape gather_shape,
ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_vector_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{0, 1, 2, 3, 4},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1664,7 +1677,7 @@ TEST_F(GatherShapeInferenceTest, ScalarGatherIndices) {
TF_ASSERT_OK_AND_ASSIGN(Shape gather_shape,
ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_scalar_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{0, 1, 2, 3},
/*elided_window_dims=*/{0},
/*gather_dims_to_operand_dims=*/{0},
@@ -1679,10 +1692,11 @@ TEST_F(GatherShapeInferenceTest, ScalarGatherIndices) {
TEST_F(GatherShapeInferenceTest, TupleShapedTensorInput) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
tuple_shape_, s64_vector_32_,
- HloInstruction::MakeGatherDimNumbers(/*output_window_dims=*/{0},
- /*elided_window_dims=*/{1},
- /*gather_dims_to_operand_dims=*/{1},
- /*index_vector_dim=*/1),
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/{0},
+ /*elided_window_dims=*/{1},
+ /*gather_dims_to_operand_dims=*/{1},
+ /*index_vector_dim=*/1),
/*window_bounds=*/{64, 1});
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().error_message(),
@@ -1693,10 +1707,11 @@ TEST_F(GatherShapeInferenceTest, TupleShapedTensorInput) {
TEST_F(GatherShapeInferenceTest, TupleShapedGatherIndicesInput) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
s64_vector_32_, tuple_shape_,
- HloInstruction::MakeGatherDimNumbers(/*output_window_dims=*/{0},
- /*elided_window_dims=*/{1},
- /*gather_dims_to_operand_dims=*/{1},
- /*index_vector_dim=*/0),
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/{0},
+ /*elided_window_dims=*/{1},
+ /*gather_dims_to_operand_dims=*/{1},
+ /*index_vector_dim=*/0),
/*window_bounds=*/{64, 1});
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().error_message(),
@@ -1707,10 +1722,11 @@ TEST_F(GatherShapeInferenceTest, TupleShapedGatherIndicesInput) {
TEST_F(GatherShapeInferenceTest, FloatingPointGatherIndicesInput) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
s64_vector_32_, vector_32_,
- HloInstruction::MakeGatherDimNumbers(/*output_window_dims=*/{0},
- /*elided_window_dims=*/{1},
- /*gather_dims_to_operand_dims=*/{1},
- /*index_vector_dim=*/0),
+ HloGatherInstruction::MakeGatherDimNumbers(
+ /*output_window_dims=*/{0},
+ /*elided_window_dims=*/{1},
+ /*gather_dims_to_operand_dims=*/{1},
+ /*index_vector_dim=*/0),
/*window_bounds=*/{64, 1});
ASSERT_FALSE(statusor.ok());
EXPECT_THAT(statusor.status().error_message(),
@@ -1722,7 +1738,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_NonAscendingWindowIndices) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 8, 7},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1739,7 +1755,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_RepeatedWindowIndices) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 7},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1756,7 +1772,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_WindowIndexOutOfBounds) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 99, 100, 101},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1772,7 +1788,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_WindowIndexBarelyOutOfBounds) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 9},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1788,7 +1804,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_MismatchingElidedWindowDims) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{4},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1806,7 +1822,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_OutOfBoundsWindowToInputMapping) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{0, 1, 2, 3, 19},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1823,7 +1839,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_RepeatedWindowToInputMapping) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{0, 1, 2, 3, 3},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1841,7 +1857,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_MismatchingGatherToInputMapping) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3},
@@ -1860,7 +1876,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_OutOfBoundsGatherToInputMapping) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 7},
@@ -1878,7 +1894,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_RepeatedGatherToInputMapping) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 3},
@@ -1896,7 +1912,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_NonAscendingElidedWindowDims) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{2, 1},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1911,7 +1927,7 @@ TEST_F(GatherShapeInferenceTest,
TEST_F(GatherShapeInferenceTest, InvalidGatherDimNumbers_WindowBoundsTooLarge) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7},
/*elided_window_dims=*/{2},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1928,7 +1944,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_MismatchingNumberOfWindowBounds) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1946,7 +1962,7 @@ TEST_F(GatherShapeInferenceTest,
InvalidGatherDimNumbers_WindowBoundsNot1ForElidedDim) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_8_7_5_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7},
/*elided_window_dims=*/{1},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
@@ -1962,7 +1978,7 @@ TEST_F(GatherShapeInferenceTest,
TEST_F(GatherShapeInferenceTest, OutOfBoundsGatherIndicesLeafDim) {
StatusOr<Shape> statusor = ShapeInference::InferGatherShape(
f32_5d_tensor_50_49_48_47_46_, s64_4d_tensor_10_9_5_7_6_,
- HloInstruction::MakeGatherDimNumbers(
+ HloGatherInstruction::MakeGatherDimNumbers(
/*output_window_dims=*/{4, 5, 6, 7, 8},
/*elided_window_dims=*/{},
/*gather_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
diff --git a/tensorflow/compiler/xla/service/transfer_manager.h b/tensorflow/compiler/xla/service/transfer_manager.h
index 249bdcc1f5..82c599e482 100644
--- a/tensorflow/compiler/xla/service/transfer_manager.h
+++ b/tensorflow/compiler/xla/service/transfer_manager.h
@@ -167,16 +167,6 @@ class TransferManager {
const se::Platform* platform);
protected:
- // Transfer a memory block of the given size from 'source' buffer to the
- // Infeed interface of the device using the given executor.
- //
- // size is the size to transfer from source in bytes.
- //
- // source is the source data that must be in the target-dependent layout that
- // the Infeed HLO used in the computation expects.
- virtual Status TransferBufferToInfeed(se::StreamExecutor* executor,
- int64 size, const void* source) = 0;
-
// Transfer a memory block of the given size from the device source into the
// 'destination' buffer.
//
diff --git a/tensorflow/compiler/xla/service/while_loop_constant_sinking.cc b/tensorflow/compiler/xla/service/while_loop_constant_sinking.cc
index 10fc4958fa..62af45128a 100644
--- a/tensorflow/compiler/xla/service/while_loop_constant_sinking.cc
+++ b/tensorflow/compiler/xla/service/while_loop_constant_sinking.cc
@@ -61,6 +61,12 @@ StatusOr<bool> WhileLoopConstantSinking::TrySinkingConstantsIntoWhileBody(
WhileUtil::GetInvariantGTEsForWhileBody(*while_body)) {
int64 index = invariant_gte->tuple_index();
const HloInstruction& invariant_value = *init_value.operand(index);
+
+ // Only sink the constant if this GTE has at least one user other than
+ // while_body_root; otherwise sinking would just create a dead constant.
+ if (invariant_gte->user_count() <= 1) {
+ continue;
+ }
+
if (invariant_value.opcode() == HloOpcode::kConstant) {
auto* constant_instr =
while_body->AddInstruction(invariant_value.Clone(/*suffix=*/".sunk"));
diff --git a/tensorflow/compiler/xla/service/while_loop_constant_sinking_test.cc b/tensorflow/compiler/xla/service/while_loop_constant_sinking_test.cc
index 393e758038..266039d2ff 100644
--- a/tensorflow/compiler/xla/service/while_loop_constant_sinking_test.cc
+++ b/tensorflow/compiler/xla/service/while_loop_constant_sinking_test.cc
@@ -196,5 +196,50 @@ ENTRY entry {
op::GetTupleElement(op::Parameter(0)),
op::GetTupleElement(op::Parameter(0))));
}
+
+TEST_F(WhileLoopConstantSinkingTest, DontCreateDeadConstant) {
+ const char* const hlo_string = R"(
+HloModule ModuleWithWhile
+
+body {
+ p_body = (f32[2],f32[2]) parameter(0)
+ p_body.0 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=0
+ p_body.1 = f32[2] get-tuple-element((f32[2],f32[2]) p_body), index=1
+
+ outfeed = token[] outfeed(p_body.0)
+ ROOT root = (f32[2],f32[2],f32[2]) tuple(p_body.0, p_body.1, p_body.1)
+}
+
+condition {
+ p_cond = (f32[2],f32[2]) parameter(0)
+ ROOT result = pred[] constant(true)
+}
+
+ENTRY entry {
+ const_0 = f32[2] constant({1, 2})
+ const_1 = f32[2] constant({2, 1})
+ while_init = (f32[2],f32[2]) tuple(const_0, const_1)
+ ROOT while = (f32[2],f32[2],f32[2]) while(while_init), condition=condition,
+ body=body
+}
+)";
+
+ TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
+ ParseHloString(hlo_string));
+
+ TF_ASSERT_OK_AND_ASSIGN(bool changed,
+ WhileLoopConstantSinking{}.Run(module.get()));
+ ASSERT_TRUE(changed);
+
+ auto* while_body = module->GetComputationWithName("body");
+ EXPECT_THAT(while_body->root_instruction(),
+ op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
+ op::GetTupleElement()));
+ for (const HloInstruction* inst : while_body->instructions()) {
+ if (inst->opcode() == HloOpcode::kConstant) {
+ EXPECT_GT(inst->user_count(), 0);
+ }
+ }
+}
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/shape_tree.h b/tensorflow/compiler/xla/shape_tree.h
index 4aacc87b78..c74dd648ad 100644
--- a/tensorflow/compiler/xla/shape_tree.h
+++ b/tensorflow/compiler/xla/shape_tree.h
@@ -44,10 +44,6 @@ struct ShapeTreeNode {
// Data corresponding to this node.
std::pair<ShapeIndex, T> data;
- // Children of this node, as indices into the container's nodes_ array.
- std::vector<size_t> children;
-
- // Tells whether this is a leaf node.
bool is_leaf = true;
explicit ShapeTreeNode(ShapeIndex index)
@@ -56,6 +52,20 @@ struct ShapeTreeNode {
: data(std::move(index), std::move(data)) {}
};
+// Internal representation of an index table entry.
+struct IndexTableEntry {
+ // Index of the node in the ShapeTreeNode vector.
+ uint32 index;
+ // Index of the first child in an IndexTableEntry vector. In the index
+ // table, all child entries for a given node are placed next to each
+ // other, which lets a single field index all of them.
+ uint32 children_start;
+#ifndef NDEBUG
+ // Number of children, used for bounds checking.
+ uint32 children_count;
+#endif
+};
+
} // namespace internal
template <typename ContainerType, typename IteratorType, typename ValueType>
@@ -84,6 +94,7 @@ template <typename T>
class ShapeTree {
public:
using Node = internal::ShapeTreeNode<T>;
+ using Index = internal::IndexTableEntry;
// Default constructor creates a tree with a nil shape (i.e. an empty tuple).
ShapeTree() : ShapeTree(ShapeUtil::MakeNil()) {}
@@ -267,11 +278,12 @@ class ShapeTree {
private:
// Initialize node->children based on 'shape'. All children are assigned
// the given 'init_value'.
- void InitChildren(const Shape& shape, const T& init_value, Node* node);
+ void InitChildren(const Shape& shape, const T& init_value, Node* node,
+ Index* index);
// Initialize node->children based on 'shape'. All children have
// default-constructed data values.
- void InitChildren(const Shape& shape, Node* node);
+ void InitChildren(const Shape& shape, Node* node, Index* index);
// Returns the number of subshapes, including interior nodes, in shape.
int64 CountSubshapes(const Shape& shape);
@@ -291,6 +303,9 @@ class ShapeTree {
// The nodes in this shape tree.
std::vector<Node> nodes_;
+ // Index table for node lookups.
+ std::vector<Index> index_table_;
+
// If we own our Shape, this field contains it, and shape_ is a pointer into
// here. Otherwise if we don't own our shape, this is nullptr.
std::shared_ptr<Shape> shape_storage_;
@@ -373,36 +388,74 @@ int64 ShapeTree<T>::CountSubshapes(const Shape& shape) {
template <typename T>
void ShapeTree<T>::InitChildren(const Shape& shape, const T& init_value,
- Node* node) {
+ Node* node, Index* index) {
if (ShapeUtil::IsTuple(shape)) {
const int64 size = ShapeUtil::TupleElementCount(shape);
- node->children.reserve(size);
+#ifndef NDEBUG
+ index->children_count = size;
+#endif
node->is_leaf = false;
ShapeIndex shape_index = node->data.first;
shape_index.push_back(0);
+
+ // At the end of the index_table, reserve a contiguous block to hold the
+ // children of the current node. To enforce the invariant that all children
+ // of a given node are placed together, the reservation must happen before
+ // we recurse into any of the children.
+ int64 children_start_position = index_table_.size();
+ index_table_.resize(index_table_.size() + size);
+
for (int i = 0; i < size; ++i) {
shape_index[shape_index.size() - 1] = i;
- node->children.push_back(nodes_.size());
+ index_table_[children_start_position + i].index = nodes_.size();
+ // This child's own children (if any) will be appended at the current end
+ // of the index table when we recurse into it below.
+ index_table_[children_start_position + i].children_start =
+ index_table_.size();
nodes_.emplace_back(shape_index, init_value);
- InitChildren(shape.tuple_shapes(i), init_value, &nodes_.back());
+ InitChildren(shape.tuple_shapes(i), init_value, &nodes_.back(),
+ &index_table_[children_start_position + i]);
}
+ } else {
+#ifndef NDEBUG
+ index->children_count = 0;
+#endif
}
}
template <typename T>
-void ShapeTree<T>::InitChildren(const Shape& shape, Node* node) {
+void ShapeTree<T>::InitChildren(const Shape& shape, Node* node, Index* index) {
if (ShapeUtil::IsTuple(shape)) {
const int64 size = ShapeUtil::TupleElementCount(shape);
- node->children.reserve(size);
+#ifndef NDEBUG
+ index->children_count = size;
+#endif
node->is_leaf = false;
ShapeIndex shape_index = node->data.first;
shape_index.push_back(0);
+
+ // At the end of the index_table, reserve a contiguous block to hold the
+ // children of the current node. To enforce the invariant that all children
+ // of a given node are placed together, the reservation must happen before
+ // we recurse into any of the children.
+ int64 children_start_position = index_table_.size();
+ index_table_.resize(index_table_.size() + size);
+
for (int i = 0; i < size; ++i) {
shape_index[shape_index.size() - 1] = i;
- node->children.push_back(nodes_.size());
+ index_table_[children_start_position + i].index = nodes_.size();
+ // This child's own children (if any) will be appended at the current end
+ // of the index table when we recurse into it below.
+ index_table_[children_start_position + i].children_start =
+ index_table_.size();
nodes_.emplace_back(shape_index);
- InitChildren(shape.tuple_shapes(i), &nodes_.back());
+ InitChildren(shape.tuple_shapes(i), &nodes_.back(),
+ &index_table_[children_start_position + i]);
}
+ } else {
+#ifndef NDEBUG
+ index->children_count = 0;
+#endif
}
}
@@ -413,24 +466,36 @@ ShapeTree<T>::ShapeTree(Shape shape)
// The shape_ field is just used to hold the structure of the shape.
// It should not be relied upon to store layout information.
LayoutUtil::ClearLayout(shape_storage_.get());
- nodes_.reserve(CountSubshapes(*shape_));
+ const int64 count = CountSubshapes(*shape_);
+ nodes_.reserve(count);
nodes_.emplace_back(ShapeIndex{});
- InitChildren(*shape_, &nodes_[0]);
+
+ index_table_.reserve(count);
+ index_table_.emplace_back(Index{0, 1});
+ InitChildren(*shape_, &nodes_[0], &index_table_[0]);
}
template <typename T>
ShapeTree<T>::ShapeTree(const Shape* shape) : shape_(shape) {
- nodes_.reserve(CountSubshapes(*shape_));
+ const int64 count = CountSubshapes(*shape_);
+ nodes_.reserve(count);
nodes_.emplace_back(ShapeIndex{});
- InitChildren(*shape_, &nodes_[0]);
+
+ index_table_.reserve(count);
+ index_table_.emplace_back(Index{0, 1});
+ InitChildren(*shape_, &nodes_[0], &index_table_[0]);
}
template <typename T>
ShapeTree<T>::ShapeTree(const std::shared_ptr<Shape>& shape)
: shape_storage_(shape), shape_(shape_storage_.get()) {
- nodes_.reserve(CountSubshapes(*shape_));
+ const int64 count = CountSubshapes(*shape_);
+ nodes_.reserve(count);
nodes_.emplace_back(ShapeIndex{});
- InitChildren(*shape_, &nodes_[0]);
+
+ index_table_.reserve(count);
+ index_table_.emplace_back(Index{0, 1});
+ InitChildren(*shape_, &nodes_[0], &index_table_[0]);
}
template <typename T>
@@ -440,26 +505,38 @@ ShapeTree<T>::ShapeTree(Shape shape, const T& init_value)
// The shape_ field is just used to hold the structure of the shape.
// It should not be relied upon to store layout information.
LayoutUtil::ClearLayout(shape_storage_.get());
- nodes_.reserve(CountSubshapes(*shape_));
+ const int64 count = CountSubshapes(*shape_);
+ nodes_.reserve(count);
nodes_.emplace_back(ShapeIndex{}, init_value);
- InitChildren(*shape_, init_value, &nodes_[0]);
+
+ index_table_.reserve(count);
+ index_table_.emplace_back(Index{0, 1});
+ InitChildren(*shape_, init_value, &nodes_[0], &index_table_[0]);
}
template <typename T>
ShapeTree<T>::ShapeTree(const Shape* shape, const T& init_value)
: shape_(shape) {
- nodes_.reserve(CountSubshapes(*shape_));
+ const int64 count = CountSubshapes(*shape_);
+ nodes_.reserve(count);
nodes_.emplace_back(ShapeIndex{}, init_value);
- InitChildren(*shape_, init_value, &nodes_[0]);
+
+ index_table_.reserve(count);
+ index_table_.emplace_back(Index{0, 1});
+ InitChildren(*shape_, init_value, &nodes_[0], &index_table_[0]);
}
template <typename T>
ShapeTree<T>::ShapeTree(const std::shared_ptr<Shape>& shape,
const T& init_value)
: shape_storage_(shape), shape_(shape_storage_.get()) {
- nodes_.reserve(CountSubshapes(*shape_));
+ const int64 count = CountSubshapes(*shape_);
+ nodes_.reserve(count);
nodes_.emplace_back(ShapeIndex{}, init_value);
- InitChildren(*shape_, init_value, &nodes_[0]);
+
+ index_table_.reserve(count);
+ index_table_.emplace_back(Index{0, 1});
+ InitChildren(*shape_, init_value, &nodes_[0], &index_table_[0]);
}
template <typename T>
@@ -474,13 +551,16 @@ T* ShapeTree<T>::mutable_element(ShapeIndexView index) {
template <typename T>
internal::ShapeTreeNode<T>* ShapeTree<T>::Lookup(ShapeIndexView index) {
- Node* node = &nodes_[0];
+ Index* iter = &index_table_[0];
for (const int64 i : index) {
CHECK_GE(i, 0);
- CHECK_LT(i, node->children.size());
- node = &nodes_[node->children[i]];
+#ifndef NDEBUG
+ CHECK_LT(i, iter->children_count);
+#endif
+ iter = &index_table_[iter->children_start + i];
}
- return node;
+
+ return &nodes_[iter->index];
}
template <typename T>
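With this change each ShapeTreeNode no longer carries its own children vector; instead a flat index_table_ records, for every subshape, where its node lives in nodes_ and where its run of children starts, and Lookup just hops through children_start + i. The standalone sketch below reproduces that layout by hand for the tuple shape ((a, b), c); the Entry struct and the string payloads are illustrative stand-ins, not the real internals.

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

// Hypothetical miniature of the new layout: nodes_ stores payloads in creation
// order, index_table_ stores (node index, first-child position) pairs, and all
// children of one node occupy a contiguous run of index_table_.
struct Entry {
  uint32_t node;            // position in nodes_
  uint32_t children_start;  // position of this node's first child in the table
};

// Tree used below:            index path
//   root          (entry 0)   {}
//   +- tuple      (entry 1)   {0}
//   |   +- leaf   (entry 3)   {0,0}
//   |   +- leaf   (entry 4)   {0,1}
//   +- leaf       (entry 2)   {1}
int main() {
  std::vector<std::string> nodes = {"root", "{0}", "{0,0}", "{0,1}", "{1}"};
  std::vector<Entry> index_table = {
      {0, 1},  // root: its children occupy entries [1, 2]
      {1, 3},  // {0}: its children occupy entries [3, 4]
      {4, 5},  // {1}: leaf, children_start never dereferenced
      {2, 5},  // {0,0}: leaf
      {3, 5},  // {0,1}: leaf
  };

  // Lookup of index {0, 1}: follow children_start + i at every step, then read
  // the node the final entry points at -- no per-node child vector required.
  std::vector<int64_t> index = {0, 1};
  const Entry* iter = &index_table[0];
  for (int64_t i : index) {
    iter = &index_table[iter->children_start + i];
  }
  assert(nodes[iter->node] == "{0,1}");
  return 0;
}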
diff --git a/tensorflow/compiler/xla/shape_tree_test.cc b/tensorflow/compiler/xla/shape_tree_test.cc
index 51de82e957..4391078b64 100644
--- a/tensorflow/compiler/xla/shape_tree_test.cc
+++ b/tensorflow/compiler/xla/shape_tree_test.cc
@@ -227,14 +227,16 @@ TEST_F(ShapeTreeTest, NestedTupleShape) {
TEST_F(ShapeTreeTest, InvalidIndexingTuple) {
ShapeTree<int> shape_tree{tuple_shape_};
-
+#ifndef NDEBUG
EXPECT_DEATH(shape_tree.element({4}), "");
+#endif
}
TEST_F(ShapeTreeTest, InvalidIndexingNestedTuple) {
ShapeTree<int> shape_tree{nested_tuple_shape_};
-
+#ifndef NDEBUG
EXPECT_DEATH(shape_tree.element({0, 0}), "");
+#endif
}
TEST_F(ShapeTreeTest, ShapeTreeOfNonCopyableType) {
@@ -602,12 +604,15 @@ void BM_Iterate(int iters, int depth, int fan_out) {
}
}
-BENCHMARK(BM_Construct)->ArgPair(2, 8);
-BENCHMARK(BM_ConstructUnowned)->ArgPair(2, 8);
-BENCHMARK(BM_Copy)->ArgPair(2, 8);
-BENCHMARK(BM_Move)->ArgPair(2, 8);
-BENCHMARK(BM_ForEach)->ArgPair(2, 8);
-BENCHMARK(BM_Iterate)->ArgPair(2, 8);
+#define BENCHMARK_WITH_ARGS(name) \
+ BENCHMARK(name)->ArgPair(2, 8)->ArgPair(1, 1000)
+
+BENCHMARK_WITH_ARGS(BM_Construct);
+BENCHMARK_WITH_ARGS(BM_ConstructUnowned);
+BENCHMARK_WITH_ARGS(BM_Copy);
+BENCHMARK_WITH_ARGS(BM_Move);
+BENCHMARK_WITH_ARGS(BM_ForEach);
+BENCHMARK_WITH_ARGS(BM_Iterate);
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/shape_util.cc b/tensorflow/compiler/xla/shape_util.cc
index f4668c0f55..ec901af1e2 100644
--- a/tensorflow/compiler/xla/shape_util.cc
+++ b/tensorflow/compiler/xla/shape_util.cc
@@ -682,7 +682,7 @@ StatusOr<Shape> ParseShapeStringInternal(tensorflow::StringPiece* s) {
CompatibleIgnoringElementType);
} else {
- // Opaque, token, etc types are vacuously compatible.
+ // Opaque, token, etc. types are compatible only if their element types
+ // match exactly.
- return true;
+ return lhs.element_type() == rhs.element_type();
}
}
@@ -697,7 +697,7 @@ StatusOr<Shape> ParseShapeStringInternal(tensorflow::StringPiece* s) {
CompatibleIgnoringFpPrecision);
} else {
- // Opaque, token, etc types are vacuously compatible.
+ // Opaque, token, etc. types are compatible only if their element types
+ // match exactly.
- return true;
+ return lhs.element_type() == rhs.element_type();
}
}
@@ -883,40 +883,51 @@ StatusOr<Shape> ParseShapeStringInternal(tensorflow::StringPiece* s) {
}
int64 shape_size = [&shape]() {
- int64 shape_size;
if (LayoutUtil::IsSparseArray(shape)) {
- shape_size = LayoutUtil::MaxSparseElements(shape.layout());
- if (shape_size < 0) {
- return shape_size;
+ int64 max_sparse_elements = LayoutUtil::MaxSparseElements(shape.layout());
+ if (max_sparse_elements < 0) {
+ return max_sparse_elements;
+ }
+ int64 sparse_elements_size = MultiplyWithoutOverflow(
+ max_sparse_elements, ByteSizeOfPrimitiveType(shape.element_type()));
+ if (sparse_elements_size < 0) {
+ return sparse_elements_size;
}
- shape_size = MultiplyWithoutOverflow(shape_size, ShapeUtil::Rank(shape));
- if (shape_size < 0) {
- return shape_size;
+ int64 sparse_indices_size =
+ MultiplyWithoutOverflow(max_sparse_elements, ShapeUtil::Rank(shape));
+ if (sparse_indices_size < 0) {
+ return sparse_indices_size;
}
- shape_size = MultiplyWithoutOverflow(shape_size, sizeof(int64));
- if (shape_size < 0) {
- return shape_size;
+ sparse_indices_size =
+ MultiplyWithoutOverflow(sparse_indices_size, sizeof(int64));
+ if (sparse_indices_size < 0) {
+ return sparse_indices_size;
+ }
+ // At this point, both sparse_indices_size and sparse_elements_size are
+ // non-negative, so we can easily check if adding them wraps.
+ if (static_cast<uint64>(sparse_elements_size) +
+ static_cast<uint64>(sparse_indices_size) >
+ INT64_MAX) {
+ return static_cast<int64>(-1);
}
}
- shape_size = 1;
-
// This is intentionally unconditional: even if the shape is sparse, we want
// to verify the densified version has a reasonable size.
+ int64 dense_shape_size = 1;
if (shape.dimensions().empty()) {
- return shape_size;
+ return dense_shape_size;
}
for (int64 dim : shape.dimensions()) {
- shape_size = MultiplyWithoutOverflow(shape_size, dim);
- if (shape_size < 0) {
- return shape_size;
+ dense_shape_size = MultiplyWithoutOverflow(dense_shape_size, dim);
+ if (dense_shape_size < 0) {
+ return dense_shape_size;
}
}
- shape_size = MultiplyWithoutOverflow(
- shape_size, ByteSizeOfPrimitiveType(shape.element_type()));
-
- return shape_size;
+ dense_shape_size = MultiplyWithoutOverflow(
+ dense_shape_size, ByteSizeOfPrimitiveType(shape.element_type()));
+ return dense_shape_size;
}();
if (shape_size < 0) {
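The rewritten lambda keeps the sparse and dense byte counts in separately named variables, treats any negative value from MultiplyWithoutOverflow as overflow and returns it immediately, and checks the final addition by widening to uint64. Here is a minimal sketch of the dense path, assuming only that the multiply helper reports overflow with a negative result (rebuilt below with the GCC/Clang builtin __builtin_mul_overflow):

#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for MultiplyWithoutOverflow: non-negative product, or -1 on overflow.
int64_t MulNoOverflow(int64_t x, int64_t y) {
  int64_t out;
  return __builtin_mul_overflow(x, y, &out) ? -1 : out;
}

// Dense byte size of an array shape; a negative result means "too large".
int64_t DenseByteSize(const std::vector<int64_t>& dims,
                      int64_t bytes_per_element) {
  int64_t size = 1;
  for (int64_t dim : dims) {
    size = MulNoOverflow(size, dim);
    if (size < 0) return size;  // propagate overflow immediately
  }
  return MulNoOverflow(size, bytes_per_element);
}

int main() {
  std::printf("%lld\n", (long long)DenseByteSize({2, 3, 4}, 4));              // 96
  std::printf("%lld\n", (long long)DenseByteSize({1LL << 40, 1LL << 40}, 4)); // -1
  return 0;
}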
diff --git a/tensorflow/compiler/xla/shape_util.h b/tensorflow/compiler/xla/shape_util.h
index d576be724e..d6f17fc965 100644
--- a/tensorflow/compiler/xla/shape_util.h
+++ b/tensorflow/compiler/xla/shape_util.h
@@ -31,6 +31,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
+#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/gtl/optional.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
@@ -73,10 +74,12 @@ class ShapeIndex {
// push_front is O(n^2), but shapes don't usually have a ton of dimensions.
void push_front(int64 value) { indices_.insert(indices_.begin(), value); }
- std::vector<int64>::const_iterator begin() const { return indices_.begin(); }
- std::vector<int64>::const_iterator end() const { return indices_.end(); }
- std::vector<int64>::iterator begin() { return indices_.begin(); }
- std::vector<int64>::iterator end() { return indices_.end(); }
+ using container_type = tensorflow::gtl::InlinedVector<int64, 2>;
+
+ container_type::const_iterator begin() const { return indices_.begin(); }
+ container_type::const_iterator end() const { return indices_.end(); }
+ container_type::iterator begin() { return indices_.begin(); }
+ container_type::iterator end() { return indices_.end(); }
const int64* data() const { return indices_.data(); }
@@ -97,7 +100,7 @@ class ShapeIndex {
string ToString() const;
private:
- std::vector<int64> indices_;
+ container_type indices_;
};
// A view into a ShapeIndex as above, with the cheap/easy ability to consume the
@@ -110,7 +113,7 @@ class ShapeIndex {
class ShapeIndexView {
public:
ShapeIndexView(const ShapeIndex& shape_index, int64 offset = 0)
- : indices_(shape_index.data() + offset, shape_index.size()) {
+ : indices_(shape_index.data() + offset, shape_index.size() - offset) {
CHECK_LE(offset, shape_index.size());
}
ShapeIndexView(std::initializer_list<int64> indices) : indices_(indices) {}
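The one-character-looking change to ShapeIndexView is a real bug fix: pairing data() + offset with the full shape_index.size() made every offset view claim too many elements and read past the end of the underlying index (the new ShapeIndexViewTest in shape_util_test.cc pins the corrected behavior). A tiny pointer-plus-length sketch of the same fix:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Minimal (pointer, length) view over an index, showing why the length has to
// shrink by 'offset' along with the data pointer.
struct IndexView {
  const int64_t* data;
  size_t size;
  IndexView(const std::vector<int64_t>& index, size_t offset)
      : data(index.data() + offset), size(index.size() - offset) {}
};

int main() {
  std::vector<int64_t> index = {1, 2, 3, 4};
  IndexView view(index, /*offset=*/1);
  // With the old "index.size()" length this view would claim 4 elements and
  // read one past the end of 'index'.
  assert(view.size == 3);
  assert(view.data[0] == 2 && view.data[2] == 4);
  return 0;
}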
diff --git a/tensorflow/compiler/xla/shape_util_test.cc b/tensorflow/compiler/xla/shape_util_test.cc
index 6cdb46d674..e5dd62ae9a 100644
--- a/tensorflow/compiler/xla/shape_util_test.cc
+++ b/tensorflow/compiler/xla/shape_util_test.cc
@@ -31,6 +31,15 @@ namespace {
using ::testing::ElementsAre;
+TEST(ShapeUtilTest, ShapeIndexViewTest) {
+ ShapeIndex index = {1, 2, 3, 4};
+ ShapeIndexView index_view(index, 1);
+ EXPECT_EQ(3, index_view.size());
+ EXPECT_EQ(ShapeIndexView({2, 3, 4}), index_view);
+ EXPECT_EQ(ShapeIndexView({3, 4}), index_view.ConsumeFront());
+ EXPECT_EQ(ShapeIndexView({2, 3}), index_view.ConsumeBack());
+}
+
TEST(ShapeUtilTest, GetDimensionHelperCanNegativeIndex) {
Shape matrix = ShapeUtil::MakeShape(F32, {2, 3});
EXPECT_EQ(3, ShapeUtil::GetDimension(matrix, -1));
@@ -325,6 +334,17 @@ TEST(ShapeUtilTest, IncompatibleScalarVsTuple) {
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape2, shape1));
}
+TEST(ShapeUtilTest, OpaqueVsArray) {
+ Shape shape1 = ShapeUtil::MakeShape(F32, {5, 7});
+ Shape shape2 = ShapeUtil::MakeOpaqueShape();
+ EXPECT_FALSE(ShapeUtil::Compatible(shape1, shape2));
+ EXPECT_FALSE(ShapeUtil::Compatible(shape2, shape1));
+ EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
+ EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape2, shape1));
+ EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape1, shape2));
+ EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape2, shape1));
+}
+
TEST(ShapeUtilTest, CompareShapesWithPaddedDimensionsMismatch) {
Shape shape1 = ShapeUtil::MakeShape(F32, {20, 30});
shape1.mutable_layout()->add_padded_dimensions(10);
diff --git a/tensorflow/compiler/xla/tests/BUILD b/tensorflow/compiler/xla/tests/BUILD
index 6a75aa6794..e840067056 100644
--- a/tensorflow/compiler/xla/tests/BUILD
+++ b/tensorflow/compiler/xla/tests/BUILD
@@ -2060,3 +2060,23 @@ xla_test(
"//tensorflow/core:test",
],
)
+
+xla_test(
+ name = "iota_test",
+ srcs = ["iota_test.cc"],
+ blacklisted_backends = [
+ "cpu",
+ "gpu",
+ ],
+ tags = [
+ "enable_for_xla_interpreter",
+ ],
+ deps = [
+ ":client_library_test_base",
+ ":literal_test_util",
+ ":xla_internal_test_main",
+ "//tensorflow/compiler/xla/client/xla_client:xla_builder",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:test",
+ ],
+)
diff --git a/tensorflow/compiler/xla/tests/client_library_test_base.cc b/tensorflow/compiler/xla/tests/client_library_test_base.cc
index ef784da457..7a2e70d39f 100644
--- a/tensorflow/compiler/xla/tests/client_library_test_base.cc
+++ b/tensorflow/compiler/xla/tests/client_library_test_base.cc
@@ -273,10 +273,16 @@ Status ClientLibraryTestBase::ComputeAndCompareLiteralWithStatus(
const Shape* shape_with_layout) {
std::vector<GlobalData*> arguments(arguments_passed_in.begin(),
arguments_passed_in.end());
+
+ // Transfer and use elements of arguments_, if the AddParam() API was used.
+ std::vector<std::unique_ptr<GlobalData>> owning_arguments;
if (!arguments_.empty()) {
CHECK(arguments.empty());
for (const auto& argument : arguments_) {
- arguments.push_back(argument.get());
+ owning_arguments.push_back(
+ client_->TransferToServer(MaybeConvertLiteralToBfloat16(argument))
+ .ValueOrDie());
+ arguments.push_back(owning_arguments.back().get());
}
}
@@ -331,10 +337,16 @@ Status ClientLibraryTestBase::ComputeAndCompareLiteralWithStatus(
ErrorSpec error, const Shape* shape_with_layout) {
std::vector<GlobalData*> arguments(arguments_passed_in.begin(),
arguments_passed_in.end());
+
+ // Transfer and use elements of arguments_, if the AddParam() API was used.
+ std::vector<std::unique_ptr<GlobalData>> owning_arguments;
if (!arguments_.empty()) {
CHECK(arguments.empty());
for (const auto& argument : arguments_) {
- arguments.push_back(argument.get());
+ owning_arguments.push_back(
+ client_->TransferToServer(MaybeConvertLiteralToBfloat16(argument))
+ .ValueOrDie());
+ arguments.push_back(owning_arguments.back().get());
}
}
@@ -454,6 +466,14 @@ ClientLibraryTestBase::ComputeValueAndReference(
// function.
std::vector<std::unique_ptr<GlobalData>> argument_data;
std::vector<std::unique_ptr<GlobalData>> ref_argument_data;
+
+ // Use `arguments_` if the AddParam() API was used. Otherwise, use
+ // plain `arguments`.
+ if (!arguments_.empty()) {
+ CHECK_EQ(arguments.size(), 0);
+ arguments = arguments_;
+ }
+
for (const auto& arg : arguments) {
TF_ASSIGN_OR_RETURN(auto data, client_->TransferToServer(arg.Clone()));
TF_ASSIGN_OR_RETURN(auto ref_data, ref_client_->TransferToServer(arg));
@@ -552,10 +572,9 @@ ClientLibraryTestBase::CreatePatternedMatrixWithZeroPadding(int rows, int cols,
XlaOp ClientLibraryTestBase::AddParam(const Literal& argument,
XlaBuilder* builder) {
- XlaOp data_handle;
- arguments_.push_back(CreateParameterAndTransferLiteral(
- arguments_.size(), argument, "", builder, &data_handle));
- return data_handle;
+ arguments_.push_back(argument.Clone());
+ return Parameter(builder, /*parameter_number=*/arguments_.size() - 1,
+ MaybeConvertShapeToBfloat16(argument.shape()), "");
}
XlaOp ClientLibraryTestBase::CreateConstantFromLiteral(const Literal& literal,
@@ -575,22 +594,39 @@ ClientLibraryTestBase::CreateParameterAndTransferLiteral(int64 parameter_number,
nullptr, builder, data_handle);
}
+Shape ClientLibraryTestBase::MaybeConvertShapeToBfloat16(const Shape& shape) {
+ if (!use_bfloat16_) {
+ return shape;
+ }
+ Shape new_shape = shape;
+ ShapeUtil::ForEachMutableSubshape(&new_shape,
+ [](Shape* subshape, const ShapeIndex&) {
+ if (subshape->element_type() == F32) {
+ subshape->set_element_type(BF16);
+ }
+ });
+ return new_shape;
+}
+
+Literal ClientLibraryTestBase::MaybeConvertLiteralToBfloat16(
+ const Literal& literal) {
+ if (use_bfloat16_) {
+ return std::move(*LiteralUtil::ConvertF32ToBF16(literal));
+ }
+ return literal.Clone();
+}
+
std::unique_ptr<GlobalData>
ClientLibraryTestBase::CreateParameterAndTransferLiteral(
int64 parameter_number, const Literal& literal, const string& name,
const DeviceHandle* device_handle, XlaBuilder* builder,
XlaOp* data_handle) {
- const Literal* param_literal = &literal;
- std::unique_ptr<Literal> converted_literal;
- if (use_bfloat16_) {
- converted_literal = LiteralUtil::ConvertF32ToBF16(literal);
- param_literal = converted_literal.get();
- }
+ Literal param_literal = MaybeConvertLiteralToBfloat16(literal);
std::unique_ptr<GlobalData> data =
- client_->TransferToServer(*param_literal, device_handle)
+ client_->TransferToServer(param_literal, device_handle)
.ConsumeValueOrDie();
*data_handle =
- Parameter(builder, parameter_number, param_literal->shape(), name);
+ Parameter(builder, parameter_number, param_literal.shape(), name);
return data;
}
diff --git a/tensorflow/compiler/xla/tests/client_library_test_base.h b/tensorflow/compiler/xla/tests/client_library_test_base.h
index fcc9347db5..f0f7ff1ea0 100644
--- a/tensorflow/compiler/xla/tests/client_library_test_base.h
+++ b/tensorflow/compiler/xla/tests/client_library_test_base.h
@@ -399,12 +399,16 @@ class ClientLibraryTestBase : public ::testing::Test {
const string& error_message)>& verify_output,
const Shape* output_with_layout = nullptr);
+ // Converts an f32 shape/literal to bf16 if use_bfloat16_ is true.
+ Literal MaybeConvertLiteralToBfloat16(const Literal& literal);
+ Shape MaybeConvertShapeToBfloat16(const Shape& shape);
+
// Whether to run tests with all float-type input/output converted to
// bfloat16.
bool use_bfloat16_ = false;
// Arguments to be passed to the computation when it runs.
- std::vector<std::unique_ptr<GlobalData>> arguments_;
+ std::vector<Literal> arguments_;
};
template <typename NativeT>
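
Editor's note (not part of the patch): the hunks above change AddParam() so that arguments_ stores plain Literals, and ComputeAndCompare* converts them to bfloat16 (when enabled) and transfers them to the server only at execution time. A minimal sketch of a test exercising that path follows; the fixture name is hypothetical, the usual test includes are assumed, and AddParam/ComputeAndCompare are the helpers shown above.

// Hypothetical fixture; any ClientLibraryTestBase subclass would do.
class AddParamSketchTest : public ClientLibraryTestBase {};

XLA_TEST_F(AddParamSketchTest, DeferredTransfer) {
  XlaBuilder b(TestName());
  std::unique_ptr<Literal> lit = LiteralUtil::CreateR1<float>({1.0f, 2.0f});
  // AddParam() now just clones the literal into arguments_ and emits a
  // Parameter whose shape is switched to BF16 when use_bfloat16_ is set.
  XlaOp p = AddParam(*lit, &b);
  Add(p, p);
  // No GlobalData is passed here: ComputeAndCompare* converts and transfers
  // the stored literals right before execution.
  ComputeAndCompare(&b, {});
}
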
diff --git a/tensorflow/compiler/xla/tests/conditional_test.cc b/tensorflow/compiler/xla/tests/conditional_test.cc
index 35f1400fb2..369663de15 100644
--- a/tensorflow/compiler/xla/tests/conditional_test.cc
+++ b/tensorflow/compiler/xla/tests/conditional_test.cc
@@ -172,88 +172,95 @@ class ConditionalOpTest : public ClientLibraryTestBase {
// Test true and false computations that do not take any parameters.
XLA_TEST_F(ConditionalOpTest, Parameters0) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, true);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
auto operands = Tuple(&builder, {});
auto true_computation = CreateR0ConstantComputation(56.0f);
auto false_computation = CreateR0ConstantComputation(12.0f);
Conditional(pred, operands, true_computation, operands, false_computation);
- ComputeAndCompareR0<float>(&builder, 56.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 56.0f, {pred_arg.get()}, error_spec_);
}
// Test true and false computations that take in 1 parameter.
XLA_TEST_F(ConditionalOpTest, Parameters1) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, false);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
auto operand1 = ConstantR0<float>(&builder, 56.0f);
auto operand2 = ConstantR0<float>(&builder, 12.0f);
auto identity = CreateR0IdentityComputation();
Conditional(pred, operand1, identity, operand2, identity);
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test conditional with two different computations in the true and false cases
// that take in different arguments.
XLA_TEST_F(ConditionalOpTest, DiffComputationsDiffArgs) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, false);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
auto operand1 = ConstantR0<float>(&builder, 56.4f);
auto operand2 = ConstantR0<float>(&builder, 12.6f);
Conditional(pred, operand1, CreateR0CeilComputation(), operand2,
CreateR0FloorComputation());
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test conditional with two different computations in the true and false cases
// that take in the same arguments.
XLA_TEST_F(ConditionalOpTest, DiffComputationsSameArg) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, false);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
auto operand = ConstantR0<float>(&builder, 12.6f);
Conditional(pred, operand, CreateR0CeilComputation(), operand,
CreateR0FloorComputation());
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test conditional with the same computation in the true and false cases but
// take in different arguments.
XLA_TEST_F(ConditionalOpTest, SameComputationDiffArgs) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, false);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
auto operand1 = ConstantR0<float>(&builder, 56.4f);
auto operand2 = ConstantR0<float>(&builder, 12.6f);
auto floor = CreateR0FloorComputation();
Conditional(pred, operand1, floor, operand2, floor);
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test conditional with the same computation in the true and false cases that
// take in the same arguments.
XLA_TEST_F(ConditionalOpTest, SameComputationSameArg) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, false);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
auto operand = ConstantR0<float>(&builder, 12.6f);
auto floor = CreateR0FloorComputation();
Conditional(pred, operand, floor, operand, floor);
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test conditional with different instances of the same computation in the true
// and false cases.
XLA_TEST_F(ConditionalOpTest, SameComputationDiffInstances) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, false);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
auto operand1 = ConstantR0<float>(&builder, 56.4f);
auto operand2 = ConstantR0<float>(&builder, 12.6f);
Conditional(pred, operand1, CreateR0FloorComputation(), operand2,
CreateR0FloorComputation());
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test the case when a call invokes a computation that contains a conditional.
@@ -268,75 +275,83 @@ XLA_TEST_F(ConditionalOpTest, ConditionalWithCall) {
auto inner_builder_result = inner_builder.Build();
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, false);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
auto operand1 = ConstantR0<float>(&builder, 56.4f);
auto operand2 = ConstantR0<float>(&builder, 12.6f);
Call(&builder, inner_builder_result.ConsumeValueOrDie(),
{pred, operand1, operand2});
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test true and false computations that take in 2 parameters and predicate is
// true.
XLA_TEST_F(ConditionalOpTest, Parameters2TrueBranch) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, true);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
auto operand1 = ConstantR0<float>(&builder, 56.0f);
auto operand2 = ConstantR0<float>(&builder, 12.0f);
auto operands = Tuple(&builder, {operand1, operand2});
Conditional(pred, operands, CreateR0TupleAddComputation(), operands,
CreateR0TupleSubComputation());
- ComputeAndCompareR0<float>(&builder, 68.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 68.0f, {pred_arg.get()}, error_spec_);
}
// Test true and false computations that take in 2 parameters and predicate is
// false.
XLA_TEST_F(ConditionalOpTest, Parameters2FalseBranch) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, false);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
auto operand1 = ConstantR0<float>(&builder, 56.0f);
auto operand2 = ConstantR0<float>(&builder, 12.0f);
auto operands = Tuple(&builder, {operand1, operand2});
Conditional(pred, operands, CreateR0TupleAddComputation(), operands,
CreateR0TupleSubComputation());
- ComputeAndCompareR0<float>(&builder, 44.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 44.0f, {pred_arg.get()}, error_spec_);
}
// Test true and false computations that take in 2 array parameters and
// predicate is true.
XLA_TEST_F(ConditionalOpTest, Parameters2ArrayTrueBranch) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, true);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
auto operand1 = ConstantR1<float>(&builder, {24.0f, 56.0f});
auto operand2 = ConstantR1<float>(&builder, {10.0f, 11.0f});
auto operands = Tuple(&builder, {operand1, operand2});
Conditional(pred, operands, CreateR1TupleAddComputation(), operands,
CreateR1TupleSubComputation());
- ComputeAndCompareR1<float>(&builder, {34.0f, 67.0f}, {}, error_spec_);
+ ComputeAndCompareR1<float>(&builder, {34.0f, 67.0f}, {pred_arg.get()},
+ error_spec_);
}
// Test true and false computations that take in 2 array parameters and
// predicate is false.
XLA_TEST_F(ConditionalOpTest, Parameters2ArrayFalseBranch) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, false);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
auto operand1 = ConstantR1<float>(&builder, {24.0f, 56.0f});
auto operand2 = ConstantR1<float>(&builder, {10.0f, 11.0f});
auto operands = Tuple(&builder, {operand1, operand2});
Conditional(pred, operands, CreateR1TupleAddComputation(), operands,
CreateR1TupleSubComputation());
- ComputeAndCompareR1<float>(&builder, {14.0f, 45.0f}, {}, error_spec_);
+ ComputeAndCompareR1<float>(&builder, {14.0f, 45.0f}, {pred_arg.get()},
+ error_spec_);
}
// Test true and false computations that return a tuple of scalars.
XLA_TEST_F(ConditionalOpTest, ReturnTupleOfScalars) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, false);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
auto operands = Tuple(&builder, {ConstantR0<float>(&builder, 12.2f),
ConstantR0<float>(&builder, 25.6f)});
Conditional(pred, operands, CreateR0TupleCeilComputation(), operands,
@@ -346,13 +361,14 @@ XLA_TEST_F(ConditionalOpTest, ReturnTupleOfScalars) {
&builder,
*LiteralUtil::MakeTuple({LiteralUtil::CreateR0<float>(12.0f).get(),
LiteralUtil::CreateR0<float>(25.0f).get()}),
- {}, error_spec_);
+ {pred_arg.get()}, error_spec_);
}
// Test true and false computations that return a tuple of arrays.
XLA_TEST_F(ConditionalOpTest, ReturnTupleOfArrays) {
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, true);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
auto operands =
Tuple(&builder, {ConstantR1<float>(&builder, {12.2f, 15.8f}),
ConstantR1<float>(&builder, {25.6f, 29.2f})});
@@ -364,7 +380,7 @@ XLA_TEST_F(ConditionalOpTest, ReturnTupleOfArrays) {
*LiteralUtil::MakeTuple(
{LiteralUtil::CreateR1<float>({13.0f, 16.0f}).get(),
LiteralUtil::CreateR1<float>({26.0f, 30.0f}).get()}),
- {}, error_spec_);
+ {pred_arg.get()}, error_spec_);
}
// Test true and false computations that return a tuple of a predicate, a
@@ -393,7 +409,8 @@ XLA_TEST_F(ConditionalOpTest, ReturnTupleofPredicateScalarArray) {
EXPECT_IS_OK(false_builder_result.status());
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, true);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(true, 0, "pred", &builder, &pred);
auto operands = Tuple(&builder, {});
Conditional(pred, operands, true_builder_result.ConsumeValueOrDie(), operands,
false_builder_result.ConsumeValueOrDie());
@@ -404,7 +421,7 @@ XLA_TEST_F(ConditionalOpTest, ReturnTupleofPredicateScalarArray) {
{LiteralUtil::CreateR0<bool>(true).get(),
LiteralUtil::CreateR0<float>(12.2f).get(),
LiteralUtil::CreateR1<float>({12.8f, 14.6f}).get()}),
- {}, error_spec_);
+ {pred_arg.get()}, error_spec_);
}
// Test true and false computations that return a nested tuple.
@@ -438,7 +455,8 @@ XLA_TEST_F(ConditionalOpTest, ReturnNestedTuple) {
EXPECT_IS_OK(false_builder_result.status());
XlaBuilder builder(TestName());
- auto pred = ConstantR0<bool>(&builder, false);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
auto operands = Tuple(&builder, {});
Conditional(pred, operands, true_builder_result.ConsumeValueOrDie(), operands,
false_builder_result.ConsumeValueOrDie());
@@ -454,7 +472,7 @@ XLA_TEST_F(ConditionalOpTest, ReturnNestedTuple) {
{LiteralUtil::CreateR1<float>({62.1f, 67.4f}).get(),
LiteralUtil::CreateR0<float>(9.3f).get()})
.get()}),
- {}, error_spec_);
+ {pred_arg.get()}, error_spec_);
}
// Test conditional that takes in scalar operands in the form of external
@@ -515,8 +533,9 @@ XLA_TEST_F(ConditionalOpTest, NestedConditionals) {
EXPECT_IS_OK(inner_builder_result.status());
XlaBuilder builder(TestName());
- auto pred1 = ConstantR0<bool>(&builder, true);
- auto pred2 = ConstantR0<bool>(&builder, false);
+ XlaOp pred1, pred2;
+ auto pred1_arg = CreateR0Parameter<bool>(true, 0, "pred1", &builder, &pred1);
+ auto pred2_arg = CreateR0Parameter<bool>(false, 1, "pred2", &builder, &pred2);
auto operand1 = ConstantR0<float>(&builder, 1.1f);
auto operand2 = ConstantR0<float>(&builder, 12.2f);
auto operand3 = ConstantR0<float>(&builder, 43.3f);
@@ -524,7 +543,8 @@ XLA_TEST_F(ConditionalOpTest, NestedConditionals) {
Conditional(pred1, tuple_operand, inner_builder_result.ConsumeValueOrDie(),
operand3, CreateR0IdentityComputation());
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f,
+ {pred1_arg.get(), pred2_arg.get()}, error_spec_);
}
XLA_TEST_F(ConditionalOpTest, ConditionalInNestedComputation) {
@@ -543,13 +563,14 @@ XLA_TEST_F(ConditionalOpTest, ConditionalInNestedComputation) {
EXPECT_IS_OK(inner_builder_result.status());
XlaBuilder builder(TestName());
- auto pred2 = ConstantR0<bool>(&builder, false);
+ XlaOp pred;
+ auto pred_arg = CreateR0Parameter<bool>(false, 0, "pred", &builder, &pred);
auto operand1 = ConstantR0<float>(&builder, 1.1f);
auto operand2 = ConstantR0<float>(&builder, 12.2f);
- auto tuple_operand = Tuple(&builder, {pred2, operand1, operand2});
+ auto tuple_operand = Tuple(&builder, {pred, operand1, operand2});
Call(&builder, inner_builder_result.ConsumeValueOrDie(), {tuple_operand});
- ComputeAndCompareR0<float>(&builder, 12.0f, {}, error_spec_);
+ ComputeAndCompareR0<float>(&builder, 12.0f, {pred_arg.get()}, error_spec_);
}
// Test a mismatch in the shape of the true operand and true computation.
@@ -604,8 +625,9 @@ XLA_TEST_F(ConditionalOpTest, SwappedInputsInSequentialConditionals) {
auto test_swap = [&](float a, float b) {
XlaBuilder builder(TestName());
- auto x = ConstantR0<float>(&builder, a);
- auto y = ConstantR0<float>(&builder, b);
+ XlaOp x, y;
+ auto x_arg = CreateR0Parameter<float>(a, 0, "x", &builder, &x);
+ auto y_arg = CreateR0Parameter<float>(b, 1, "y", &builder, &y);
auto tuple_operand = Tuple(&builder, {x, y});
Call(&builder, main, {tuple_operand});
@@ -613,7 +635,7 @@ XLA_TEST_F(ConditionalOpTest, SwappedInputsInSequentialConditionals) {
&builder,
*LiteralUtil::MakeTuple({LiteralUtil::CreateR0<float>(a).get(),
LiteralUtil::CreateR0<float>(b).get()}),
- {}, error_spec_);
+ {x_arg.get(), y_arg.get()}, error_spec_);
};
test_swap(3.11f, 9.4f);
diff --git a/tensorflow/compiler/xla/tests/convert_test.cc b/tensorflow/compiler/xla/tests/convert_test.cc
index dca57fd1c7..0fb6853e3f 100644
--- a/tensorflow/compiler/xla/tests/convert_test.cc
+++ b/tensorflow/compiler/xla/tests/convert_test.cc
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+#include <array>
#include <cstdint>
#include <limits>
#include <memory>
@@ -52,13 +53,67 @@ TEST_F(ConvertTest, ConvertR1S32ToR1S32) {
ComputeAndCompareR1<int32>(&builder, expected, {});
}
+TEST_F(ConvertTest, ConvertR1S32ToR1U32) {
+ XlaBuilder builder(TestName());
+ auto a = ConstantR1<int32>(&builder, {42, 64});
+ ConvertElementType(a, U32);
+
+ std::vector<uint32> expected = {42, 64};
+ ComputeAndCompareR1<uint32>(&builder, expected, {});
+}
+
+TEST_F(ConvertTest, ConvertR1S32ToR1PRED) {
+ XlaBuilder builder(TestName());
+ auto a = ConstantR1<int32>(&builder, {42, 0, -64});
+ ConvertElementType(a, PRED);
+
+ std::array<bool, 3> expected = {true, false, true};
+ ComputeAndCompareR1<bool>(&builder, expected, {});
+}
+
+TEST_F(ConvertTest, ConvertR1U32ToR1U32) {
+ XlaBuilder builder(TestName());
+ auto a = ConstantR1<uint32>(&builder, {42, 64});
+ ConvertElementType(a, U32);
+
+ std::vector<uint32> expected = {42, 64};
+ ComputeAndCompareR1<uint32>(&builder, expected, {});
+}
+
+TEST_F(ConvertTest, ConvertR1U32ToR1S32) {
+ XlaBuilder builder(TestName());
+ auto a = ConstantR1<uint32>(&builder, {42, 64});
+ ConvertElementType(a, S32);
+
+ std::vector<int32> expected = {42, 64};
+ ComputeAndCompareR1<int32>(&builder, expected, {});
+}
+
+TEST_F(ConvertTest, ConvertR1U32ToR1PRED) {
+ XlaBuilder builder(TestName());
+ auto a = ConstantR1<uint32>(&builder, {42, 0, 64});
+ ConvertElementType(a, PRED);
+
+ std::array<bool, 3> expected = {true, false, true};
+ ComputeAndCompareR1<bool>(&builder, expected, {});
+}
+
TEST_F(ConvertTest, ConvertR1F32ToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<float>(&builder, {42.0f, 64.0f});
ConvertElementType(a, F32);
std::vector<float> expected = {42.0f, 64.0f};
- ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
+ ComputeAndCompareR1<float>(&builder, expected, {});
+}
+
+TEST_F(ConvertTest, ConvertR1F32ToR1PRED) {
+ XlaBuilder builder(TestName());
+ auto a = ConstantR1<float>(&builder, {42.0f, 0.0f, 64.0f});
+ ConvertElementType(a, PRED);
+
+ std::array<bool, 3> expected = {true, false, true};
+ ComputeAndCompareR1<bool>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1S32ToR1F32) {
@@ -67,7 +122,7 @@ TEST_F(ConvertTest, ConvertR1S32ToR1F32) {
ConvertElementType(a, F32);
std::vector<float> expected = {42.0f, 64.0f};
- ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
+ ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1PREDToR1S32) {
@@ -79,6 +134,15 @@ TEST_F(ConvertTest, ConvertR1PREDToR1S32) {
ComputeAndCompareR1<int32>(&builder, expected, {});
}
+TEST_F(ConvertTest, ConvertR1PREDToR1U32) {
+ XlaBuilder builder(TestName());
+ auto a = ConstantR1<bool>(&builder, {true, false, true});
+ ConvertElementType(a, U32);
+
+ std::vector<uint32> expected = {1, 0, 1};
+ ComputeAndCompareR1<uint32>(&builder, expected, {});
+}
+
TEST_F(ConvertTest, ConvertR1PREDToR1F32) {
XlaBuilder builder(TestName());
auto a = ConstantR1<bool>(&builder, {true, false, true});
@@ -94,7 +158,7 @@ XLA_TEST_F(ConvertTest, ConvertR1S0S32ToR1S0F32) {
ConvertElementType(a, F32);
std::vector<float> expected = {};
- ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001));
+ ComputeAndCompareR1<float>(&builder, expected, {});
}
TEST_F(ConvertTest, ConvertR1F32ToR1S32) {
diff --git a/tensorflow/compiler/xla/tests/dynamic_ops_test.cc b/tensorflow/compiler/xla/tests/dynamic_ops_test.cc
index b063b6bdef..88ac96d6b0 100644
--- a/tensorflow/compiler/xla/tests/dynamic_ops_test.cc
+++ b/tensorflow/compiler/xla/tests/dynamic_ops_test.cc
@@ -202,18 +202,28 @@ XLA_TEST_F(DynamicSliceTest, Int32R1) { TestR1<int32, int32>(); }
XLA_TEST_F(DynamicSliceTest, Int32R1OOB) { TestR1OOB<int32, int32>(); }
XLA_TEST_F(DynamicSliceTest, Int64R1) { TestR1<int64, float>(); }
XLA_TEST_F(DynamicSliceTest, UInt64R1) { TestR1<uint64, float>(); }
+XLA_TEST_F(DynamicSliceTest, UInt32R1OOB) {
+ RunR1<uint32, int32>({0, 1, 2, 3, 4}, {2147483648u}, {2}, {3, 4});
+}
XLA_TEST_F(DynamicSliceTest, Int32R2BF16) { TestR2<int32, bfloat16>(); }
XLA_TEST_F(DynamicSliceTest, Int32R2) { TestR2<int32, int32>(); }
XLA_TEST_F(DynamicSliceTest, Int32R2OOB) { TestR2OOB<int32, int32>(); }
XLA_TEST_F(DynamicSliceTest, Int64R2) { TestR2<int64, float>(); }
XLA_TEST_F(DynamicSliceTest, UInt64R2) { TestR2<uint64, int32>(); }
+XLA_TEST_F(DynamicSliceTest, UInt32R2OOB) {
+ RunR2<uint32, int32>({{0, 1}, {2, 3}}, {2147483648u, 0}, {1, 1}, {{2}});
+}
XLA_TEST_F(DynamicSliceTest, Int32R3BF16) { TestR3<int32, bfloat16>(); }
XLA_TEST_F(DynamicSliceTest, Int32R3) { TestR3<int32, float>(); }
XLA_TEST_F(DynamicSliceTest, Int32R3OOB) { TestR3OOB<int32, float>(); }
XLA_TEST_F(DynamicSliceTest, Int64R3) { TestR3<int64, float>(); }
XLA_TEST_F(DynamicSliceTest, UInt64R3) { TestR3<uint64, float>(); }
+XLA_TEST_F(DynamicSliceTest, UInt32R3OOB) {
+ RunR3<uint32, int32>({{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}},
+ {2147483648u, 0, 2147483648u}, {1, 1, 1}, {{{5}}});
+}
XLA_TEST_F(DynamicSliceTest, Int32R1Pred) {
// Slice at dimension start.
@@ -530,21 +540,32 @@ XLA_TEST_F(DynamicUpdateSliceTest, Int32R0) { TestR0<int32, float>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int64R0) { TestR0<int64, float>(); }
XLA_TEST_F(DynamicUpdateSliceTest, UInt64R0) { TestR0<uint64, float>(); }
-// TODO(b/71820067): The CPU parallel backend failed for this on 2018-01-10.
XLA_TEST_F(DynamicUpdateSliceTest, Int32R1BF16) { TestR1<int32, bfloat16>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int32R1) { TestR1<int32, float>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int64R1) { TestR1<int64, float>(); }
XLA_TEST_F(DynamicUpdateSliceTest, UInt64R1) { TestR1<uint64, float>(); }
+XLA_TEST_F(DynamicUpdateSliceTest, UInt32R1OOB) {
+ RunR1<uint32, int32>({0, 1, 2, 3, 4}, {5, 6}, {2147483648u}, {0, 1, 2, 5, 6});
+}
XLA_TEST_F(DynamicUpdateSliceTest, Int32R2BF16) { TestR2<int32, bfloat16>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int32R2) { TestR2<int32, float>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int64R2) { TestR2<int64, int64>(); }
XLA_TEST_F(DynamicUpdateSliceTest, UInt64R2) { TestR2<uint64, int32>(); }
+XLA_TEST_F(DynamicUpdateSliceTest, UInt32R2OOB) {
+ RunR2<uint32, int32>({{0, 1}, {2, 3}}, {{4}}, {2147483648u, 0},
+ {{0, 1}, {4, 3}});
+}
XLA_TEST_F(DynamicUpdateSliceTest, Int32R3BF16) { TestR3<int32, bfloat16>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int32R3) { TestR3<int32, float>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int64R3) { TestR3<int64, int64>(); }
XLA_TEST_F(DynamicUpdateSliceTest, UInt64R3) { TestR3<uint64, uint64>(); }
+XLA_TEST_F(DynamicUpdateSliceTest, UInt32R3OOB) {
+ RunR3<uint32, int32>({{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}}, {{{8}}},
+ {2147483648u, 0, 2147483648u},
+ {{{0, 1}, {2, 3}}, {{4, 8}, {6, 7}}});
+}
XLA_TEST_F(DynamicUpdateSliceTest, Int32OOBBF16) { TestOOB<int32, bfloat16>(); }
XLA_TEST_F(DynamicUpdateSliceTest, Int32OOB) { TestOOB<int32, float>(); }
diff --git a/tensorflow/compiler/xla/tests/fusion_test.cc b/tensorflow/compiler/xla/tests/fusion_test.cc
index dc64477935..607bcdd51e 100644
--- a/tensorflow/compiler/xla/tests/fusion_test.cc
+++ b/tensorflow/compiler/xla/tests/fusion_test.cc
@@ -799,6 +799,46 @@ ENTRY main {
*result));
}
+class FusionClientLibraryTest : public ClientLibraryTestBase {};
+
+XLA_TEST_F(FusionClientLibraryTest, ManyLayoutTransformations) {
+ // On the GPU backend, it's possible to have too many transposes within one
+  // fusion, causing the kernel to run out of shared memory and thus not compile.
+  // We want to check that this doesn't happen.
+ //
+ // To do this, we create a computation that computes
+ //
+ // P0 + P0*P1*P1 + P0*P2*P2 ...
+ //
+ // where even parameters have layout 1 and odd parameters have layout 2.
+ //
+ // Our goal is to tempt the backend into creating one giant multi-output
+ // fusion for the whole computation, including the transposes. Currently
+ // multi-output fusion only fuses fusions, so each of the terms in the sum
+ // needs to be a fusion itself, thus the contortions above.
+ constexpr int kNumParams = 25;
+ XlaBuilder b("ManyLayoutTransformations");
+
+ // This test produces values that overflow int32, which is UB, so use uint32,
+ // where overflow is OK.
+ Array2D<uint32> arr(32, 32);
+ arr.FillUnique();
+ std::unique_ptr<Literal> l1 = LiteralUtil::CreateR2FromArray2D(arr)->Relayout(
+ LayoutUtil::MakeLayout({0, 1}));
+
+ std::unique_ptr<Literal> l2 = LiteralUtil::CreateR2FromArray2D(arr)->Relayout(
+ LayoutUtil::MakeLayout({1, 0}));
+
+ XlaOp p0 = AddParam(*l1, &b);
+ XlaOp sum = p0;
+ for (int i = 1; i < kNumParams; ++i) {
+ auto pN = AddParam((i % 2 == 0 ? *l1 : *l2), &b);
+ sum = sum + p0 * pN * pN;
+ }
+
+ ComputeAndCompare(&b, {});
+}
+
void BM_ParallelFusion(int num_iters) {
// Simple element-wise computation to benchmark parallel task partitioning.
tensorflow::testing::StopTiming();
diff --git a/tensorflow/compiler/xla/tests/iota_test.cc b/tensorflow/compiler/xla/tests/iota_test.cc
new file mode 100644
index 0000000000..f950aa1e8f
--- /dev/null
+++ b/tensorflow/compiler/xla/tests/iota_test.cc
@@ -0,0 +1,61 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <numeric>
+#include <vector>
+
+#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
+#include "tensorflow/core/lib/core/errors.h"
+
+namespace xla {
+namespace {
+
+class IotaTest : public ClientLibraryTestBase {
+ public:
+ explicit IotaTest(se::Platform* platform = nullptr)
+ : ClientLibraryTestBase(platform) {}
+ template <typename T>
+ std::vector<T> GetExpected(const int64 num_elements) {
+ std::vector<T> result(num_elements);
+ std::iota(result.begin(), result.end(), 0);
+ return result;
+ }
+};
+
+TEST_F(IotaTest, SimpleR1) {
+ for (int num_elements = 1; num_elements < 10000001; num_elements *= 10) {
+ {
+ XlaBuilder builder(TestName() + "_f32");
+ IotaGen(&builder, F32, num_elements);
+ ComputeAndCompareR1<float>(&builder, GetExpected<float>(num_elements), {},
+ ErrorSpec{0.0001});
+ }
+ {
+ XlaBuilder builder(TestName() + "_u32");
+ IotaGen(&builder, U32, num_elements);
+ ComputeAndCompareR1<uint32>(&builder, GetExpected<uint32>(num_elements),
+ {});
+ }
+ {
+ XlaBuilder builder(TestName() + "_s32");
+ IotaGen(&builder, S32, num_elements);
+ ComputeAndCompareR1<int32>(&builder, GetExpected<int32>(num_elements),
+ {});
+ }
+ }
+}
+
+} // namespace
+} // namespace xla
diff --git a/tensorflow/compiler/xla/tests/llvm_compiler_test.cc b/tensorflow/compiler/xla/tests/llvm_compiler_test.cc
index 13df83ffff..e719da54d4 100644
--- a/tensorflow/compiler/xla/tests/llvm_compiler_test.cc
+++ b/tensorflow/compiler/xla/tests/llvm_compiler_test.cc
@@ -17,7 +17,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/service/backend.h"
#include "tensorflow/compiler/xla/service/cpu/cpu_compiler.h"
-#include "tensorflow/compiler/xla/service/gpu/gpu_compiler.h"
+#include "tensorflow/compiler/xla/service/gpu/nvptx_compiler.h"
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/platform_util.h"
#include "tensorflow/compiler/xla/test_helpers.h"
@@ -145,7 +145,7 @@ TEST_F(CpuCompilerTest, HooksTest) {
}
TEST_F(GpuCompilerTest, HooksTest) {
- gpu::GpuCompiler compiler;
+ gpu::NVPTXCompiler compiler;
TestCompilerHooks(&compiler);
}
@@ -155,7 +155,7 @@ TEST_F(CpuCompilerTest, MultiModuleCompilation) {
}
TEST_F(GpuCompilerTest, MultModuleCompilation) {
- gpu::GpuCompiler compiler;
+ gpu::NVPTXCompiler compiler;
TestMultiModuleCompilation(&compiler);
}
} // namespace
diff --git a/tensorflow/compiler/xla/tests/local_client_execute_test.cc b/tensorflow/compiler/xla/tests/local_client_execute_test.cc
index 7c003fb81f..5c3498c84c 100644
--- a/tensorflow/compiler/xla/tests/local_client_execute_test.cc
+++ b/tensorflow/compiler/xla/tests/local_client_execute_test.cc
@@ -772,6 +772,10 @@ XLA_TEST_F(LocalClientExecuteTest, CompileExecutable) {
ScopedShapedBuffer result =
executable->Run({&x_array}, DefaultExecutableRunOptions())
.ConsumeValueOrDie();
+ ASSERT_IS_OK(local_client_->mutable_backend()
+ ->BorrowStream(0)
+ .ValueOrDie()
+ ->BlockHostUntilDone());
LiteralTestUtil::ExpectR1Near<float>(
{2.0f, 4.0f, 6.0f}, *ShapedBufferToLiteral(result), error_spec_);
@@ -866,9 +870,7 @@ XLA_TEST_F(LocalClientExecuteTest, InfeedTest) {
LiteralTestUtil::ExpectR1Equal<float>({-4.0, 125.0, 45.0}, *result);
}
-// TODO(b/34359662): Support infeed/outfeed on GPU and CPU parallel.
-// 2017-10-18.
-XLA_TEST_F(LocalClientExecuteTest, DISABLED_ON_GPU(InfeedOutfeedTest)) {
+XLA_TEST_F(LocalClientExecuteTest, InfeedOutfeedTest) {
XlaBuilder builder(TestName());
const Shape shape = ShapeUtil::MakeShape(F32, {3});
auto in = Infeed(&builder, shape);
diff --git a/tensorflow/compiler/xla/tests/local_client_test_base.cc b/tensorflow/compiler/xla/tests/local_client_test_base.cc
index 88797a7d0a..c31ba0e713 100644
--- a/tensorflow/compiler/xla/tests/local_client_test_base.cc
+++ b/tensorflow/compiler/xla/tests/local_client_test_base.cc
@@ -189,7 +189,19 @@ StatusOr<ScopedShapedBuffer> LocalClientTestBase::ExecuteLocally(
TF_ASSIGN_OR_RETURN(
std::unique_ptr<LocalExecutable> executable,
local_client_->Compile(computation, argument_layouts, build_options));
- return executable->Run(arguments, run_options);
+ TF_ASSIGN_OR_RETURN(auto ret, executable->Run(arguments, run_options));
+
+ auto device_ordinal =
+ build_options.device_ordinal() == -1 ? 0 : build_options.device_ordinal();
+ auto* stream = run_options.stream();
+ if (!stream) {
+ stream = local_client_->mutable_backend()
+ ->BorrowStream(device_ordinal)
+ .ValueOrDie()
+ .get();
+ }
+ TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
+ return std::move(ret);
}
} // namespace xla
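
Editor's note (not part of the patch): the ExecuteLocally change above reflects that LocalExecutable::Run may return before device work has finished, so the test base now blocks on a stream before handing results back. A minimal sketch of the same pattern in caller code; RunSynchronously is a hypothetical helper, it blocks on device 0 and ignores a caller-provided stream for brevity, and the Backend/stream calls mirror the hunk above.

StatusOr<ScopedShapedBuffer> RunSynchronously(
    LocalClient* client, LocalExecutable* executable,
    const std::vector<const ShapedBuffer*>& arguments,
    const ExecutableRunOptions& run_options) {
  // Run() may only enqueue work on a stream.
  TF_ASSIGN_OR_RETURN(auto result, executable->Run(arguments, run_options));
  // Block until the device is done so the result buffers are safe to read.
  TF_RETURN_IF_ERROR(client->mutable_backend()
                         ->BorrowStream(/*device_ordinal=*/0)
                         .ValueOrDie()
                         ->BlockHostUntilDone());
  return std::move(result);
}
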
diff --git a/tensorflow/compiler/xla/tests/prng_test.cc b/tensorflow/compiler/xla/tests/prng_test.cc
index 5ebf8344d2..3f98099be6 100644
--- a/tensorflow/compiler/xla/tests/prng_test.cc
+++ b/tensorflow/compiler/xla/tests/prng_test.cc
@@ -177,7 +177,7 @@ XLA_TEST_F(PrngTest, Uniformity108) {
EXPECT_LT(UniformChiSquared(108, 256), 132.144);
}
XLA_TEST_F(PrngTest, Uniformity256) {
- EXPECT_LT(UniformChiSquared(256, 256), 293.248);
+ EXPECT_LT(UniformChiSquared(256, 512), 293.248);
}
XLA_TEST_F(PrngTest, MapUsingRng) {
diff --git a/tensorflow/compiler/xla/tests/reduce_test.cc b/tensorflow/compiler/xla/tests/reduce_test.cc
index 1407fca72f..e4a8ddf86a 100644
--- a/tensorflow/compiler/xla/tests/reduce_test.cc
+++ b/tensorflow/compiler/xla/tests/reduce_test.cc
@@ -125,10 +125,10 @@ class ReduceTest : public ClientLibraryTestBase {
XlaComputation reduce;
if (and_reduce) {
init_value = ConstantR0<bool>(&builder, true);
- reduce = CreateScalarAndComputation(&builder);
+ reduce = CreateScalarAndComputation(PRED, &builder);
} else {
init_value = ConstantR0<bool>(&builder, false);
- reduce = CreateScalarOrComputation(&builder);
+ reduce = CreateScalarOrComputation(PRED, &builder);
}
Reduce(pred_values, init_value, reduce,
/*dimensions_to_reduce=*/{0});
@@ -163,10 +163,10 @@ class ReduceTest : public ClientLibraryTestBase {
XlaComputation reduce_op;
if (and_reduce) {
init_value = ConstantR0<bool>(&builder, true);
- reduce_op = CreateScalarAndComputation(&builder);
+ reduce_op = CreateScalarAndComputation(PRED, &builder);
} else {
init_value = ConstantR0<bool>(&builder, false);
- reduce_op = CreateScalarOrComputation(&builder);
+ reduce_op = CreateScalarOrComputation(PRED, &builder);
}
Reduce(input_pred, init_value, reduce_op,
@@ -798,13 +798,17 @@ XLA_TEST_F(ReduceTest, VectorizedReduce_Min) {
XLA_TEST_F(ReduceTest, VectorizedReduce_BooleanAnd) {
RunVectorizedReduceTestForType<bool>(
- static_cast<FuncGenerator>(CreateScalarAndComputation),
+ static_cast<FuncGenerator>([](XlaBuilder* builder) {
+ return CreateScalarAndComputation(PRED, builder);
+ }),
[](bool a, bool b) { return a && b; }, true);
}
XLA_TEST_F(ReduceTest, VectorizedReduce_BooleanOr) {
RunVectorizedReduceTestForType<bool>(
- static_cast<FuncGenerator>(CreateScalarOrComputation),
+ static_cast<FuncGenerator>([](XlaBuilder* builder) {
+ return CreateScalarOrComputation(PRED, builder);
+ }),
[](bool a, bool b) { return a || b; }, false);
}
@@ -963,5 +967,32 @@ XLA_TEST_F(ReduceTest, ReduceIdentity) {
ErrorSpec(0.0001));
}
+XLA_TEST_F(ReduceTest, AndReduceU64) {
+ XlaBuilder builder(TestName());
+ Array2D<uint64> initializer = {{0x123456789ABCDEF0LL, 0x3BCDEF12A4567890LL},
+ {0XFFFFFFFFFFFFFFD6LL, 101},
+ {1, 0XFFFFFFFFFFFFFFFFLL}};
+ auto reducer = CreateScalarAndComputation(U64, &builder);
+ auto m = ConstantR2FromArray2D(&builder, initializer);
+ Reduce(m, ConstantR0<uint64>(&builder, 0xFFFFFFFFFFFFFFFFLL), reducer, {1});
+
+ std::vector<uint64> expected = {0x1204461080145890LL, 68, 1};
+ ComputeAndCompareR1<uint64>(&builder, expected, {});
+}
+
+XLA_TEST_F(ReduceTest, OrReduceU64) {
+ XlaBuilder builder(TestName());
+ Array2D<uint64> initializer = {{0x123456789ABCDEF0LL, 0x3BCDEF12A4567890LL},
+ {0xFFFFFFFFFFFFFFD6LL, 101},
+ {1, 0xCAFEBEEFABABABABLL}};
+ auto reducer = CreateScalarOrComputation(U64, &builder);
+ auto m = ConstantR2FromArray2D(&builder, initializer);
+ Reduce(m, ConstantR0<uint64>(&builder, 0), reducer, {1});
+
+ std::vector<uint64> expected = {0X3BFDFF7ABEFEFEF0LL, 0XFFFFFFFFFFFFFFF7LL,
+ 0xCAFEBEEFABABABABLL};
+ ComputeAndCompareR1<uint64>(&builder, expected, {});
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/tests/slice_test.cc b/tensorflow/compiler/xla/tests/slice_test.cc
index 48138e7b07..a593faca00 100644
--- a/tensorflow/compiler/xla/tests/slice_test.cc
+++ b/tensorflow/compiler/xla/tests/slice_test.cc
@@ -344,7 +344,11 @@ INSTANTIATE_TEST_CASE_P(
R1Spec{1024 * 1024 + 71, 3, 1024 * 512 - 9, 2},
R1Spec{1024 * 1024 + 71, 3, 1024 * 512 - 9, 8},
R1Spec{1024 * 1024 + 71, 3, 1024 * 512 - 9, 7},
- R1Spec{1024 * 1024 + 71, 3, 1024 * 512 - 9, 125}
+ R1Spec{1024 * 1024 + 71, 3, 1024 * 512 - 9, 125},
+ R1Spec{16 * 1024 * 1024, 0, 16 * 1024 * 1024, 4097},
+ R1Spec{16 * 1024 * 1024, 0, 16 * 1024 * 1024, 4093},
+ R1Spec{16 * 1024 * 1024, 12 * 1024 + 17, 16 * 1024 * 1024 - 231, 4097},
+ R1Spec{16 * 1024 * 1024, 12 * 1024 + 17, 16 * 1024 * 1024 - 231, 4093}
),
SliceR1TestDataToString
);
diff --git a/tensorflow/compiler/xla/tests/tuple_test.cc b/tensorflow/compiler/xla/tests/tuple_test.cc
index bf86c5dfb6..a517007591 100644
--- a/tensorflow/compiler/xla/tests/tuple_test.cc
+++ b/tensorflow/compiler/xla/tests/tuple_test.cc
@@ -29,6 +29,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/tests/literal_test_util.h"
#include "tensorflow/compiler/xla/tests/test_macros.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace xla {
@@ -545,5 +546,51 @@ XLA_TEST_F(TupleHloTest, DISABLED_ON_INTERPRETER(BitcastAfterGTE)) {
*result));
}
+// Disabled on interpreter due to lack of outfeed.
+XLA_TEST_F(TupleHloTest,
+ DISABLED_ON_INTERPRETER(NonAmbiguousTopLevelAllocation)) {
+ const char* testcase = R"(
+ HloModule tuple
+
+ ENTRY main {
+ a = f32[2] parameter(0)
+ b = f32[2] parameter(1)
+ c = f32[2] parameter(2)
+ d = f32[2] parameter(3)
+ cond = pred[] parameter(4)
+
+ tup0 = (f32[2],f32[2]) tuple(a, b)
+ tup1 = (f32[2],f32[2]) tuple(c, d)
+
+ s = (f32[2],f32[2]) tuple-select(cond, tup0, tup1)
+ gte = f32[2] get-tuple-element(s), index=0
+ tuple = (f32[2]) tuple(gte)
+ token = token[] after-all()
+ ROOT outfeed = token[] outfeed(tuple, token)
+ }
+ )";
+ auto module =
+ HloRunner::CreateModuleFromString(testcase, GetDebugOptionsForTest())
+ .ValueOrDie();
+ auto param0 = LiteralUtil::CreateR1<float>({1, 2});
+ auto param1 = LiteralUtil::CreateR1<float>({2, 3});
+ auto param4 = LiteralUtil::CreateR0<bool>(false);
+ // Put execution on a separate thread so we can block on outfeed.
+ std::unique_ptr<tensorflow::Thread> thread(
+ tensorflow::Env::Default()->StartThread(
+ tensorflow::ThreadOptions(), "execute_thread", [&] {
+ TF_EXPECT_OK(Execute(std::move(module),
+ {param0.get(), param1.get(), param1.get(),
+ param0.get(), param4.get()})
+ .status());
+ }));
+ auto expected =
+ LiteralUtil::MakeTupleOwned(LiteralUtil::CreateR1<float>({2, 3}));
+ auto literal = MakeUnique<Literal>();
+ TF_EXPECT_OK(backend().transfer_manager()->TransferLiteralFromOutfeed(
+ backend().default_stream_executor(), expected->shape(), literal.get()));
+ EXPECT_TRUE(LiteralTestUtil::Equal(*expected, *literal));
+}
+
} // namespace
} // namespace xla
diff --git a/tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc b/tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc
index 4d4dd62a3f..c000ff4dc8 100644
--- a/tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc
+++ b/tensorflow/compiler/xla/tests/xla_hlo_profile_test.cc
@@ -172,6 +172,7 @@ void ExecuteAndFetchProfile(string* profile_output, LocalClient* client,
auto execution_result,
executable->ExecuteOnStream(&run_options, {&lhs_arg, &rhs_arg},
&hlo_execution_profile));
+ TF_ASSERT_OK(stream_ptr->BlockHostUntilDone());
(void)execution_result;
*profile_output =
diff --git a/tensorflow/compiler/xla/xla.proto b/tensorflow/compiler/xla/xla.proto
index 6f07e4606b..10c0adc670 100644
--- a/tensorflow/compiler/xla/xla.proto
+++ b/tensorflow/compiler/xla/xla.proto
@@ -293,6 +293,7 @@ message ComputationStatsResponse {
}
message CreateChannelHandleRequest {
+ ChannelHandle.ChannelType channel_type = 1;
}
message CreateChannelHandleResponse {
diff --git a/tensorflow/compiler/xla/xla_data.proto b/tensorflow/compiler/xla/xla_data.proto
index c7472173a7..0b300dc7b2 100644
--- a/tensorflow/compiler/xla/xla_data.proto
+++ b/tensorflow/compiler/xla/xla_data.proto
@@ -308,6 +308,22 @@ message DeviceHandle {
// Send instructions will be blocked until the data is transferred.
message ChannelHandle {
int64 handle = 1;
+ enum ChannelType {
+    // Invalid channel type to serve as default.
+ CHANNEL_TYPE_INVALID = 0;
+
+ // A channel for sending data between devices.
+ DEVICE_TO_DEVICE = 1;
+
+ // A channel for sending data from the device to the host. Can only be used
+ // with a Send operation.
+ DEVICE_TO_HOST = 2;
+
+ // A channel for sending data from the host to the device. Can only be used
+ // with a Recv operation.
+ HOST_TO_DEVICE = 3;
+ }
+ ChannelType type = 2;
}
// DeviceAssignmentProto is a serialized form of DeviceAssignment class, which
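
Editor's note (not part of the patch): the proto changes add a ChannelType to ChannelHandle and let CreateChannelHandleRequest specify one. A minimal sketch of populating these fields from C++; it assumes only the standard setters generated for the messages shown above.

xla::CreateChannelHandleRequest request;
// Ask the service for a host-to-device channel (usable only with Recv).
request.set_channel_type(xla::ChannelHandle::HOST_TO_DEVICE);

xla::ChannelHandle handle;
handle.set_handle(1);
// Tag a handle as a device-to-host channel (usable only with Send).
handle.set_type(xla::ChannelHandle::DEVICE_TO_HOST);
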
diff --git a/tensorflow/contrib/BUILD b/tensorflow/contrib/BUILD
index 60be9db263..a173c51879 100644
--- a/tensorflow/contrib/BUILD
+++ b/tensorflow/contrib/BUILD
@@ -7,7 +7,6 @@ package(default_visibility = ["//tensorflow:__subpackages__"])
load("//third_party/mpi:mpi.bzl", "if_mpi")
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda")
-load("@local_config_tensorrt//:build_defs.bzl", "if_tensorrt")
load("//tensorflow:tensorflow.bzl", "if_not_windows")
load("//tensorflow:tensorflow.bzl", "if_not_windows_cuda")
@@ -113,9 +112,7 @@ py_library(
"//tensorflow/contrib/util:util_py",
"//tensorflow/python:util",
"//tensorflow/python/estimator:estimator_py",
- ] + if_mpi(["//tensorflow/contrib/mpi_collectives:mpi_collectives_py"]) + if_tensorrt([
- "//tensorflow/contrib/tensorrt:init_py",
- ]) + select({
+ ] + if_mpi(["//tensorflow/contrib/mpi_collectives:mpi_collectives_py"]) + select({
"//tensorflow:with_kafka_support_windows_override": [],
"//tensorflow:with_kafka_support": [
"//tensorflow/contrib/kafka",
@@ -134,6 +131,7 @@ py_library(
"//tensorflow/contrib/cloud:cloud_py", # doesn't compile on Windows
"//tensorflow/contrib/ffmpeg:ffmpeg_ops_py",
"//tensorflow/contrib/lite/python:lite", # unix dependency, need to fix code
+ "//tensorflow/contrib/tensorrt:init_py", # doesn't compile on windows
]),
)
diff --git a/tensorflow/contrib/android/cmake/src/main/AndroidManifest.xml b/tensorflow/contrib/android/cmake/src/main/AndroidManifest.xml
index bced47e046..c17110a78b 100644
--- a/tensorflow/contrib/android/cmake/src/main/AndroidManifest.xml
+++ b/tensorflow/contrib/android/cmake/src/main/AndroidManifest.xml
@@ -1,6 +1,10 @@
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="org.tensorflow.contrib.android">
+ <uses-sdk
+ android:minSdkVersion="4"
+ android:targetSdkVersion="19" />
+
<application android:allowBackup="true" android:label="@string/app_name"
android:supportsRtl="true">
diff --git a/tensorflow/contrib/autograph/README.md b/tensorflow/contrib/autograph/README.md
index 679ab48e5c..cc54da4daa 100644
--- a/tensorflow/contrib/autograph/README.md
+++ b/tensorflow/contrib/autograph/README.md
@@ -1,6 +1,6 @@
# AutoGraph
-IMPORTANT: AutoGraph is alpha software, and under active development. Expect rough edges and bugs, but if you try it, we appreciate early feedback! We'd also love contributions ([please see our contributing guidelines](CONTRIBUTING.md) and our [style guide](STYLE_GUIDE.md)).
+IMPORTANT: AutoGraph is beta software, and under active development. Expect rough edges and bugs, but if you try it, we appreciate early feedback! We'd also love contributions ([please see our contributing guidelines](CONTRIBUTING.md) and our [style guide](STYLE_GUIDE.md)).
AutoGraph is a Python to TensorFlow compiler.
@@ -68,12 +68,21 @@ Then import the `autograph` module from `tf.contrib`:
from tensorflow.contrib import autograph as ag
```
-### Interactive demo notebooks
+### Related links
-For more extensive examples, check out these interactive notebooks:
+Articles:
- * [RNN trained using Keras and Estimators](https://colab.sandbox.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/examples/notebooks/rnn_keras_estimator.ipynb)
+ * [TensorFlow blog post](https://medium.com/tensorflow/autograph-converts-python-into-tensorflow-graphs-b2a871f87ec7)
+
+Interactive notebooks:
+
+ * [Quick guide](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/guide/autograph.ipynb)
+ * [RNN trained using Keras and Estimators](https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/examples/notebooks/rnn_keras_estimator.ipynb)
* [Demo from the TF Dev Summit 2018](https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb)
+ * [Basic control flow speed test](https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_collatz_speed_test.ipynb)
+ * [MNIST training speed test](https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_mnist_speed_test.ipynb)
+ * [Basic algorithm samples](https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/examples/notebooks/algorithms.ipynb)
+ * [Introductory workshop support notebook](https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb)
## Using with annotations
diff --git a/tensorflow/contrib/autograph/__init__.py b/tensorflow/contrib/autograph/__init__.py
index 4f8ef2d8a1..26e7a4a4d3 100644
--- a/tensorflow/contrib/autograph/__init__.py
+++ b/tensorflow/contrib/autograph/__init__.py
@@ -22,21 +22,21 @@ from __future__ import division
from __future__ import print_function
# TODO(mdan): Bring only the relevant symbols to the top level.
-from tensorflow.contrib.autograph import utils
from tensorflow.contrib.autograph import operators
+from tensorflow.contrib.autograph import utils
+from tensorflow.contrib.autograph.core.errors import GraphConstructionError
+from tensorflow.contrib.autograph.core.errors import TfRuntimeError
+from tensorflow.contrib.autograph.core.errors import improved_errors
+from tensorflow.contrib.autograph.impl.api import RunMode
from tensorflow.contrib.autograph.impl.api import convert
from tensorflow.contrib.autograph.impl.api import converted_call
from tensorflow.contrib.autograph.impl.api import do_not_convert
-from tensorflow.contrib.autograph.impl.api import RunMode
from tensorflow.contrib.autograph.impl.api import to_code
-from tensorflow.contrib.autograph.core.errors import improved_errors
-from tensorflow.contrib.autograph.core.errors import rewrite_graph_construction_error
-from tensorflow.contrib.autograph.core.errors import GraphConstructionError
-from tensorflow.contrib.autograph.core.errors import TfRuntimeError
from tensorflow.contrib.autograph.impl.api import to_graph
from tensorflow.contrib.autograph.lang.directives import set_element_type
from tensorflow.contrib.autograph.lang.directives import set_loop_options
from tensorflow.contrib.autograph.lang.special_functions import stack
+from tensorflow.contrib.autograph.lang.special_functions import tensor_list
from tensorflow.contrib.autograph.pyct.transformer import AutographParseError
from tensorflow.python.util.all_util import remove_undocumented
@@ -46,16 +46,19 @@ _allowed_symbols = [
'convert',
'converted_call',
'do_not_convert',
- 'improved_errors',
'to_code',
'to_graph',
# Overloaded operators
'operators',
- 'rewrite_graph_construction_error',
+ # Errors
+ 'improved_errors',
+ 'GraphConstructionError',
+ 'TfRuntimeError',
# Python language "extensions"
'set_element_type',
'set_loop_options',
'stack',
+ 'tensor_list',
# Exceptions
'AutographParseError',
# Utilities: to be removed
diff --git a/tensorflow/contrib/autograph/converters/BUILD b/tensorflow/contrib/autograph/converters/BUILD
index b2e2e27673..7cbba71683 100644
--- a/tensorflow/contrib/autograph/converters/BUILD
+++ b/tensorflow/contrib/autograph/converters/BUILD
@@ -21,16 +21,18 @@ py_library(
"break_statements.py",
"builtin_functions.py",
"call_trees.py",
+ "conditional_expressions.py",
"continue_statements.py",
"control_flow.py",
"decorators.py",
- "ifexp.py",
- "list_comprehension.py",
+ "directives.py",
+ "error_handlers.py",
+ "list_comprehensions.py",
"lists.py",
"logical_expressions.py",
"name_scopes.py",
+ "return_statements.py",
"side_effect_guards.py",
- "single_return.py",
"slices.py",
],
srcs_version = "PY2AND3",
@@ -95,6 +97,17 @@ py_test(
)
py_test(
+ name = "conditional_expressions_test",
+ srcs = ["conditional_expressions_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":converters",
+ "//tensorflow/contrib/autograph/core:test_lib",
+ "//tensorflow/python:client_testlib",
+ ],
+)
+
+py_test(
name = "continue_statements_test",
srcs = ["continue_statements_test.py"],
srcs_version = "PY2AND3",
@@ -132,6 +145,18 @@ py_test(
)
py_test(
+ name = "directives_test",
+ srcs = ["directives_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":converters",
+ "//tensorflow/contrib/autograph/core:test_lib",
+ "//tensorflow/contrib/autograph/lang",
+ "//tensorflow/python:client_testlib",
+ ],
+)
+
+py_test(
name = "name_scopes_test",
srcs = ["name_scopes_test.py"],
deps = [
@@ -143,8 +168,8 @@ py_test(
)
py_test(
- name = "list_comprehension_test",
- srcs = ["list_comprehension_test.py"],
+ name = "list_comprehensions_test",
+ srcs = ["list_comprehensions_test.py"],
srcs_version = "PY2AND3",
deps = [
":converters",
@@ -179,11 +204,6 @@ py_test(
name = "side_effect_guards_test",
srcs = ["side_effect_guards_test.py"],
srcs_version = "PY2AND3",
- tags = [
- # TODO(mdan): Fix.
- "flaky",
- "notap",
- ],
deps = [
":converters",
"//tensorflow/contrib/autograph/core:test_lib",
@@ -192,8 +212,8 @@ py_test(
)
py_test(
- name = "single_return_test",
- srcs = ["single_return_test.py"],
+ name = "return_statements_test",
+ srcs = ["return_statements_test.py"],
srcs_version = "PY2AND3",
deps = [
":converters",
@@ -204,8 +224,8 @@ py_test(
)
py_test(
- name = "ifexp_test",
- srcs = ["ifexp_test.py"],
+ name = "error_handlers_test",
+ srcs = ["error_handlers_test.py"],
srcs_version = "PY2AND3",
deps = [
":converters",
diff --git a/tensorflow/contrib/autograph/converters/asserts.py b/tensorflow/contrib/autograph/converters/asserts.py
index e664a403a5..af2f20f267 100644
--- a/tensorflow/contrib/autograph/converters/asserts.py
+++ b/tensorflow/contrib/autograph/converters/asserts.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Converts Assert statements to their corresponding TF calls."""
+"""Converts assert statements to their corresponding TF calls."""
from __future__ import absolute_import
from __future__ import division
@@ -24,8 +24,8 @@ from tensorflow.contrib.autograph.core import converter
from tensorflow.contrib.autograph.pyct import templates
-class AssertsTransformer(converter.Base):
- """Transforms Print nodes to Call so they can be handled as functions."""
+class AssertTransformer(converter.Base):
+ """Transforms Assert nodes to Call so they can be handled as functions."""
def visit_Assert(self, node):
self.generic_visit(node)
@@ -46,4 +46,4 @@ class AssertsTransformer(converter.Base):
def transform(node, ctx):
- return AssertsTransformer(ctx).visit(node)
+ return AssertTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/asserts_test.py b/tensorflow/contrib/autograph/converters/asserts_test.py
index 2cd0e626bc..9c58ae3acc 100644
--- a/tensorflow/contrib/autograph/converters/asserts_test.py
+++ b/tensorflow/contrib/autograph/converters/asserts_test.py
@@ -32,8 +32,8 @@ class AssertsTest(converter_testing.TestCase):
def test_fn(a):
assert a > 0
- node = self.parse_and_analyze(test_fn, {})
- node = asserts.transform(node, self.ctx)
+ node, ctx = self.prepare(test_fn, {})
+ node = asserts.transform(node, ctx)
self.assertTrue(isinstance(node.body[0].body[0].value, gast.Call))
diff --git a/tensorflow/contrib/autograph/converters/break_statements.py b/tensorflow/contrib/autograph/converters/break_statements.py
index a990e359a2..2a60750bda 100644
--- a/tensorflow/contrib/autograph/converters/break_statements.py
+++ b/tensorflow/contrib/autograph/converters/break_statements.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Canonicalizes break statements by de-sugaring into a control boolean."""
+"""Lowers break statements to conditionals."""
from __future__ import absolute_import
from __future__ import division
@@ -24,17 +24,22 @@ from tensorflow.contrib.autograph.pyct import templates
from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
-# Tags for local state.
-BREAK_USED = 'break_used'
-CONTROL_VAR_NAME = 'control_var_name'
+class _Break(object):
+ def __init__(self):
+ self.used = False
+ self.control_var_name = None
-class BreakStatementTransformer(converter.Base):
+ def __repr__(self):
+ return 'used: %s, var: %s' % (self.used, self.control_var_name)
+
+
+class BreakTransformer(converter.Base):
"""Canonicalizes break statements into additional conditionals."""
def visit_Break(self, node):
- self.set_local(BREAK_USED, True)
- var_name = self.get_local(CONTROL_VAR_NAME)
+ self.state[_Break].used = True
+ var_name = self.state[_Break].control_var_name
# TODO(mdan): This will fail when expanded inside a top-level else block.
template = """
var_name = True
@@ -57,12 +62,12 @@ class BreakStatementTransformer(converter.Base):
block=block)
return node
- def _track_body(self, nodes, break_var):
- self.enter_local_scope()
- self.set_local(CONTROL_VAR_NAME, break_var)
+ def _process_body(self, nodes, break_var):
+ self.state[_Break].enter()
+ self.state[_Break].control_var_name = break_var
nodes = self.visit_block(nodes)
- break_used = self.get_local(BREAK_USED, False)
- self.exit_local_scope()
+ break_used = self.state[_Break].used
+ self.state[_Break].exit()
return nodes, break_used
def visit_While(self, node):
@@ -70,7 +75,7 @@ class BreakStatementTransformer(converter.Base):
break_var = self.ctx.namer.new_symbol('break_', scope.referenced)
node.test = self.visit(node.test)
- node.body, break_used = self._track_body(node.body, break_var)
+ node.body, break_used = self._process_body(node.body, break_var)
# A break in the else clause applies to the containing scope.
node.orelse = self.visit_block(node.orelse)
@@ -101,7 +106,7 @@ class BreakStatementTransformer(converter.Base):
node.target = self.visit(node.target)
node.iter = self.visit(node.iter)
- node.body, break_used = self._track_body(node.body, break_var)
+ node.body, break_used = self._process_body(node.body, break_var)
# A break in the else clause applies to the containing scope.
node.orelse = self.visit_block(node.orelse)
@@ -138,4 +143,4 @@ class BreakStatementTransformer(converter.Base):
def transform(node, ctx):
- return BreakStatementTransformer(ctx).visit(node)
+ return BreakTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/break_statements_test.py b/tensorflow/contrib/autograph/converters/break_statements_test.py
index dcff1c54c2..c26ca2946c 100644
--- a/tensorflow/contrib/autograph/converters/break_statements_test.py
+++ b/tensorflow/contrib/autograph/converters/break_statements_test.py
@@ -25,7 +25,11 @@ from tensorflow.python.platform import test
class BreakCanonicalizationTest(converter_testing.TestCase):
- def test_basic_while(self):
+ def assertTransformedEquivalent(self, test_fn, *inputs):
+ with self.converted(test_fn, break_statements, {}) as result:
+ self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+ def test_while_loop(self):
def test_fn(x):
v = []
@@ -36,15 +40,11 @@ class BreakCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v
- node = self.parse_and_analyze(test_fn, {})
- node = break_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- self.assertEqual([], result.test_fn(0))
- self.assertEqual([], result.test_fn(1))
- self.assertEqual([3], result.test_fn(4))
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 1)
+ self.assertTransformedEquivalent(test_fn, 4)
- def test_basic_for(self):
+ def test_for_loop(self):
def test_fn(a):
v = []
@@ -55,18 +55,12 @@ class BreakCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v
- node = self.parse_and_analyze(test_fn, {})
- node = break_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ with self.converted(test_fn, break_statements, {}) as result:
# The break is incompletely canonicalized. The loop will not interrupt,
# but the section following the break will be skipped.
- self.assertEqual([], result.test_fn([]))
- self.assertEqual([3, 3], result.test_fn([4, 4]))
- self.assertEqual([3], result.test_fn([4, 5]))
self.assertEqual([3], result.test_fn([5, 4]))
- def test_deeply_nested(self):
+ def test_nested(self):
def test_fn(x):
v = []
@@ -83,13 +77,9 @@ class BreakCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v, u, w
- node = self.parse_and_analyze(test_fn, {})
- node = break_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- self.assertEqual(([], [], []), result.test_fn(0))
- self.assertEqual(([2, 1], [2], [0]), result.test_fn(3))
- self.assertEqual(([10, 9, 8, 7], [10, 8], [6]), result.test_fn(11))
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 3)
+ self.assertTransformedEquivalent(test_fn, 11)
def test_nested_loops(self):
@@ -109,16 +99,12 @@ class BreakCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v, u
- node = self.parse_and_analyze(test_fn, {})
- node = break_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- self.assertEqual(([], []), result.test_fn(0))
- self.assertEqual(([1], []), result.test_fn(2))
- self.assertEqual(([2, 1], [1]), result.test_fn(3))
- self.assertEqual(([4, 3, 2, 1], [3, 1]), result.test_fn(5))
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, 3)
+ self.assertTransformedEquivalent(test_fn, 5)
- def test_loop_else(self):
+ def test_loop_orelse(self):
def test_fn(x):
v = []
@@ -134,13 +120,9 @@ class BreakCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v, u
- node = self.parse_and_analyze(test_fn, {})
- node = break_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- self.assertEqual(([], []), result.test_fn(0))
- self.assertEqual(([], [1]), result.test_fn(2))
- self.assertEqual(([2], [1]), result.test_fn(3))
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, 3)
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/builtin_functions_test.py b/tensorflow/contrib/autograph/converters/builtin_functions_test.py
index e9000e518c..d5c3e2c250 100644
--- a/tensorflow/contrib/autograph/converters/builtin_functions_test.py
+++ b/tensorflow/contrib/autograph/converters/builtin_functions_test.py
@@ -18,8 +18,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import sys
-
import six
from tensorflow.contrib.autograph.converters import builtin_functions
@@ -36,55 +34,39 @@ class BuiltinFunctionsTest(converter_testing.TestCase):
def test_fn(a):
return len(a)
- node = self.parse_and_analyze(test_fn, {'len': len})
- node = builtin_functions.transform(node, self.ctx)
-
- with self.compiled(node, array_ops.shape) as result:
+ with self.converted(test_fn, builtin_functions, {'len': len},
+ array_ops.shape) as result:
with self.test_session() as sess:
- self.assertEqual(3,
- sess.run(
- result.test_fn(constant_op.constant([0, 0, 0]))))
-
- self.assertEqual(3, result.test_fn([0, 0, 0]))
+ ops = result.test_fn(constant_op.constant([0, 0, 0]))
+ self.assertEqual(sess.run(ops), 3)
def test_print(self):
- def test_fn(a):
- print(a)
+ if six.PY2:
+ return
- node = self.parse_and_analyze(test_fn, {'print': print})
- node = builtin_functions.transform(node, self.ctx)
+ def test_fn(a):
+ return print(a)
- with self.compiled(node) as result:
+ with self.converted(test_fn, builtin_functions, {'print': print}) as result:
with self.test_session() as sess:
- try:
- out_capturer = six.StringIO()
- sys.stdout = out_capturer
- result.test_fn(constant_op.constant('a'))
- sess.run(sess.graph.get_operations())
- self.assertEqual(out_capturer.getvalue(), 'a\n')
- finally:
- sys.stdout = sys.__stdout__
+ with self.assertPrints('a\n'):
+ sess.run(result.test_fn('a'))
- def test_print_with_op_multiple_values(self):
+ def test_print_multiple_values(self):
- def test_fn(a, b, c):
- print(a, b, c)
+ if six.PY2:
+ return
- node = self.parse_and_analyze(test_fn, {'print': print})
- node = builtin_functions.transform(node, self.ctx)
+ def test_fn(a, b, c):
+ return print(a, b, c)
- with self.compiled(node) as result:
+ with self.converted(test_fn, builtin_functions, {'print': print}) as result:
with self.test_session() as sess:
- try:
- out_capturer = six.StringIO()
- sys.stdout = out_capturer
- result.test_fn(
- constant_op.constant('a'), constant_op.constant(1), [2, 3])
- sess.run(sess.graph.get_operations())
- self.assertEqual(out_capturer.getvalue(), 'a 1 [2, 3]\n')
- finally:
- sys.stdout = sys.__stdout__
+ with self.assertPrints('a 1 [2, 3]\n'):
+ sess.run(
+ result.test_fn(
+ constant_op.constant('a'), constant_op.constant(1), [2, 3]))
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/call_trees_test.py b/tensorflow/contrib/autograph/converters/call_trees_test.py
index 27d8281b85..8cdba659ee 100644
--- a/tensorflow/contrib/autograph/converters/call_trees_test.py
+++ b/tensorflow/contrib/autograph/converters/call_trees_test.py
@@ -36,37 +36,34 @@ class CallTreesTest(converter_testing.TestCase):
def test_fn_1(_):
raise ValueError('This should not be called in the compiled version.')
- def renamed_test_fn_1(a):
+ def other_test_fn_1(a):
return a + 1
def test_fn_2(a):
return test_fn_1(a) + 1
- node = self.parse_and_analyze(test_fn_2, {'test_fn_1': test_fn_1})
- node = call_trees.transform(node, self.ctx)
+ ns = {'test_fn_1': test_fn_1}
+ node, ctx = self.prepare(test_fn_2, ns)
+ node = call_trees.transform(node, ctx)
- with self.compiled(node) as result:
- # Only test_fn_2 is transformed, so we'll insert renamed_test_fn_1
- # manually.
- result.renamed_test_fn_1 = renamed_test_fn_1
- self.assertEquals(3, result.test_fn_2(1))
+ with self.compiled(node, ns) as result:
+ new_name, _ = ctx.namer.compiled_function_name(('test_fn_1',))
+ setattr(result, new_name, other_test_fn_1)
+ self.assertEquals(result.test_fn_2(1), 3)
def test_dynamic_function(self):
def test_fn_1():
- raise ValueError('This should be masked by the mock.')
+ raise ValueError('This should be masked by the mock in self.compiled.')
def test_fn_2(f):
return f() + 3
- node = self.parse_and_analyze(test_fn_2, {})
- node = call_trees.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ with self.converted(test_fn_2, call_trees, {}) as result:
# 10 = 7 (from the mock) + 3 (from test_fn_2)
self.assertEquals(10, result.test_fn_2(test_fn_1))
- def test_simple_methods(self):
+ def test_basic_method(self):
class TestClass(object):
@@ -76,49 +73,43 @@ class CallTreesTest(converter_testing.TestCase):
def test_fn_2(self, a):
return self.test_fn_1(a) + 1
- node = self.parse_and_analyze(
- TestClass.test_fn_2, {'TestClass': TestClass},
+ ns = {'TestClass': TestClass}
+ node, ctx = self.prepare(
+ TestClass.test_fn_2,
+ ns,
namer=converter_testing.FakeNoRenameNamer(),
arg_types={'self': (TestClass.__name__, TestClass)})
- node = call_trees.transform(node, self.ctx)
+ node = call_trees.transform(node, ctx)
- with self.compiled(node) as result:
+ with self.compiled(node, ns) as result:
tc = TestClass()
self.assertEquals(3, result.test_fn_2(tc, 1))
- def test_py_func_wrap_no_retval(self):
+ def test_py_func_no_retval(self):
def test_fn(a):
setattr(a, 'foo', 'bar')
- node = self.parse_and_analyze(test_fn, {'setattr': setattr})
- node = call_trees.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ with self.converted(test_fn, call_trees, {'setattr': setattr}) as result:
with self.test_session() as sess:
- # The function has no return value, so we do some tricks to grab the
- # generated py_func node and ensure its effect only happens at graph
- # execution.
class Dummy(object):
pass
a = Dummy()
result.test_fn(a)
+ py_func_op, = sess.graph.get_operations()
self.assertFalse(hasattr(a, 'foo'))
- sess.run(sess.graph.get_operations()[0])
+ sess.run(py_func_op)
self.assertEquals('bar', a.foo)
- def test_py_func_wrap_known_function(self):
+ def test_py_func_known_function(self):
def test_fn():
return np.random.binomial(2, 0.5)
- node = self.parse_and_analyze(test_fn, {'np': np})
- node = call_trees.transform(node, self.ctx)
-
- with self.compiled(node, dtypes.int64) as result:
- result.np = np
+ with self.converted(test_fn, call_trees, {'np': np},
+ dtypes.int64) as result:
with self.test_session() as sess:
self.assertTrue(isinstance(result.test_fn(), ops.Tensor))
self.assertIn(sess.run(result.test_fn()), (0, 1, 2))
@@ -130,22 +121,17 @@ class CallTreesTest(converter_testing.TestCase):
a = math_ops.add(a, constant_op.constant(1))
return a
- node = self.parse_and_analyze(
- test_fn, {
- 'math_ops': math_ops,
- 'constant_op': constant_op
- },
+ ns = {'math_ops': math_ops, 'constant_op': constant_op}
+ node, ctx = self.prepare(
+ test_fn,
+ ns,
arg_types=set(((math_ops.__name__,), (constant_op.__name__,))))
- node = call_trees.transform(node, self.ctx)
+ node = call_trees.transform(node, ctx)
- with self.compiled(node) as result:
- result.math_ops = math_ops
- result.constant_op = constant_op
+ with self.compiled(node, ns) as result:
with self.test_session() as sess:
- # Not renamed, because the converter doesn't rename the definition
- # itself (the caller is responsible for that).
result_tensor = result.test_fn(constant_op.constant(1))
- self.assertEquals(3, sess.run(result_tensor))
+ self.assertEquals(sess.run(result_tensor), 3)
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/conditional_expressions.py b/tensorflow/contrib/autograph/converters/conditional_expressions.py
new file mode 100644
index 0000000000..63f649dfdf
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/conditional_expressions.py
@@ -0,0 +1,129 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Converts the ternary conditional operator."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import templates
+from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
+
+
+class _FunctionDefs(object):
+
+ def __init__(self):
+ self.nodes = []
+
+
+class _Statement(object):
+
+ def __init__(self):
+ self.scope = None
+
+
+class ConditionalExpressionTransformer(converter.Base):
+ """Converts contitional expressions to functional form."""
+
+ def _postprocess_statement(self, node):
+ """Inserts any separate functions that node may use."""
+ replacements = []
+ for def_node in self.state[_FunctionDefs].nodes:
+ replacements.extend(def_node)
+ replacements.append(node)
+ node = replacements
+ # The corresponding enter is called by self.visit_block (see _process_block)
+ self.state[_FunctionDefs].exit()
+ return node, None
+
+ def _create_branch(self, expr, name_stem):
+ scope = self.state[_Statement].scope
+ name = self.ctx.namer.new_symbol(name_stem, scope.referenced)
+ template = """
+ def name():
+ return expr,
+ """
+ node = templates.replace(template, name=name, expr=expr)
+ self.state[_FunctionDefs].nodes.append(node)
+ return name
+
+ def visit_IfExp(self, node):
+ if anno.hasanno(node.test, anno.Basic.QN):
+ name_root = anno.getanno(node.test, anno.Basic.QN).ssf()
+ else:
+ name_root = 'ifexp'
+
+ true_fn_name = self._create_branch(node.body, '%s_true' % name_root)
+ false_fn_name = self._create_branch(node.orelse, '%s_false' % name_root)
+
+ return templates.replace_as_expression(
+ 'ag__.utils.run_cond(test, true_fn_name, false_fn_name)',
+ test=node.test,
+ true_fn_name=true_fn_name,
+ false_fn_name=false_fn_name)
+
+ def _process_block(self, scope, block):
+ self.state[_Statement].enter()
+ self.state[_Statement].scope = scope
+ block = self.visit_block(
+ block,
+ before_visit=self.state[_FunctionDefs].enter,
+ after_visit=self._postprocess_statement)
+ self.state[_Statement].exit()
+ return block
+
+ def visit_FunctionDef(self, node):
+ node.args = self.generic_visit(node.args)
+ node.decorator_list = self.visit_block(node.decorator_list)
+ node.body = self._process_block(
+ anno.getanno(node, anno.Static.SCOPE), node.body)
+ return node
+
+ def visit_For(self, node):
+ node.target = self.visit(node.target)
+ node.body = self._process_block(
+ anno.getanno(node, NodeAnno.BODY_SCOPE), node.body)
+ node.orelse = self._process_block(
+ anno.getanno(node, NodeAnno.ORELSE_SCOPE), node.orelse)
+ return node
+
+ def visit_While(self, node):
+ node.test = self.visit(node.test)
+ node.body = self._process_block(
+ anno.getanno(node, NodeAnno.BODY_SCOPE), node.body)
+ node.orelse = self._process_block(
+ anno.getanno(node, NodeAnno.ORELSE_SCOPE), node.orelse)
+ return node
+
+ def visit_If(self, node):
+ node.test = self.visit(node.test)
+ node.body = self._process_block(
+ anno.getanno(node, NodeAnno.BODY_SCOPE), node.body)
+ node.orelse = self._process_block(
+ anno.getanno(node, NodeAnno.ORELSE_SCOPE), node.orelse)
+ return node
+
+ def visit_With(self, node):
+ node.items = self.visit_block(node.items)
+ node.body = self._process_block(
+ anno.getanno(node, NodeAnno.BODY_SCOPE), node.body)
+ return node
+
+
+def transform(node, ctx):
+ node = ConditionalExpressionTransformer(ctx).visit(node)
+ return node
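A sketch, separate from the patch itself, of the output shape this converter produces, following the "def name(): return expr," and run_cond templates above. The branch names are invented here; the real ones come from ctx.namer, and ag__ is the module alias available in generated code.

    # Before: y = x * x if x > 0 else x
    # Roughly after:
    def ifexp_true():
      return x * x,  # trailing comma mirrors the "return expr," template

    def ifexp_false():
      return x,

    y = ag__.utils.run_cond(x > 0, ifexp_true, ifexp_false)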
diff --git a/tensorflow/contrib/autograph/converters/conditional_expressions_test.py b/tensorflow/contrib/autograph/converters/conditional_expressions_test.py
new file mode 100644
index 0000000000..95a3108741
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/conditional_expressions_test.py
@@ -0,0 +1,53 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for conditional_expressions module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import conditional_expressions
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.python.platform import test
+
+
+class ConditionalExpressionsTest(converter_testing.TestCase):
+
+ def assertTransformedEquivalent(self, test_fn, *inputs):
+ ns = {}
+ with self.converted(test_fn, conditional_expressions, ns) as result:
+ self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+ def test_basic(self):
+
+ def test_fn(x):
+ return 1 if x else 0
+
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 3)
+
+ def test_nested_orelse(self):
+
+ def test_fn(x):
+ y = x * x if x > 0 else x if x else 1
+ return y
+
+ self.assertTransformedEquivalent(test_fn, -2)
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 2)
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/autograph/converters/continue_statements_test.py b/tensorflow/contrib/autograph/converters/continue_statements_test.py
index 2ce1837972..3a7c7d1486 100644
--- a/tensorflow/contrib/autograph/converters/continue_statements_test.py
+++ b/tensorflow/contrib/autograph/converters/continue_statements_test.py
@@ -25,7 +25,11 @@ from tensorflow.python.platform import test
class ContinueCanonicalizationTest(converter_testing.TestCase):
- def test_basic_continue(self):
+ def assertTransformedEquivalent(self, test_fn, *inputs):
+ with self.converted(test_fn, continue_statements, {}) as result:
+ self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+ def test_basic(self):
def test_fn(x):
v = []
@@ -36,17 +40,12 @@ class ContinueCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v
- node = self.parse_and_analyze(test_fn, {})
- node = continue_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- self.assertEqual(test_fn(0), result.test_fn(0))
- self.assertEqual(test_fn(1), result.test_fn(1))
- self.assertEqual(test_fn(2), result.test_fn(2))
- self.assertEqual(test_fn(3), result.test_fn(3))
- self.assertEqual(test_fn(4), result.test_fn(4))
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 1)
+ self.assertTransformedEquivalent(test_fn, 3)
+ self.assertTransformedEquivalent(test_fn, 4)
- def test_basic_continue_for_loop(self):
+ def test_for_loop(self):
def test_fn(a):
v = []
@@ -57,16 +56,12 @@ class ContinueCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v
- node = self.parse_and_analyze(test_fn, {})
- node = continue_statements.transform(node, self.ctx)
+ self.assertTransformedEquivalent(test_fn, [])
+ self.assertTransformedEquivalent(test_fn, [1])
+ self.assertTransformedEquivalent(test_fn, [2])
+ self.assertTransformedEquivalent(test_fn, [1, 2, 3])
- with self.compiled(node) as result:
- self.assertEqual(test_fn([]), result.test_fn([]))
- self.assertEqual(test_fn([1]), result.test_fn([1]))
- self.assertEqual(test_fn([2]), result.test_fn([2]))
- self.assertEqual(test_fn([1, 2, 3]), result.test_fn([1, 2, 3]))
-
- def test_continue_deeply_nested(self):
+ def test_nested(self):
def test_fn(x):
v = []
@@ -83,15 +78,10 @@ class ContinueCanonicalizationTest(converter_testing.TestCase):
v.append(x)
return v, u, w
- node = self.parse_and_analyze(test_fn, {})
- node = continue_statements.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- self.assertEqual(test_fn(0), result.test_fn(0))
- self.assertEqual(test_fn(1), result.test_fn(1))
- self.assertEqual(test_fn(2), result.test_fn(2))
- self.assertEqual(test_fn(3), result.test_fn(3))
- self.assertEqual(test_fn(4), result.test_fn(4))
+ self.assertTransformedEquivalent(test_fn, 0)
+ self.assertTransformedEquivalent(test_fn, 1)
+ self.assertTransformedEquivalent(test_fn, 3)
+ self.assertTransformedEquivalent(test_fn, 4)
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/control_flow.py b/tensorflow/contrib/autograph/converters/control_flow.py
index f4a8710627..5a5a6ad63a 100644
--- a/tensorflow/contrib/autograph/converters/control_flow.py
+++ b/tensorflow/contrib/autograph/converters/control_flow.py
@@ -25,8 +25,7 @@ from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import templates
-from tensorflow.contrib.autograph.pyct.static_analysis import cfg
-from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
+from tensorflow.contrib.autograph.pyct.static_analysis import annos
class SymbolNamer(object):
@@ -47,6 +46,7 @@ class SymbolNamer(object):
class ControlFlowTransformer(converter.Base):
"""Transforms control flow structures like loops an conditionals."""
+
def _create_cond_branch(self, body_name, aliased_orig_names,
aliased_new_names, body, returns):
if aliased_orig_names:
@@ -90,55 +90,51 @@ class ControlFlowTransformer(converter.Base):
return templates.replace(
template, test=test, body_name=body_name, orelse_name=orelse_name)
- def visit_If(self, node):
- self.generic_visit(node)
+ def _fmt_symbol_list(self, symbol_set):
+ if not symbol_set:
+ return 'no variables'
+ return ', '.join(map(str, symbol_set))
- body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
- orelse_scope = anno.getanno(node, NodeAnno.ORELSE_SCOPE)
- body_defs = body_scope.created | body_scope.modified
- orelse_defs = orelse_scope.created | orelse_scope.modified
- live = anno.getanno(node, 'live_out')
-
- # We'll need to check if we're closing over variables that are defined
- # elsewhere in the function
- # NOTE: we can only detect syntactic closure in the scope
- # of the code passed in. If the AutoGraph'd function itself closes
- # over other variables, this analysis won't take that into account.
- defined = anno.getanno(node, 'defined_in')
-
- # We only need to return variables that are
- # - modified by one or both branches
- # - live (or has a live parent) at the end of the conditional
- modified = []
- for def_ in body_defs | orelse_defs:
- def_with_parents = set((def_,)) | def_.support_set
- if live & def_with_parents:
- modified.append(def_)
-
- # We need to check if live created variables are balanced
- # in both branches
- created = live & (body_scope.created | orelse_scope.created)
-
- # The if statement is illegal if there are variables that are created,
- # that are also live, but both branches don't create them.
- if created:
- if created != (body_scope.created & live):
- raise ValueError(
- 'The main branch does not create all live symbols that the else '
- 'branch does.')
- if created != (orelse_scope.created & live):
- raise ValueError(
- 'The else branch does not create all live symbols that the main '
- 'branch does.')
-
- # Alias the closure variables inside the conditional functions
- # to avoid errors caused by the local variables created in the branch
- # functions.
+ def visit_If(self, node):
+ node = self.generic_visit(node)
+
+ body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
+ orelse_scope = anno.getanno(node, annos.NodeAnno.ORELSE_SCOPE)
+ defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
+ live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
+
+ modified_in_cond = body_scope.modified | orelse_scope.modified
+ returned_from_cond = set()
+ for s in modified_in_cond:
+ if s in live_out:
+ returned_from_cond.add(s)
+ elif s.is_composite():
+ # Special treatment for compound objects: if any of their owner entities
+ # are live, then they are outputs as well.
+ if any(owner in live_out for owner in s.owner_set):
+ returned_from_cond.add(s)
+
+ need_alias_in_body = body_scope.modified & defined_in
+ need_alias_in_orelse = orelse_scope.modified & defined_in
+
+ created_in_body = body_scope.modified & returned_from_cond - defined_in
+ created_in_orelse = orelse_scope.modified & returned_from_cond - defined_in
+
+ if created_in_body != created_in_orelse:
+ raise ValueError(
+ 'if statement may not initialize all variables: the true branch'
+ ' creates %s, while the false branch creates %s. Make sure all'
+ ' these variables are initialized either in both'
+ ' branches or before the if statement.' %
+ (self._fmt_symbol_list(created_in_body),
+ self._fmt_symbol_list(created_in_orelse)))
+
+ # Alias the closure variables inside the conditional functions, to allow
+ # the functions access to the respective variables.
# We will alias variables independently for body and orelse scope,
# because different branches might write different variables.
- aliased_body_orig_names = tuple(body_scope.modified - body_scope.created)
- aliased_orelse_orig_names = tuple(orelse_scope.modified -
- orelse_scope.created)
+ aliased_body_orig_names = tuple(need_alias_in_body)
+ aliased_orelse_orig_names = tuple(need_alias_in_orelse)
aliased_body_new_names = tuple(
self.ctx.namer.new_symbol(s.ssf(), body_scope.referenced)
for s in aliased_body_orig_names)
@@ -153,58 +149,47 @@ class ControlFlowTransformer(converter.Base):
node_body = ast_util.rename_symbols(node.body, alias_body_map)
node_orelse = ast_util.rename_symbols(node.orelse, alias_orelse_map)
- if not modified:
+ returned_from_cond = tuple(returned_from_cond)
+ if returned_from_cond:
+ if len(returned_from_cond) == 1:
+ # TODO(mdan): Move this quirk into the operator implementation.
+ cond_results = returned_from_cond[0]
+ else:
+ cond_results = gast.Tuple([s.ast() for s in returned_from_cond], None)
+
+ returned_from_body = tuple(
+ alias_body_map[s] if s in need_alias_in_body else s
+ for s in returned_from_cond)
+ returned_from_orelse = tuple(
+ alias_orelse_map[s] if s in need_alias_in_orelse else s
+ for s in returned_from_cond)
+
+ else:
# When the cond would return no value, we leave the cond called without
# results. That in turn should trigger the side effect guards. The
# branch functions will return a dummy value that ensures cond
# actually has some return value as well.
- results = None
- elif len(modified) == 1:
- results = modified[0]
- else:
- results = gast.Tuple([s.ast() for s in modified], None)
+ cond_results = None
+ # TODO(mdan): This doesn't belong here; it's specific to the operator.
+ returned_from_body = templates.replace_as_expression('tf.constant(1)')
+ returned_from_orelse = templates.replace_as_expression('tf.constant(1)')
body_name = self.ctx.namer.new_symbol('if_true', body_scope.referenced)
orelse_name = self.ctx.namer.new_symbol('if_false', orelse_scope.referenced)
- if modified:
-
- def build_returns(aliased_names, alias_map, scope):
- """Builds list of return variables for a branch of a conditional."""
- returns = []
- for s in modified:
- if s in aliased_names:
- returns.append(alias_map[s])
- else:
- if s not in scope.created | defined:
- raise ValueError(
- 'Attempting to return variable "%s" from the true branch of '
- 'a conditional, but it was not closed over, or created in '
- 'this branch.' % str(s))
- else:
- returns.append(s)
- return tuple(returns)
-
- body_returns = build_returns(aliased_body_orig_names, alias_body_map,
- body_scope)
- orelse_returns = build_returns(aliased_orelse_orig_names,
- alias_orelse_map, orelse_scope)
-
- else:
- body_returns = orelse_returns = templates.replace('tf.ones(())')[0].value
body_def = self._create_cond_branch(
body_name,
- aliased_orig_names=tuple(aliased_body_orig_names),
- aliased_new_names=tuple(aliased_body_new_names),
+ aliased_orig_names=aliased_body_orig_names,
+ aliased_new_names=aliased_body_new_names,
body=node_body,
- returns=body_returns)
+ returns=returned_from_body)
orelse_def = self._create_cond_branch(
orelse_name,
- aliased_orig_names=tuple(aliased_orelse_orig_names),
- aliased_new_names=tuple(aliased_orelse_new_names),
+ aliased_orig_names=aliased_orelse_orig_names,
+ aliased_new_names=aliased_orelse_new_names,
body=node_orelse,
- returns=orelse_returns)
- cond_expr = self._create_cond_expr(results, node.test, body_name,
+ returns=returned_from_orelse)
+ cond_expr = self._create_cond_expr(cond_results, node.test, body_name,
orelse_name)
return body_def + orelse_def + cond_expr
@@ -212,11 +197,11 @@ class ControlFlowTransformer(converter.Base):
def visit_While(self, node):
self.generic_visit(node)
- body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
+ body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
body_closure = body_scope.modified - body_scope.created
all_referenced = body_scope.referenced
- cond_scope = anno.getanno(node, NodeAnno.COND_SCOPE)
+ cond_scope = anno.getanno(node, annos.NodeAnno.COND_SCOPE)
cond_closure = set()
for s in cond_scope.referenced:
for root in s.support_set:
@@ -277,7 +262,7 @@ class ControlFlowTransformer(converter.Base):
def visit_For(self, node):
self.generic_visit(node)
- body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
+ body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
body_closure = body_scope.modified - body_scope.created
all_referenced = body_scope.referenced
@@ -331,7 +316,5 @@ class ControlFlowTransformer(converter.Base):
def transform(node, ctx):
- cfg.run_analyses(node, cfg.Liveness(ctx.info))
- cfg.run_analyses(node, cfg.Defined(ctx.info))
node = ControlFlowTransformer(ctx).visit(node)
return node
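As a reader's aid, separate from the patch: the new liveness-based check in visit_If accepts conditionals whose branches create the same set of live variables and rejects the rest. Two minimal hand-written examples matching the behavior exercised by the tests below:

    # Accepted: `a` and `b` are live after the if, but both are initialized
    # before it, so neither branch alone creates a live symbol.
    def balanced(n):
      a = 0
      b = 0
      if n > 0:
        a = -n
      else:
        b = 2 * n
      return a, b

    # Rejected with "if statement may not initialize all variables": `b` is
    # live after the if, yet only the true branch creates it.
    def imbalanced(n):
      if n > 0:
        b = 4
      return b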
diff --git a/tensorflow/contrib/autograph/converters/control_flow_test.py b/tensorflow/contrib/autograph/converters/control_flow_test.py
index 735eb92a0d..ade3501426 100644
--- a/tensorflow/contrib/autograph/converters/control_flow_test.py
+++ b/tensorflow/contrib/autograph/converters/control_flow_test.py
@@ -20,16 +20,23 @@ from __future__ import print_function
from tensorflow.contrib.autograph.converters import control_flow
from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
-from tensorflow.python.ops import array_ops
-from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import test
class ControlFlowTest(converter_testing.TestCase):
- def test_simple_while(self):
+ def assertTransformedResult(self, test_fn, inputs, expected):
+ if not isinstance(inputs, tuple):
+ inputs = (inputs,)
+ with self.converted(test_fn, control_flow, {},
+ constant_op.constant) as result:
+ with self.test_session() as sess:
+ self.assertEqual(sess.run(result.test_fn(*inputs)), expected)
+
+ def test_while_basic(self):
def test_fn(n):
i = 0
@@ -39,29 +46,18 @@ class ControlFlowTest(converter_testing.TestCase):
i += 1
return s, i, n
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- with self.test_session() as sess:
- self.assertEqual((10, 5, 5),
- sess.run(result.test_fn(constant_op.constant(5))))
+ self.assertTransformedResult(test_fn, constant_op.constant(5), (10, 5, 5))
- def test_while_single_var(self):
+ def test_while_single_output(self):
def test_fn(n):
while n > 0:
n -= 1
return n
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
+ self.assertTransformedResult(test_fn, constant_op.constant(5), 0)
- with self.compiled(node) as result:
- with self.test_session() as sess:
- self.assertEqual(0, sess.run(result.test_fn(constant_op.constant(5))))
-
- def test_simple_if(self):
+ def test_if_basic(self):
def test_fn(n):
a = 0
@@ -72,114 +68,85 @@ class ControlFlowTest(converter_testing.TestCase):
b = 2 * n
return a, b
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
+ self.assertTransformedResult(test_fn, constant_op.constant(1), (-1, 0))
+ self.assertTransformedResult(test_fn, constant_op.constant(-1), (0, -2))
+
+ def test_if_complex_outputs(self):
+
+ class TestClass(object):
- with self.compiled(node) as result:
+ def __init__(self, a, b):
+ self.a = a
+ self.b = b
+
+ def test_fn(n, obj):
+ obj.a = 0
+ obj.b = 0
+ if n > 0:
+ obj.a = -n
+ else:
+ obj.b = 2 * n
+ return obj
+
+ with self.converted(test_fn, control_flow, {}) as result:
with self.test_session() as sess:
- self.assertEqual((-1, 0),
- sess.run(result.test_fn(constant_op.constant(1))))
- self.assertEqual((0, -2),
- sess.run(result.test_fn(constant_op.constant(-1))))
+ res_obj = result.test_fn(constant_op.constant(1), TestClass(0, 0))
+ self.assertEqual(sess.run((res_obj.a, res_obj.b)), (-1, 0))
+ res_obj = result.test_fn(constant_op.constant(-1), TestClass(0, 0))
+ self.assertEqual(sess.run((res_obj.a, res_obj.b)), (0, -2))
- def test_if_single_var(self):
+ def test_if_single_output(self):
def test_fn(n):
if n > 0:
n = -n
return n
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
+ self.assertTransformedResult(test_fn, constant_op.constant(1), -1)
- with self.compiled(node) as result:
- with self.test_session() as sess:
- self.assertEqual(-1, sess.run(result.test_fn(constant_op.constant(1))))
-
- def test_imbalanced_aliasing(self):
+ def test_if_semi(self):
def test_fn(n):
if n > 0:
n = 3
return n
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
-
- with self.compiled(node, control_flow_ops.cond) as result:
- with self.test_session() as sess:
- self.assertEqual(3, sess.run(result.test_fn(constant_op.constant(2))))
- self.assertEqual(-3, sess.run(result.test_fn(constant_op.constant(-3))))
+ self.assertTransformedResult(test_fn, constant_op.constant(2), 3)
+ self.assertTransformedResult(test_fn, constant_op.constant(-3), -3)
- def test_ignore_unread_variable(self):
+ def test_if_local_var(self):
def test_fn(n):
- b = 3 # pylint: disable=unused-variable
if n > 0:
b = 4
+ n = b + 1
return n
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
+ self.assertTransformedResult(test_fn, constant_op.constant(1), 5)
+ self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)
- with self.compiled(node, control_flow_ops.cond, array_ops.ones) as result:
- with self.test_session() as sess:
- self.assertEqual(3, sess.run(result.test_fn(constant_op.constant(3))))
- self.assertEqual(-3, sess.run(result.test_fn(constant_op.constant(-3))))
+ def test_if_no_outputs(self):
- def test_handle_temp_variable(self):
+ def test_fn(n):
+ if n > 0:
+ b = 4 # pylint:disable=unused-variable
+ return n
- def test_fn_using_temp(x, y, w):
- if x < y:
- z = x + y
- else:
- w = 2
- tmp = w
- z = x - tmp
- return z, w
+ # Without side effect guards, the if statement will stage a cond,
+ # but that will be pruned at execution.
+ self.assertTransformedResult(test_fn, constant_op.constant(1), 1)
+ self.assertTransformedResult(test_fn, constant_op.constant(-1), -1)
- node = self.parse_and_analyze(test_fn_using_temp, {})
- node = control_flow.transform(node, self.ctx)
+ def test_if_imbalanced_outputs(self):
- with self.compiled(node, control_flow_ops.cond, array_ops.ones) as result:
- with self.test_session() as sess:
- z, w = sess.run(
- result.test_fn_using_temp(
- constant_op.constant(-3), constant_op.constant(3),
- constant_op.constant(3)))
- self.assertEqual(0, z)
- self.assertEqual(3, w)
- z, w = sess.run(
- result.test_fn_using_temp(
- constant_op.constant(3), constant_op.constant(-3),
- constant_op.constant(3)))
- self.assertEqual(1, z)
- self.assertEqual(2, w)
-
- def test_fn_ignoring_temp(x, y, w):
- if x < y:
- z = x + y
- else:
- w = 2
- tmp = w
- z = x - tmp
- return z
+ def test_fn(n):
+ if n > 0:
+ b = 4
+ return b
- node = self.parse_and_analyze(test_fn_ignoring_temp, {})
- node = control_flow.transform(node, self.ctx)
-
- with self.compiled(node, control_flow_ops.cond, array_ops.ones) as result:
- with self.test_session() as sess:
- z = sess.run(
- result.test_fn_ignoring_temp(
- constant_op.constant(-3), constant_op.constant(3),
- constant_op.constant(3)))
- self.assertEqual(0, z)
- z = sess.run(
- result.test_fn_ignoring_temp(
- constant_op.constant(3), constant_op.constant(-3),
- constant_op.constant(3)))
- self.assertEqual(1, z)
+ node, ctx = self.prepare(test_fn, {})
+ with self.assertRaises(transformer.AutographParseError):
+ control_flow.transform(node, ctx)
def test_simple_for(self):
@@ -191,22 +158,11 @@ class ControlFlowTest(converter_testing.TestCase):
s2 += e * e
return s1, s2
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
+ self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), (4, 10))
+ empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
+ self.assertTransformedResult(test_fn, empty_vector, (0, 0))
- with self.compiled(node) as result:
- with self.test_session() as sess:
- l = [1, 2, 3]
- self.assertEqual(
- test_fn(l), sess.run(result.test_fn(constant_op.constant(l))))
- l = []
- self.assertEqual(
- test_fn(l),
- sess.run(
- result.test_fn(
- constant_op.constant(l, shape=(0,), dtype=dtypes.int32))))
-
- def test_for_single_var(self):
+ def test_for_single_output(self):
def test_fn(l):
s = 0
@@ -214,22 +170,11 @@ class ControlFlowTest(converter_testing.TestCase):
s += e
return s
- node = self.parse_and_analyze(test_fn, {})
- node = control_flow.transform(node, self.ctx)
+ self.assertTransformedResult(test_fn, constant_op.constant([1, 3]), 4)
+ empty_vector = constant_op.constant([], shape=(0,), dtype=dtypes.int32)
+ self.assertTransformedResult(test_fn, empty_vector, 0)
- with self.compiled(node) as result:
- with self.test_session() as sess:
- l = [1, 2, 3]
- self.assertEqual(
- test_fn(l), sess.run(result.test_fn(constant_op.constant(l))))
- l = []
- self.assertEqual(
- test_fn(l),
- sess.run(
- result.test_fn(
- constant_op.constant(l, shape=(0,), dtype=dtypes.int32))))
-
- def test_for_with_iterated_expression(self):
+ def test_for_iterated_expression(self):
eval_count = [0]
@@ -243,14 +188,13 @@ class ControlFlowTest(converter_testing.TestCase):
s += e
return s
- node = self.parse_and_analyze(test_fn, {'count_evals': count_evals})
- node = control_flow.transform(node, self.ctx)
+ ns = {'count_evals': count_evals}
+ node, ctx = self.prepare(test_fn, ns)
+ node = control_flow.transform(node, ctx)
- with self.compiled(node) as result:
- result.count_evals = count_evals
- self.assertEqual(test_fn(5), result.test_fn(5))
- # count_evals ran twice, once for test_fn and another for result.test_fn
- self.assertEqual(eval_count[0], 2)
+ with self.compiled(node, ns) as result:
+ self.assertEqual(result.test_fn(5), 10)
+ self.assertEqual(eval_count[0], 1)
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/decorators_test.py b/tensorflow/contrib/autograph/converters/decorators_test.py
index d41c7fde24..095abc5edc 100644
--- a/tensorflow/contrib/autograph/converters/decorators_test.py
+++ b/tensorflow/contrib/autograph/converters/decorators_test.py
@@ -61,13 +61,13 @@ class DecoratorsTest(converter_testing.TestCase):
'simple_decorator': simple_decorator,
'converter_testing': converter_testing,
}
- node = self.parse_and_analyze(
+ node, ctx = self.prepare(
f,
namespace,
recursive=False,
autograph_decorators=autograph_decorators)
- node = decorators.transform(node, self.ctx)
- import_line = '\n'.join(self.ctx.program.additional_imports)
+ node = decorators.transform(node, ctx)
+ import_line = '\n'.join(ctx.program.additional_imports)
result, _ = compiler.ast_to_object(node, source_prefix=import_line)
return getattr(result, f.__name__)
@@ -76,11 +76,8 @@ class DecoratorsTest(converter_testing.TestCase):
def test_fn(a):
return a
- node = self.parse_and_analyze(test_fn, {})
- node = decorators.transform(node, self.ctx)
- result, _ = compiler.ast_to_object(node)
-
- self.assertEqual(1, result.test_fn(1))
+ with self.converted(test_fn, decorators, {}) as result:
+ self.assertEqual(1, result.test_fn(1))
def test_function(self):
@@ -124,7 +121,7 @@ class DecoratorsTest(converter_testing.TestCase):
return b + 11
return inner_fn(a)
- # Expected to fail because simple_decorator cannot be imported.
+ # Expected to fail because simple_decorator could not be imported.
with self.assertRaises(transformer.AutographParseError):
test_fn(1)
diff --git a/tensorflow/contrib/autograph/converters/directives.py b/tensorflow/contrib/autograph/converters/directives.py
new file mode 100644
index 0000000000..ccdf79d47b
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/directives.py
@@ -0,0 +1,108 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Handles directives.
+
+This converter removes the directive functions from the code and moves the
+information they specify into AST annotations. It is a specialized form of
+static analysis, one that is specific to AutoGraph.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gast
+
+from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.lang import directives
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.python.util import tf_inspect
+
+ENCLOSING_LOOP = 'enclosing_loop'
+
+
+def _map_args(call_node, function):
+ """Maps AST call nodes to the actual function's arguments.
+
+ Args:
+ call_node: ast.Call
+ function: Callable[..., Any], the actual function matching call_node
+ Returns:
+ Dict[Text, ast.AST], mapping each of the function's argument names to
+ the respective AST node.
+ """
+ args = call_node.args
+ kwds = {kwd.arg: kwd.value for kwd in call_node.keywords}
+ return tf_inspect.getcallargs(function, *args, **kwds)
+
+
+class DirectivesTransformer(converter.Base):
+ """Parses compiler directives and converts them into AST annotations."""
+
+ def _process_symbol_directive(self, call_node, directive):
+ if len(call_node.args) < 1:
+ raise ValueError('"%s" requires a positional first argument'
+ ' as the target' % directive.__name__)
+ target = call_node.args[0]
+ defs = anno.getanno(target, anno.Static.ORIG_DEFINITIONS)
+ for def_ in defs:
+ def_.directives[directive] = _map_args(call_node, directive)
+ return call_node
+
+ def _process_statement_directive(self, call_node, directive):
+ if self.local_scope_level < 1:
+ raise ValueError(
+ '"%s" must be used inside a statement' % directive.__name__)
+ target = self.get_local(ENCLOSING_LOOP)
+ node_anno = anno.getanno(target, converter.AgAnno.DIRECTIVES, {})
+ node_anno[directive] = _map_args(call_node, directive)
+ anno.setanno(target, converter.AgAnno.DIRECTIVES, node_anno)
+ return call_node
+
+ def visit_Expr(self, node):
+ if isinstance(node.value, gast.Call):
+ call_node = node.value
+ if anno.hasanno(call_node.func, 'live_val'):
+ live_val = anno.getanno(call_node.func, 'live_val')
+
+ if live_val is directives.set_element_type:
+ call_node = self._process_symbol_directive(call_node, live_val)
+ elif live_val is directives.set_loop_options:
+ call_node = self._process_statement_directive(call_node, live_val)
+ else:
+ return self.generic_visit(node)
+
+ return None # Directive calls are not output in the generated code.
+ return self.generic_visit(node)
+
+ # TODO(mdan): This will be insufficient for other control flow.
+ # That means that if we ever have a directive that affects things other than
+ # loops, we'll need support for parallel scopes, or have multiple converters.
+ def _track_and_visit_loop(self, node):
+ self.enter_local_scope()
+ self.set_local(ENCLOSING_LOOP, node)
+ node = self.generic_visit(node)
+ self.exit_local_scope()
+ return node
+
+ def visit_While(self, node):
+ return self._track_and_visit_loop(node)
+
+ def visit_For(self, node):
+ return self._track_and_visit_loop(node)
+
+
+def transform(node, ctx):
+ return DirectivesTransformer(ctx).visit(node)
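For orientation, separate from the patch: _map_args normalizes a directive call's positional and keyword arguments into one dict keyed by the directive's parameter names, so later code can look up entries such as 'dtype' or 'shape' uniformly. A standalone sketch of the same idea; the set_element_type signature is inferred from the tests below, and inspect.getcallargs stands in for tf_inspect.getcallargs.

    import ast
    import inspect

    def set_element_type(target, dtype, shape=None):  # assumed signature
      pass

    call = ast.parse("set_element_type(l, 'a', string_var)").body[0].value
    kwds = {kw.arg: kw.value for kw in call.keywords}
    mapped = inspect.getcallargs(set_element_type, *call.args, **kwds)
    # mapped['target'], mapped['dtype'] and mapped['shape'] now hold the AST
    # argument nodes keyed by parameter name, however the call spelled them.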
diff --git a/tensorflow/contrib/autograph/converters/directives_test.py b/tensorflow/contrib/autograph/converters/directives_test.py
new file mode 100644
index 0000000000..5f798a5b76
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/directives_test.py
@@ -0,0 +1,78 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for directives module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import directives as directives_converter
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.core.converter import AgAnno
+from tensorflow.contrib.autograph.lang import directives
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.python.platform import test
+
+
+class DirectivesTest(converter_testing.TestCase):
+
+ def test_local_target(self):
+
+ def test_fn():
+ l = []
+ string_var = 0
+ directives.set_element_type(l, 'a', string_var)
+
+ node, ctx = self.prepare(test_fn, {'directives': directives})
+ node = directives_converter.transform(node, ctx)
+
+ def_, = anno.getanno(node.body[0].body[0].targets[0],
+ anno.Static.DEFINITIONS)
+ d = def_.directives[directives.set_element_type]
+ self.assertEqual(d['dtype'].s, 'a')
+ self.assertEqual(d['shape'].id, 'string_var')
+
+ def test_argument_target(self):
+
+ def test_fn(a):
+ directives.set_element_type(a, 1, shape=2)
+
+ node, ctx = self.prepare(test_fn, {'directives': directives})
+ node = directives_converter.transform(node, ctx)
+
+ def_, = anno.getanno(node.body[0].args.args[0], anno.Static.DEFINITIONS)
+ d = def_.directives[directives.set_element_type]
+ self.assertEqual(d['dtype'].n, 1)
+ self.assertEqual(d['shape'].n, 2)
+
+ def test_loop_target(self):
+
+ def test_fn():
+ a = True
+ while True:
+ directives.set_loop_options(parallel_iterations=10, back_prop=a)
+
+ node, ctx = self.prepare(test_fn, {'directives': directives})
+ node = directives_converter.transform(node, ctx)
+
+ d = anno.getanno(node.body[0].body[1], AgAnno.DIRECTIVES)
+ d = d[directives.set_loop_options]
+ self.assertEqual(d['parallel_iterations'].n, 10)
+ self.assertEqual(d['back_prop'].id, 'a')
+ self.assertEqual(d['swap_memory'], directives.UNSPECIFIED)
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/autograph/converters/error_handlers.py b/tensorflow/contrib/autograph/converters/error_handlers.py
new file mode 100644
index 0000000000..3f23662152
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/error_handlers.py
@@ -0,0 +1,52 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Wraps function bodies with a try/except to rewrite error tracebacks.
+
+Only adds try/except wrappers to functions that have the anno.Basic.ORIGIN
+annotation because these are the functions originally written by the user.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import templates
+
+
+class ErrorRewritingTransformer(converter.Base):
+ """Possibly wraps the body of a function in a try/except.
+
+ Only wraps functions that were originally defined by the user, detected by
+ checking for the anno.Basic.ORIGIN annotation.
+ """
+
+ def visit_FunctionDef(self, node):
+ node = self.generic_visit(node)
+
+ if anno.hasanno(node, anno.Basic.ORIGIN):
+ template = """
+ try:
+ body
+ except:
+ ag__.rewrite_graph_construction_error(ag_source_map__)
+ """
+ node.body = templates.replace(template, body=node.body)
+ return node
+
+
+def transform(node, ctx):
+ return ErrorRewritingTransformer(ctx).visit(node)
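For orientation, separate from the patch: applied to a function carrying the anno.Basic.ORIGIN annotation, the template above yields a body of roughly this shape. ag__ and ag_source_map__ are names the AutoGraph runtime makes available in generated code; the function name and body here are placeholders.

    def user_fn(x):
      try:
        # ... original function body ...
        return x + 1
      except:
        ag__.rewrite_graph_construction_error(ag_source_map__)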
diff --git a/tensorflow/contrib/autograph/converters/error_handlers_test.py b/tensorflow/contrib/autograph/converters/error_handlers_test.py
new file mode 100644
index 0000000000..878526c8b4
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/error_handlers_test.py
@@ -0,0 +1,55 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for error_handlers module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import error_handlers
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.core import errors
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import origin_info
+from tensorflow.python.platform import test
+
+
+class ErrorHandlersTest(converter_testing.TestCase):
+
+ def test_basic(self):
+
+ def test_fn():
+ raise ValueError()
+
+ node, ctx = self.prepare(test_fn, {})
+ anno.setanno(node.body[0], anno.Basic.ORIGIN,
+ origin_info.OriginInfo('test_path', None, None, None, None))
+ node = error_handlers.transform(node, ctx)
+ with self.compiled(node, {}) as result:
+ with self.assertRaises(errors.GraphConstructionError):
+ result.test_fn()
+
+ def test_no_origin_annotation(self):
+
+ def test_fn():
+ raise ValueError()
+
+ with self.converted(test_fn, error_handlers, {}) as result:
+ with self.assertRaises(ValueError):
+ result.test_fn()
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/autograph/converters/ifexp.py b/tensorflow/contrib/autograph/converters/ifexp.py
deleted file mode 100644
index e996138498..0000000000
--- a/tensorflow/contrib/autograph/converters/ifexp.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Canonicalizes the ternary conditional operator."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.contrib.autograph.core import converter
-from tensorflow.contrib.autograph.pyct import templates
-
-
-class IfExp(converter.Base):
- """Canonicalizes all IfExp nodes into plain conditionals."""
-
- def visit_IfExp(self, node):
- template = """
- ag__.utils.run_cond(test, lambda: (body,), lambda: (orelse,))
- """
- desugared_ifexp = templates.replace_as_expression(
- template, test=node.test, body=node.body, orelse=node.orelse)
- return desugared_ifexp
-
-
-def transform(node, ctx):
- """Desugar IfExp nodes into plain conditionals.
-
- Args:
- node: ast.AST, the node to transform
- ctx: converter.EntityContext
-
- Returns:
- new_node: an AST with no IfExp nodes, only conditionals.
- """
-
- node = IfExp(ctx).visit(node)
- return node
diff --git a/tensorflow/contrib/autograph/converters/ifexp_test.py b/tensorflow/contrib/autograph/converters/ifexp_test.py
deleted file mode 100644
index cdd5a2f591..0000000000
--- a/tensorflow/contrib/autograph/converters/ifexp_test.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for ifexp module."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.contrib.autograph import utils
-from tensorflow.contrib.autograph.converters import ifexp
-from tensorflow.contrib.autograph.core import converter_testing
-from tensorflow.python.platform import test
-
-
-class IfExpTest(converter_testing.TestCase):
-
- def compiled_fn(self, test_fn, *args):
- node = self.parse_and_analyze(test_fn, {})
- node = ifexp.transform(node, self.ctx)
- module = self.compiled(node, *args)
- return module
-
- def test_simple(self):
-
- def test_fn(x):
- return 1 if x else 0
-
- with self.compiled_fn(test_fn) as result:
- result.autograph_util = utils
- for x in [0, 1]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_fn(self):
-
- def f(x):
- return 3 * x
-
- def test_fn(x):
- y = f(x * x if x > 0 else x)
- return y
-
- with self.compiled_fn(test_fn) as result:
- result.autograph_util = utils
- result.f = f
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_exp(self):
-
- def test_fn(x):
- return x * x if x > 0 else x
-
- with self.compiled_fn(test_fn) as result:
- result.autograph_util = utils
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_nested(self):
-
- def test_fn(x):
- return x * x if x > 0 else x if x else 1
-
- with self.compiled_fn(test_fn) as result:
- result.autograph_util = utils
- for x in [-2, 0, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_in_cond(self):
-
- def test_fn(x):
- if x > 0:
- return x * x if x < 5 else x * x * x
- return -x
-
- with self.compiled_fn(test_fn) as result:
- result.autograph_util = utils
- for x in [-2, 2, 5]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_assign_in_cond(self):
-
- def test_fn(x):
- if x > 0:
- x = -x if x < 5 else x
- return x
-
- with self.compiled_fn(test_fn) as result:
- result.autograph_util = utils
- for x in [-2, 2, 5]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
-
-if __name__ == '__main__':
- test.main()
diff --git a/tensorflow/contrib/autograph/converters/list_comprehension.py b/tensorflow/contrib/autograph/converters/list_comprehension.py
deleted file mode 100644
index c4a13ee822..0000000000
--- a/tensorflow/contrib/autograph/converters/list_comprehension.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Canonicalizing list comprehensions into for and if statements.
-
-e.g.
-result = [x * x for x in xs]
-
-becomes
-
-result = []
-for x in xs:
- elt = x * x
- result.append(elt)
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import gast
-
-from tensorflow.contrib.autograph.core import converter
-from tensorflow.contrib.autograph.pyct import parser
-from tensorflow.contrib.autograph.pyct import templates
-
-
-class ListCompCanonicalizationTransformer(converter.Base):
- """NodeTransformer to canonicalize list comprehensions."""
-
- def make_update_list_node(self, list_, elt):
- return templates.replace('list_.append(elt)', list_=list_, elt=elt)[0]
-
- def instantiate_list_node(self):
- return parser.parse_str('[]').body[0].value
-
- def visit_Assign(self, node):
- if not isinstance(node.value, gast.ListComp):
- return node
- if len(node.targets) > 1:
- raise ValueError('Only support single assignment.')
- return self.canonicalize_listcomp(node.targets[0], node.value)
-
- def canonicalize_listcomp(self, result_node, list_comp_node):
-
- make_list = templates.replace(
- 'list_ = create_list',
- list_=result_node,
- create_list=self.instantiate_list_node())
- loop_body = self.make_update_list_node(result_node, list_comp_node.elt)
-
- for gen in reversed(list_comp_node.generators):
- for gen_if in reversed(gen.ifs):
- loop_body = templates.replace(
- 'if test: loop_body', test=gen_if, loop_body=loop_body)
- loop_body = templates.replace(
- 'for target in iter_: loop_body',
- iter_=gen.iter,
- target=gen.target,
- loop_body=loop_body)
-
- return make_list + loop_body
-
-
-def transform(node, ctx):
- return ListCompCanonicalizationTransformer(ctx).visit(node)
diff --git a/tensorflow/contrib/autograph/converters/list_comprehensions.py b/tensorflow/contrib/autograph/converters/list_comprehensions.py
new file mode 100644
index 0000000000..ecf4628816
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/list_comprehensions.py
@@ -0,0 +1,82 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Lowers list comprehensions into for and if statements.
+
+Example:
+
+ result = [x * x for x in xs]
+
+becomes
+
+ result = []
+ for x in xs:
+ elt = x * x
+ result.append(elt)
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gast
+
+from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.pyct import templates
+
+
+# TODO(mdan): This should convert directly to operator calls.
+
+
+class ListCompTransformer(converter.Base):
+ """Lowers list comprehensions into standard control flow."""
+
+ def visit_Assign(self, node):
+ if not isinstance(node.value, gast.ListComp):
+ return self.generic_visit(node)
+ if len(node.targets) > 1:
+ raise NotImplementedError('multiple assignments')
+
+ target, = node.targets
+ list_comp_node = node.value
+
+ template = """
+ target = []
+ """
+ initialization = templates.replace(template, target=target)
+
+ template = """
+ target.append(elt)
+ """
+ body = templates.replace(template, target=target, elt=list_comp_node.elt)
+
+ for gen in reversed(list_comp_node.generators):
+ for gen_if in reversed(gen.ifs):
+ template = """
+ if test:
+ body
+ """
+ body = templates.replace(template, test=gen_if, body=body)
+ template = """
+ for target in iter_:
+ body
+ """
+ body = templates.replace(
+ template, iter_=gen.iter, target=gen.target, body=body)
+
+ return initialization + body
+
+
+def transform(node, ctx):
+ return ListCompTransformer(ctx).visit(node)
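The lowering above is behavior-preserving for ordinary Python lists. As a point of reference, here is a framework-free, hand-written version of the expansion the docstring describes (illustrative only; the converter emits an equivalent AST through templates.replace rather than this exact source):

    # Hand-written mirror of the docstring's expansion, with a filter added.
    def squares_comprehension(xs):
      return [x * x for x in xs if x > 1]

    def squares_lowered(xs):
      result = []                 # target = []
      for x in xs:                # for target in iter_:
        if x > 1:                 #   if test:
          elt = x * x
          result.append(elt)      #     target.append(elt)
      return result

    assert squares_comprehension([1, 2, 3]) == squares_lowered([1, 2, 3]) == [4, 9]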
diff --git a/tensorflow/contrib/autograph/converters/list_comprehension_test.py b/tensorflow/contrib/autograph/converters/list_comprehensions_test.py
index 2bbee93412..59b5ce9ca0 100644
--- a/tensorflow/contrib/autograph/converters/list_comprehension_test.py
+++ b/tensorflow/contrib/autograph/converters/list_comprehensions_test.py
@@ -12,33 +12,31 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Tests for list_comprehension module."""
+"""Tests for list_comprehensions module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.contrib.autograph.converters import list_comprehension
+from tensorflow.contrib.autograph.converters import list_comprehensions
from tensorflow.contrib.autograph.core import converter_testing
from tensorflow.python.platform import test
class ListCompTest(converter_testing.TestCase):
+ def assertTransformedEquivalent(self, test_fn, *inputs):
+ with self.converted(test_fn, list_comprehensions, {}) as result:
+ self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
def test_basic(self):
def test_fn(l):
s = [e * e for e in l]
return s
- node = self.parse_and_analyze(test_fn, {})
- node = list_comprehension.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- l = [1, 2, 3]
- self.assertEqual(test_fn(l), result.test_fn(l))
- l = []
- self.assertEqual(test_fn(l), result.test_fn(l))
+ self.assertTransformedEquivalent(test_fn, [])
+ self.assertTransformedEquivalent(test_fn, [1, 2, 3])
def test_multiple_generators(self):
@@ -46,29 +44,17 @@ class ListCompTest(converter_testing.TestCase):
s = [e * e for sublist in l for e in sublist]
return s
- node = self.parse_and_analyze(test_fn, {})
- node = list_comprehension.transform(node, self.ctx)
+ self.assertTransformedEquivalent(test_fn, [])
+ self.assertTransformedEquivalent(test_fn, [[1], [2], [3]])
- with self.compiled(node) as result:
- l = [[1], [2], [3]]
- self.assertEqual(test_fn(l), result.test_fn(l))
- l = []
- self.assertEqual(test_fn(l), result.test_fn(l))
-
- def test_conds(self):
+ def test_cond(self):
def test_fn(l):
s = [e * e for e in l if e > 1]
return s
- node = self.parse_and_analyze(test_fn, {})
- node = list_comprehension.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- l = [1, 2, 3]
- self.assertEqual(test_fn(l), result.test_fn(l))
- l = []
- self.assertEqual(test_fn(l), result.test_fn(l))
+ self.assertTransformedEquivalent(test_fn, [])
+ self.assertTransformedEquivalent(test_fn, [1, 2, 3])
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/converters/lists.py b/tensorflow/contrib/autograph/converters/lists.py
index d77a044798..a02fc827b8 100644
--- a/tensorflow/contrib/autograph/converters/lists.py
+++ b/tensorflow/contrib/autograph/converters/lists.py
@@ -33,6 +33,7 @@ from __future__ import print_function
import gast
from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.lang import directives
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import templates
@@ -88,12 +89,12 @@ class ListTransformer(converter.Base):
scope = anno.getanno(node, NodeAnno.ARGS_SCOPE)
target_node = node.func.value
- # Attempt to use a related name if can get one. Otherwise use something
+ # Attempt to use a related name if one exists. Otherwise use something
# generic.
if anno.hasanno(target_node, anno.Basic.QN):
target_name = anno.getanno(target_node, anno.Basic.QN).ssf()
else:
- target_name = 'list'
+ target_name = 'list_'
pop_var_name = self.ctx.namer.new_symbol(target_name, scope.referenced)
pop_uses = self.get_local(POP_USES, [])
@@ -104,9 +105,10 @@ class ListTransformer(converter.Base):
def _replace_stack_call(self, node):
assert len(node.args) == 1
- dtype = anno.getanno(
+ dtype = self.get_definition_directive(
node.args[0],
- 'element_type',
+ directives.set_element_type,
+ 'dtype',
default=templates.replace_as_expression('None'))
template = """
ag__.list_stack(
@@ -134,7 +136,10 @@ class ListTransformer(converter.Base):
node = self._replace_append_call(node)
elif func_name == 'pop' and (len(node.args) <= 1):
node = self._replace_pop_call(node)
- elif func_name == 'stack' and (len(node.args) == 1):
+ elif (func_name == 'stack' and (len(node.args) == 1) and
+ (not node.keywords or node.keywords[0].arg == 'strict')):
+ # This avoids false positives with keyword args.
+ # TODO(mdan): handle kwargs properly.
node = self._replace_stack_call(node)
return node
@@ -146,15 +151,22 @@ class ListTransformer(converter.Base):
pop_element = original_call_node.args[0]
else:
pop_element = parser.parse_expression('None')
+
# The call will be something like "target.pop()", and the dtype is hooked to
# target, hence the func.value.
- dtype = anno.getanno(
+ # TODO(mdan): For lists of lists, this won't work.
+ # The reason why it won't work is because it's unclear how to annotate
+ # the list as a "list of lists with a certain element type" when using
+ # operations like `l.pop().pop()`.
+ dtype = self.get_definition_directive(
original_call_node.func.value,
- 'element_type',
+ directives.set_element_type,
+ 'dtype',
default=templates.replace_as_expression('None'))
- shape = anno.getanno(
+ shape = self.get_definition_directive(
original_call_node.func.value,
- 'element_shape',
+ directives.set_element_type,
+ 'shape',
default=templates.replace_as_expression('None'))
template = """
diff --git a/tensorflow/contrib/autograph/converters/lists_test.py b/tensorflow/contrib/autograph/converters/lists_test.py
index ea04097b28..f906918ac0 100644
--- a/tensorflow/contrib/autograph/converters/lists_test.py
+++ b/tensorflow/contrib/autograph/converters/lists_test.py
@@ -18,9 +18,12 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.contrib.autograph import utils
from tensorflow.contrib.autograph.converters import lists
from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.lang import directives
+from tensorflow.contrib.autograph.lang import special_functions
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import parser
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
@@ -28,6 +31,9 @@ from tensorflow.python.ops import list_ops
from tensorflow.python.platform import test
+tf = None # Will be replaced by a mock.
+
+
class ListTest(converter_testing.TestCase):
def test_empty_list(self):
@@ -35,10 +41,7 @@ class ListTest(converter_testing.TestCase):
def test_fn():
return []
- node = self.parse_and_analyze(test_fn, {})
- node = lists.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ with self.converted(test_fn, lists, {}) as result:
tl = result.test_fn()
# Empty tensor lists cannot be evaluated or stacked.
self.assertTrue(isinstance(tl, ops.Tensor))
@@ -49,27 +52,19 @@ class ListTest(converter_testing.TestCase):
def test_fn():
return [1, 2, 3]
- node = self.parse_and_analyze(test_fn, {})
- node = lists.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- with self.test_session() as sess:
- tl = result.test_fn()
- r = list_ops.tensor_list_stack(tl, dtypes.int32)
- self.assertAllEqual(sess.run(r), [1, 2, 3])
+ with self.converted(test_fn, lists, {}) as result:
+ self.assertAllEqual(result.test_fn(), [1, 2, 3])
def test_list_append(self):
def test_fn():
- l = [1]
+ l = special_functions.tensor_list([1])
l.append(2)
l.append(3)
return l
- node = self.parse_and_analyze(test_fn, {})
- node = lists.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ ns = {'special_functions': special_functions}
+ with self.converted(test_fn, lists, ns) as result:
with self.test_session() as sess:
tl = result.test_fn()
r = list_ops.tensor_list_stack(tl, dtypes.int32)
@@ -78,24 +73,21 @@ class ListTest(converter_testing.TestCase):
def test_list_pop(self):
def test_fn():
- l = [1, 2, 3]
- utils.set_element_type(l, dtypes.int32, ())
+ l = special_functions.tensor_list([1, 2, 3])
s = l.pop()
return s, l
- node = self.parse_and_analyze(
- test_fn,
- {
- 'utils': utils,
- 'dtypes': dtypes
- },
- include_type_analysis=True,
- )
- node = lists.transform(node, self.ctx)
-
- with self.compiled(node) as result:
- result.utils = utils
- result.dtypes = dtypes
+ ns = {'special_functions': special_functions}
+ node, ctx = self.prepare(test_fn, ns)
+ def_, = anno.getanno(node.body[0].body[0].targets[0],
+ anno.Static.ORIG_DEFINITIONS)
+ def_.directives[directives.set_element_type] = {
+ 'dtype': parser.parse_expression('tf.int32'),
+ 'shape': parser.parse_expression('()'),
+ }
+ node = lists.transform(node, ctx)
+
+ with self.compiled(node, ns, dtypes.int32) as result:
with self.test_session() as sess:
ts, tl = result.test_fn()
r = list_ops.tensor_list_stack(tl, dtypes.int32)
@@ -108,10 +100,7 @@ class ListTest(converter_testing.TestCase):
s = l.pop().pop()
return s
- node = self.parse_and_analyze(test_fn, {})
- node = lists.transform(node, self.ctx)
-
- with self.compiled(node) as result:
+ with self.converted(test_fn, lists, {}) as result:
test_input = [1, 2, [1, 2, 3]]
+      # TODO(mdan): Pass a list of lists of tensors when we fully support that.
+      # For now, we pass a regular Python list of lists just to verify that
@@ -120,29 +109,24 @@ class ListTest(converter_testing.TestCase):
def test_list_stack(self):
- tf = None # Will be replaced with a mock.
-
def test_fn():
l = [1, 2, 3]
- utils.set_element_type(l, dtypes.int32)
return tf.stack(l)
- node = self.parse_and_analyze(
- test_fn,
- {
- 'utils': utils,
- 'dtypes': dtypes
- },
- include_type_analysis=True,
- )
- node = lists.transform(node, self.ctx)
-
- with self.compiled(node, array_ops.stack, dtypes.int32) as result:
- result.utils = utils
- result.dtypes = dtypes
+ node, ctx = self.prepare(test_fn, {})
+ def_, = anno.getanno(node.body[0].body[0].targets[0],
+ anno.Static.ORIG_DEFINITIONS)
+ def_.directives[directives.set_element_type] = {
+ 'dtype': parser.parse_expression('tf.int32')
+ }
+ node = lists.transform(node, ctx)
+
+ with self.compiled(node, {}, array_ops.stack, dtypes.int32) as result:
with self.test_session() as sess:
self.assertAllEqual(sess.run(result.test_fn()), [1, 2, 3])
+ # TODO(mdan): Add a test with tf.stack with axis kwarg.
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/converters/logical_expressions_test.py b/tensorflow/contrib/autograph/converters/logical_expressions_test.py
index 48186024a9..ca07de5e8a 100644
--- a/tensorflow/contrib/autograph/converters/logical_expressions_test.py
+++ b/tensorflow/contrib/autograph/converters/logical_expressions_test.py
@@ -31,10 +31,8 @@ class GradientsFunctionTest(converter_testing.TestCase):
def test_fn(a, b):
return a == b
- node = self.parse_and_analyze(test_fn, {})
- node = logical_expressions.transform(node, self.ctx)
-
- with self.compiled(node, math_ops.equal) as result:
+ with self.converted(test_fn, logical_expressions, {},
+ math_ops.equal) as result:
with self.test_session() as sess:
self.assertTrue(sess.run(result.test_fn(1, 1)))
self.assertFalse(sess.run(result.test_fn(1, 2)))
@@ -44,11 +42,8 @@ class GradientsFunctionTest(converter_testing.TestCase):
def test_fn(a, b, c):
return (a or b) and (a or b or c)
- node = self.parse_and_analyze(test_fn, {})
- node = logical_expressions.transform(node, self.ctx)
-
- with self.compiled(node, math_ops.logical_or,
- math_ops.logical_and) as result:
+ with self.converted(test_fn, logical_expressions, {}, math_ops.logical_or,
+ math_ops.logical_and) as result:
with self.test_session() as sess:
self.assertTrue(sess.run(result.test_fn(True, False, True)))
diff --git a/tensorflow/contrib/autograph/converters/name_scopes_test.py b/tensorflow/contrib/autograph/converters/name_scopes_test.py
index 444d0bcd46..a329b0db70 100644
--- a/tensorflow/contrib/autograph/converters/name_scopes_test.py
+++ b/tensorflow/contrib/autograph/converters/name_scopes_test.py
@@ -31,17 +31,13 @@ class FunctionNameScopeTransformer(converter_testing.TestCase):
def test_fn(l):
"""This should stay here."""
- a = 5
+ a = 1
l += a
return l
- node = self.parse_and_analyze(test_fn, {})
- node = name_scopes.transform(node, self.ctx)
-
- with self.compiled(node, ops.name_scope) as result:
+ with self.converted(test_fn, name_scopes, {}, ops.name_scope) as result:
result_op = result.test_fn(constant_op.constant(1))
self.assertIn('test_fn/', result_op.op.name)
-
self.assertEqual('This should stay here.', result.test_fn.__doc__)
def test_long_docstring(self):
@@ -54,13 +50,12 @@ class FunctionNameScopeTransformer(converter_testing.TestCase):
Returns:
l
"""
- return l
-
- node = self.parse_and_analyze(test_fn, {})
- node = name_scopes.transform(node, self.ctx)
+ return l + 1
- with self.compiled(node, ops.name_scope) as result:
- self.assertIn('Multi-line', result.test_fn.__doc__)
+ with self.converted(test_fn, name_scopes, {}, ops.name_scope) as result:
+ result_op = result.test_fn(constant_op.constant(1))
+ self.assertIn('test_fn/', result_op.op.name)
+ self.assertIn('Multi-line docstring.', result.test_fn.__doc__)
self.assertIn('Returns:', result.test_fn.__doc__)
def test_nested_functions(self):
@@ -68,21 +63,16 @@ class FunctionNameScopeTransformer(converter_testing.TestCase):
def test_fn(l):
def inner_fn(i):
- return i ** 2
-
- l += 4
- return inner_fn(l)
+ return i + 1
- node = self.parse_and_analyze(test_fn, {})
- node = name_scopes.transform(node, self.ctx)
+ l += 1
+ return l, inner_fn(l)
- with self.compiled(node, ops.name_scope) as result:
- result_op = result.test_fn(constant_op.constant(1))
- first_result_input_name = result_op.op.inputs[0].name
- second_result_input_name = result_op.op.inputs[1].name
- self.assertIn('test_fn/', first_result_input_name)
- self.assertNotIn('inner_fn', first_result_input_name)
- self.assertIn('test_fn/inner_fn/', second_result_input_name)
+ with self.converted(test_fn, name_scopes, {}, ops.name_scope) as result:
+ first, second = result.test_fn(constant_op.constant(1))
+ self.assertIn('test_fn/', first.op.name)
+ self.assertNotIn('inner_fn', first.op.name)
+ self.assertIn('test_fn/inner_fn/', second.op.name)
def test_method(self):
@@ -91,48 +81,20 @@ class FunctionNameScopeTransformer(converter_testing.TestCase):
def test_fn(self, l):
def inner_fn(i):
- return i ** 2
-
- l += 4
- return inner_fn(l)
+ return i + 1
- # Note that 'TestClass' was needed in the namespace here.
- node = self.parse_and_analyze(
- TestClass, {'TestClass': TestClass}, owner_type=TestClass)
- node = name_scopes.transform(node, self.ctx)
+ l += 1
+ return l, inner_fn(l)
- with self.compiled(node, ops.name_scope) as result:
- result_op = result.TestClass().test_fn(constant_op.constant(1))
- first_result_input_name = result_op.op.inputs[0].name
- second_result_input_name = result_op.op.inputs[1].name
- self.assertIn('TestClass/test_fn/', first_result_input_name)
- self.assertNotIn('inner_fn', first_result_input_name)
- self.assertIn('TestClass/test_fn/inner_fn/', second_result_input_name)
+ ns = {'TestClass': TestClass}
+ node, ctx = self.prepare(TestClass, ns, owner_type=TestClass)
+ node = name_scopes.transform(node, ctx)
- def test_operator(self):
-
- class TestClass(object):
-
- def __call__(self, l):
-
- def inner_fn(i):
- return i ** 2
-
- l += 4
- return inner_fn(l)
-
- # Note that 'TestClass' was needed in the namespace here.
- node = self.parse_and_analyze(
- TestClass.__call__, {'TestClass': TestClass}, owner_type=TestClass)
- node = name_scopes.transform(node, self.ctx)
-
- with self.compiled(node, ops.name_scope) as result:
- result_op = result.__call__(TestClass(), constant_op.constant(1))
- first_result_input_name = result_op.op.inputs[0].name
- second_result_input_name = result_op.op.inputs[1].name
- self.assertIn('call__/', first_result_input_name)
- self.assertNotIn('inner_fn', first_result_input_name)
- self.assertIn('call__/inner_fn/', second_result_input_name)
+ with self.compiled(node, {}, ops.name_scope) as result:
+ first, second = result.TestClass().test_fn(constant_op.constant(1))
+ self.assertIn('TestClass/test_fn/', first.op.name)
+ self.assertNotIn('inner_fn', first.op.name)
+ self.assertIn('TestClass/test_fn/inner_fn/', second.op.name)
if __name__ == '__main__':
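The rewritten assertions check op-name prefixes rather than op inputs; they rely only on standard tf.name_scope behavior, sketched standalone below (TF 1.x style graph construction, independent of the converter):

    import tensorflow as tf

    with tf.Graph().as_default():
      with tf.name_scope('test_fn'):
        x = tf.constant(1)
        y = x + 1
      # Ops created under the scope carry the 'test_fn/' prefix in their names,
      # which is exactly what the converter-inserted name_scope provides.
      assert y.op.name.startswith('test_fn/')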
diff --git a/tensorflow/contrib/autograph/converters/single_return.py b/tensorflow/contrib/autograph/converters/return_statements.py
index a351cd81b8..a351cd81b8 100644
--- a/tensorflow/contrib/autograph/converters/single_return.py
+++ b/tensorflow/contrib/autograph/converters/return_statements.py
diff --git a/tensorflow/contrib/autograph/converters/return_statements_test.py b/tensorflow/contrib/autograph/converters/return_statements_test.py
new file mode 100644
index 0000000000..3c7c8c8a25
--- /dev/null
+++ b/tensorflow/contrib/autograph/converters/return_statements_test.py
@@ -0,0 +1,167 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for return_statements module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.converters import return_statements
+from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.python.framework import ops
+from tensorflow.python.platform import test
+
+
+class SingleReturnTest(converter_testing.TestCase):
+
+ def assertTransformedEquivalent(self, test_fn, *inputs):
+ ns = {'ops': ops}
+ with self.converted(test_fn, return_statements, ns) as result:
+ self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))
+
+ def test_straightline(self):
+
+ def test_fn(x):
+ return x * x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+
+ def test_conditional(self):
+
+ def test_fn(x):
+ if x > 0:
+ return x
+ else:
+ return x * x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+ def test_missing_orelse(self):
+
+ def test_fn(x):
+ if x > 0:
+ return x
+
+ node, ctx = self.prepare(test_fn, {})
+ with self.assertRaises(ValueError):
+ return_statements.transform(node, ctx)
+
+  def test_missing_orelse_recoverable(self):
+
+ def test_fn(x):
+ if x > 0:
+ return x
+ return x * x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+ def test_missing_branch_return_recoverable(self):
+
+ def test_fn(x):
+ if x < 0:
+ x *= x
+ else:
+ return x
+ return x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+ def test_conditional_nested(self):
+
+ def test_fn(x):
+ if x > 0:
+ if x < 5:
+ return x
+ else:
+ return x * x
+ else:
+ return x * x * x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+ self.assertTransformedEquivalent(test_fn, 5)
+
+ def test_context_manager(self):
+
+ def test_fn(x):
+ with ops.name_scope(''):
+ return x * x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+ def test_context_manager_in_conditional(self):
+
+ def test_fn(x):
+ if x > 0:
+ with ops.name_scope(''):
+ return x * x
+ else:
+ return x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+  def test_conditional_in_context_manager(self):
+
+ def test_fn(x):
+ with ops.name_scope(''):
+ if x > 0:
+ return x * x
+ else:
+ return x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+ def test_no_return(self):
+
+ def test_fn(x):
+ x *= x
+
+ self.assertTransformedEquivalent(test_fn, 2)
+
+ def test_nested_functions(self):
+
+ def test_fn(x):
+
+ def inner_fn(y):
+ if y > 0:
+ return y * y
+ else:
+ return y
+
+ return inner_fn(x)
+
+ self.assertTransformedEquivalent(test_fn, 2)
+ self.assertTransformedEquivalent(test_fn, -2)
+
+ def test_loop(self):
+
+ def test_fn(x):
+ for _ in range(10):
+ return x
+ return x
+
+ node, ctx = self.prepare(test_fn, {})
+ with self.assertRaises(ValueError):
+ return_statements.transform(node, ctx)
+
+
+if __name__ == '__main__':
+ test.main()
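The tests above only assert behavioral equivalence with the original function. For orientation, a hand-written single-exit version of test_conditional is shown below; it is illustrative only and not the converter's actual output (the generated temporaries use different names):

    def original(x):
      if x > 0:
        return x
      else:
        return x * x

    def single_exit(x):
      # Every path assigns a result and falls through to one return statement.
      if x > 0:
        retval = x
      else:
        retval = x * x
      return retval

    for x in (-2, 2):
      assert original(x) == single_exit(x)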
diff --git a/tensorflow/contrib/autograph/converters/side_effect_guards_test.py b/tensorflow/contrib/autograph/converters/side_effect_guards_test.py
index a7ad8efed4..de1874321e 100644
--- a/tensorflow/contrib/autograph/converters/side_effect_guards_test.py
+++ b/tensorflow/contrib/autograph/converters/side_effect_guards_test.py
@@ -25,140 +25,138 @@ from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
-from tensorflow.python.ops import variables
+from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
+tf = None # Will be replaced by a mock.
+
+
class SideEffectGuardsTest(converter_testing.TestCase):
def test_side_effect_on_return_only_variable(self):
- tf = None
-
def test_fn(a):
tf.assign(a, a + 1)
return a
- node = self.parse_and_analyze(test_fn, {})
- node = side_effect_guards.transform(node, self.ctx)
+ node, ctx = self.prepare(test_fn, {})
+ node = side_effect_guards.transform(node, ctx)
- with self.compiled(node, state_ops.assign) as result:
- self.assertEqual(len(node.body[0].body), 1)
+ self.assertEqual(len(node.body[0].body), 1)
+
+ with self.compiled(node, {}, state_ops.assign) as result:
with self.test_session() as sess:
- v = variables.Variable(2)
+ v = variable_scope.get_variable('test', initializer=2)
sess.run(v.initializer)
- # NOTE: We don't expect the assignment to execute in this case, because
- # variables cannot be reliably guarded.
- self.assertEqual(2, sess.run(result.test_fn(v)))
+ sess.run(result.test_fn(v))
+ # TODO(mdan): Add support for this use case.
+ # Right now the variable `a` is not conditioned on the `assign` because
+ # there's no way to add control dependencies to a variable object.
+ self.assertEqual(2, sess.run(v))
def test_side_effect_on_used_variable(self):
- tf = None
-
def test_fn(a):
tf.assign(a, a + 1)
return a + 1
- node = self.parse_and_analyze(test_fn, {})
- node = side_effect_guards.transform(node, self.ctx)
+ node, ctx = self.prepare(test_fn, {})
+ node = side_effect_guards.transform(node, ctx)
- with self.compiled(node, state_ops.assign) as result:
- self.assertEqual(len(node.body[0].body), 1)
+ self.assertEqual(len(node.body[0].body), 1)
+
+ with self.compiled(node, {}, state_ops.assign) as result:
with self.test_session() as sess:
- v = variables.Variable(2)
+ v = variable_scope.get_variable('test', initializer=2)
sess.run(v.initializer)
- # NOTE: Unlike test_side_effect_on_return_only_variable, the variable
- # was used in the local scope and so we could catch the assign's side
- # effect.
- self.assertEqual(4, sess.run(result.test_fn(v)))
+ sess.run(result.test_fn(v))
+ # TODO(mdan): Ensure the result of test_fn(v) is also deterministic.
+ # Right now it's 3 or 4 based on whether the read is synchronized.
+ self.assertEqual(3, sess.run(v))
def test_side_effect_on_tensor(self):
- tf = None
-
def test_fn(a):
tf.Assert(a > 0, ['expected in throw'])
return a
- node = self.parse_and_analyze(test_fn, {})
- node = side_effect_guards.transform(node, self.ctx)
+ node, ctx = self.prepare(test_fn, {})
+ node = side_effect_guards.transform(node, ctx)
- with self.compiled(node, control_flow_ops.Assert) as result:
- self.assertEqual(len(node.body[0].body), 1)
+ self.assertEqual(len(node.body[0].body), 1)
+
+ with self.compiled(node, {}, control_flow_ops.Assert) as result:
with self.test_session() as sess:
- # NOTE: In this case we can also capture the side effect because the
- # argument is a tensor ans we can wrap it inside an identity.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'expected in throw'):
sess.run(result.test_fn(constant_op.constant(-1)))
def test_multiline_block(self):
- tf = None
-
def test_fn(a):
- tf.assign(a, a + 1)
+ tf.assign_add(a, 1)
b = a + 1
- tf.assign(a, b + 1)
- c = b + 1
- d = c + 1
- return d
+ tf.assign_add(a, 1)
+ b += 1
+ return b
- node = self.parse_and_analyze(test_fn, {})
- node = side_effect_guards.transform(node, self.ctx)
+ node, ctx = self.prepare(test_fn, {})
+ node = side_effect_guards.transform(node, ctx)
- with self.compiled(node, state_ops.assign) as result:
- self.assertEqual(len(node.body[0].body), 1)
+ self.assertEqual(len(node.body[0].body), 1)
+
+ with self.compiled(node, {}, state_ops.assign_add) as result:
with self.test_session() as sess:
- v = variables.Variable(2)
+ v = variable_scope.get_variable('test', initializer=2)
sess.run(v.initializer)
- self.assertEqual(6, sess.run(result.test_fn(v)))
+ sess.run(result.test_fn(v))
+ # TODO(mdan): Ensure the result of test_fn(v) is also deterministic.
+ self.assertEqual(4, sess.run(v))
def test_multiline_nested_block(self):
- tf = None
-
def test_fn(a):
with tf.name_scope('foo'):
tf.assign(a, a + 1)
b = a + 1
- c = b + 1
- d = c + 1
- return d
+ return b
- node = self.parse_and_analyze(test_fn, {})
- node = side_effect_guards.transform(node, self.ctx)
+ node, ctx = self.prepare(test_fn, {})
+ node = side_effect_guards.transform(node, ctx)
- with self.compiled(node, state_ops.assign, ops.name_scope) as result:
- self.assertEqual(len(node.body[0].body[0].body), 1)
+ self.assertEqual(len(node.body[0].body[0].body), 1)
+
+ with self.compiled(node, {}, state_ops.assign, ops.name_scope) as result:
with self.test_session() as sess:
- v = variables.Variable(2)
+ v = variable_scope.get_variable('test', initializer=2)
sess.run(v.initializer)
- self.assertEqual(6, sess.run(result.test_fn(v)))
+ sess.run(result.test_fn(v))
+ # TODO(mdan): Ensure the result of test_fn(v) is also deterministic.
+ self.assertEqual(3, sess.run(v))
def test_multiline_block_unsafe(self):
- tf = None
-
def test_fn(a):
tf.assign(a, a + 1)
b = a + 1
- tf.assign(a, a + 1)
+ tf.assign_add(a, 1)
c = b + 1
- d = c + 1
- return d
+ return c
+
+ node, ctx = self.prepare(test_fn, {})
+ node = side_effect_guards.transform(node, ctx)
- node = self.parse_and_analyze(test_fn, {})
- node = side_effect_guards.transform(node, self.ctx)
+ self.assertEqual(len(node.body[0].body), 1)
- with self.compiled(node, state_ops.assign) as result:
- self.assertEqual(len(node.body[0].body), 1)
+ with self.compiled(node, {}, state_ops.assign,
+ state_ops.assign_add) as result:
with self.test_session() as sess:
- v = variables.Variable(2)
+ v = variable_scope.get_variable('test', initializer=2)
sess.run(v.initializer)
- # NOTE: This intentionally highlights the flakiness. The test should be
- # tightened down once that is solved.
- self.assertTrue(sess.run(result.test_fn(v)) in (6, 7))
+ sess.run(result.test_fn(v))
+ # TODO(mdan): Ensure the result of test_fn(v) is also deterministic.
+ self.assertEqual(4, sess.run(v))
if __name__ == '__main__':
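The revised expectations (for example, the variable ending at 4 after two assign_adds in test_multiline_block) follow from the guard making the statements after a stateful call depend on it. A hand-written TF 1.x sketch of that control-dependency pattern, not the generated code:

    import tensorflow as tf

    def guarded(v):
      assign = tf.assign_add(v, 1)
      # Ops created in this block run only after the assign has executed,
      # which is the ordering the side effect guard arranges automatically.
      with tf.control_dependencies([assign]):
        return tf.identity(v) + 1

    with tf.Graph().as_default():
      v = tf.get_variable('v', initializer=2)
      out = guarded(v)
      with tf.Session() as sess:
        sess.run(v.initializer)
        sess.run(out)
        assert sess.run(v) == 3  # the assign was forced to run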
diff --git a/tensorflow/contrib/autograph/converters/single_return_test.py b/tensorflow/contrib/autograph/converters/single_return_test.py
deleted file mode 100644
index 1f0de4310e..0000000000
--- a/tensorflow/contrib/autograph/converters/single_return_test.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for single_return module."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from tensorflow.contrib.autograph.converters import single_return
-from tensorflow.contrib.autograph.core import converter_testing
-from tensorflow.python.framework.ops import name_scope
-from tensorflow.python.platform import test
-
-
-class SingleReturnTest(converter_testing.TestCase):
-
- def compiled_fn(self, test_fn, *args):
- node = self.parse_and_analyze(test_fn, {})
- node = single_return.transform(node, self.ctx)
- module = self.compiled(node, *args)
- return module
-
- def test_noop(self):
- # Noop
- def test_fn(x):
- return x
-
- with self.compiled_fn(test_fn) as result:
- self.assertEqual(test_fn(2.0), result.test_fn(2.0))
-
- def test_return_expression(self):
- # ANF
- def test_fn(x):
- return x * x
-
- with self.compiled_fn(test_fn) as result:
- x = 2
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_merge(self):
- # Simple merge
- def test_fn(x):
- if x > 0:
- return x
- else:
- return x * x
-
- with self.compiled_fn(test_fn) as result:
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_orphan_branch(self):
-
- def test_fn(x):
- if x > 0:
- return x
-
- with self.assertRaises(ValueError):
- self.compiled_fn(test_fn)
-
- def test_lift_body_into_false_branch(self):
-
- def test_fn(x):
- if x > 0:
- return x
- return x * x
-
- with self.compiled_fn(test_fn) as result:
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_lift_body_into_true_branch(self):
-
- def test_fn(x):
- if x < 0:
- x *= x
- else:
- # TODO(alexbw): linter bug here that requires us suppress this warning.
- return x # pylint: disable=undefined-loop-variable
- return x
-
- with self.compiled_fn(test_fn) as result:
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_nested_if(self):
-
- def test_fn(x):
- if x > 0:
- if x < 5:
- return x
- else:
- return x * x
- else:
- return x * x * x
-
- with self.compiled_fn(test_fn) as result:
- for x in [-2, 2, 5]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_context_manager(self):
-
- def test_fn(x):
-
- with name_scope(''):
- return x * x
-
- with self.compiled_fn(test_fn) as result:
- result.name_scope = name_scope
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_context_manager_in_conditional(self):
-
- def test_fn(x):
- if x > 0:
- with name_scope(''):
- return x * x
- else:
- return x
-
- with self.compiled_fn(test_fn, name_scope) as result:
- result.name_scope = name_scope
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def text_conditional_in_context_manager(self):
-
- def test_fn(x):
- with name_scope(''):
- if x > 0:
- return x * x
- else:
- return x
-
- with self.compiled_fn(test_fn) as result:
- result.name_scope = name_scope
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_no_return(self):
-
- def test_fn(x):
- x *= x
-
- with self.compiled_fn(test_fn) as result:
- self.assertEqual(test_fn(2), result.test_fn(2))
-
- def test_nested_functiondefs(self):
-
- def test_fn(x):
-
- def inner_fn(y):
- if y > 0:
- return y * y
- else:
- return y
-
- return inner_fn(x)
-
- with self.compiled_fn(test_fn) as result:
- for x in [-2, 2]:
- self.assertEqual(test_fn(x), result.test_fn(x))
-
- def test_loop(self):
-
- def test_fn(x):
- for _ in range(10):
- return x
- return x
-
- with self.assertRaises(ValueError):
- self.compiled_fn(test_fn)
-
-
-if __name__ == '__main__':
- test.main()
diff --git a/tensorflow/contrib/autograph/converters/slices.py b/tensorflow/contrib/autograph/converters/slices.py
index 3f5fc57125..9cfa066672 100644
--- a/tensorflow/contrib/autograph/converters/slices.py
+++ b/tensorflow/contrib/autograph/converters/slices.py
@@ -21,7 +21,7 @@ from __future__ import print_function
import gast
from tensorflow.contrib.autograph.core import converter
-from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.lang import directives
from tensorflow.contrib.autograph.pyct import templates
@@ -56,17 +56,17 @@ class SliceTransformer(converter.Base):
def visit_Subscript(self, node):
node = self.generic_visit(node)
if not isinstance(node.slice, gast.Index):
- # TODO(mdan): It might make more sense to wave them through.
- raise NotImplementedError('non-index slice')
+ return node
if not isinstance(node.ctx, gast.Load):
# Index writes are handled at a higher level, one at which the rvalue is
# also available.
return node
- dtype = anno.getanno(
+ dtype = self.get_definition_directive(
node.value,
- 'element_type',
+ directives.set_element_type,
+ 'dtype',
default=templates.replace_as_expression('None'))
template = """
diff --git a/tensorflow/contrib/autograph/converters/slices_test.py b/tensorflow/contrib/autograph/converters/slices_test.py
index df9a4c8bab..3c0f81e8bc 100644
--- a/tensorflow/contrib/autograph/converters/slices_test.py
+++ b/tensorflow/contrib/autograph/converters/slices_test.py
@@ -18,9 +18,12 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.contrib.autograph import utils
from tensorflow.contrib.autograph.converters import slices
from tensorflow.contrib.autograph.core import converter_testing
+from tensorflow.contrib.autograph.lang import directives
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import list_ops
@@ -32,28 +35,42 @@ class SliceTest(converter_testing.TestCase):
def test_index_access(self):
def test_fn(l):
- utils.set_element_type(l, dtypes.int32)
return l[1]
- node = self.parse_and_analyze(
- test_fn,
- {
- 'utils': utils,
- 'dtypes': dtypes
- },
- include_type_analysis=True,
- )
- node = slices.transform(node, self.ctx)
-
- with self.compiled(node, dtypes.int32) as result:
- result.utils = utils
- result.dtypes = dtypes
+ node, ctx = self.prepare(test_fn, {})
+ def_, = anno.getanno(node.body[0].args.args[0], anno.Static.DEFINITIONS)
+ def_.directives[directives.set_element_type] = {
+ 'dtype': parser.parse_expression('tf.int32')
+ }
+ node = slices.transform(node, ctx)
+
+ with self.compiled(node, {}, dtypes.int32) as result:
with self.test_session() as sess:
tl = list_ops.tensor_list_from_tensor(
[1, 2], element_shape=constant_op.constant([], dtype=dtypes.int32))
y = result.test_fn(tl)
self.assertEqual(2, sess.run(y))
+ def test_index_access_multiple_definitions(self):
+
+ def test_fn(l):
+ if l:
+ l = []
+ return l[1]
+
+ node, ctx = self.prepare(test_fn, {})
+ def_, = anno.getanno(node.body[0].args.args[0], anno.Static.DEFINITIONS)
+ def_.directives[directives.set_element_type] = {
+ 'dtype': parser.parse_expression('tf.int32')
+ }
+ def_, = anno.getanno(node.body[0].body[0].body[0].targets[0],
+ anno.Static.DEFINITIONS)
+ def_.directives[directives.set_element_type] = {
+ 'dtype': parser.parse_expression('tf.float32')
+ }
+ with self.assertRaises(transformer.AutographParseError):
+ slices.transform(node, ctx)
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/core/converter.py b/tensorflow/contrib/autograph/core/converter.py
index 54e6aa0f3b..a93e4a8064 100644
--- a/tensorflow/contrib/autograph/core/converter.py
+++ b/tensorflow/contrib/autograph/core/converter.py
@@ -64,15 +64,29 @@ from __future__ import division
from __future__ import print_function
import collections
+from enum import Enum
+
from tensorflow.contrib.autograph.core import config
from tensorflow.contrib.autograph.core import naming
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import ast_util
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import compiler
+from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import activity
+from tensorflow.contrib.autograph.pyct.static_analysis import live_values
+from tensorflow.contrib.autograph.pyct.static_analysis import liveness
+from tensorflow.contrib.autograph.pyct.static_analysis import reaching_definitions
+from tensorflow.contrib.autograph.pyct.static_analysis import type_info
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
+# TODO(mdan): Add a test specific to this converter.
+
class ProgramContext(object):
"""ProgramContext keeps track of converting function hierarchies.
@@ -197,6 +211,46 @@ class Base(transformer.Base):
self._used = False
self._ast_depth = 0
+ def get_definition_directive(self, node, directive, arg, default):
+ """Returns the unique directive for a symbol, or a default if none exist.
+
+ See lang/directives.py for details on directives.
+
+ Args:
+ node: ast.AST
+ directive: Callable[..., Any]
+ arg: str
+ default: Any
+
+ Raises:
+ ValueError: if conflicting annotations have been found
+ """
+ defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
+ if not defs:
+ return default
+
+ # TODO(mdan): Simplify this.
+ arg_values = []
+ for def_ in defs:
+ if (directive not in def_.directives or
+          arg not in def_.directives[directive]):
+ continue
+ arg_value = def_.directives[directive][arg]
+ for prev_value in arg_values:
+ if not ast_util.matches(arg_value, prev_value):
+ qn = anno.getanno(node, anno.Basic.QN)
+ raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %
+ (qn, directive.__name__, arg,
+ compiler.ast_to_source(arg_value).strip(),
+ compiler.ast_to_source(prev_value).strip()))
+ arg_values.append(arg_value)
+
+ if not arg_values:
+ return default
+
+ arg_value, = arg_values
+ return arg_value
+
def visit(self, node):
if not self._ast_depth:
if self._used:
@@ -208,3 +262,69 @@ class Base(transformer.Base):
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
+
+
+class AnnotatedDef(reaching_definitions.Definition):
+
+ def __init__(self):
+ super(AnnotatedDef, self).__init__()
+ self.directives = {}
+
+
+class AgAnno(Enum):
+ """Annotation labels specific to AutoGraph. See anno.py."""
+
+ DIRECTIVES = 'User directives associated with the annotated statement.'
+
+ def __repr__(self):
+ return self.name
+
+
+def standard_analysis(node, context, is_initial=False):
+ """Performs a complete static analysis of the given code.
+
+ Args:
+ node: ast.AST
+ context: converter.EntityContext
+ is_initial: bool, whether this is the initial analysis done on the input
+ source code
+
+ Returns:
+ ast.AST, same as node, with the static analysis annotations added
+ """
+ # TODO(mdan): Clear static analysis here.
+ # TODO(mdan): Consider not running all analyses every time.
+ # TODO(mdan): Don't return a node because it's modified by reference.
+ graphs = cfg.build(node)
+ node = qual_names.resolve(node)
+ node = activity.resolve(node, context.info, None)
+ node = reaching_definitions.resolve(node, context.info, graphs, AnnotatedDef)
+ node = liveness.resolve(node, context.info, graphs)
+ node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
+ node = type_info.resolve(node, context.info)
+ # This second call allows resolving first-order class attributes.
+ node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
+ if is_initial:
+ anno.dup(
+ node,
+ {
+ anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
+ },
+ )
+ return node
+
+
+def apply_(node, context, converter_module):
+ """Applies a converter to an AST.
+
+ Args:
+ node: ast.AST
+ context: converter.EntityContext
+ converter_module: converter.Base
+
+ Returns:
+ ast.AST, the result of applying converter to node
+ """
+ node = standard_analysis(node, context)
+ node = converter_module.transform(node, context)
+ return node
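get_definition_directive requires every reaching definition to agree on a directive argument and fails conversion otherwise. The rule is easier to see over plain values than over AST nodes (the real code compares nodes with ast_util.matches); a framework-free model:

    # Model of the agree-or-raise rule; plain values stand in for AST nodes.
    def unique_directive_arg(defs, directive, arg, default=None):
      values = []
      for d in defs:
        if directive not in d or arg not in d[directive]:
          continue
        value = d[directive][arg]
        if values and value != values[0]:
          raise ValueError('ambiguous annotations for %s: %r, %r'
                           % (arg, value, values[0]))
        values.append(value)
      return values[0] if values else default

    defs = [{'set_element_type': {'dtype': 'tf.int32'}},
            {'set_element_type': {'dtype': 'tf.int32'}}]
    assert unique_directive_arg(defs, 'set_element_type', 'dtype') == 'tf.int32'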
diff --git a/tensorflow/contrib/autograph/core/converter_testing.py b/tensorflow/contrib/autograph/core/converter_testing.py
index 0e46aacc12..2025e32817 100644
--- a/tensorflow/contrib/autograph/core/converter_testing.py
+++ b/tensorflow/contrib/autograph/core/converter_testing.py
@@ -20,19 +20,19 @@ from __future__ import print_function
import contextlib
import imp
+import sys
+
+import six
from tensorflow.contrib.autograph import operators
from tensorflow.contrib.autograph import utils
from tensorflow.contrib.autograph.core import config
from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.core import errors
from tensorflow.contrib.autograph.pyct import compiler
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import pretty_printer
-from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import transformer
-from tensorflow.contrib.autograph.pyct.static_analysis import activity
-from tensorflow.contrib.autograph.pyct.static_analysis import live_values
-from tensorflow.contrib.autograph.pyct.static_analysis import type_info
from tensorflow.python.platform import test
@@ -74,7 +74,17 @@ class TestCase(test.TestCase):
"""Base class for unit tests in this module. Contains relevant utilities."""
@contextlib.contextmanager
- def compiled(self, node, *symbols):
+ def assertPrints(self, expected_result):
+ try:
+ out_capturer = six.StringIO()
+ sys.stdout = out_capturer
+ yield
+ self.assertEqual(out_capturer.getvalue(), expected_result)
+ finally:
+ sys.stdout = sys.__stdout__
+
+ @contextlib.contextmanager
+ def compiled(self, node, namespace, *symbols):
source = None
self.dynamic_calls = []
@@ -89,7 +99,11 @@ class TestCase(test.TestCase):
fake_ag = self.make_fake_mod('fake_ag', converted_call)
fake_ag.__dict__.update(operators.__dict__)
fake_ag.__dict__['utils'] = utils
+ fake_ag.__dict__['rewrite_graph_construction_error'] = (
+ errors.rewrite_graph_construction_error)
result.__dict__['ag__'] = fake_ag
+ for k, v in namespace.items():
+ result.__dict__[k] = v
yield result
except Exception: # pylint:disable=broad-except
if source is None:
@@ -98,6 +112,13 @@ class TestCase(test.TestCase):
print('Offending compiled code:\n%s' % source)
raise
+ @contextlib.contextmanager
+ def converted(self, entity, converter_module, namespace, *tf_symbols):
+ node, ctx = self.prepare(entity, namespace)
+ node = converter_module.transform(node, ctx)
+ with self.compiled(node, namespace, *tf_symbols) as result:
+ yield result
+
def make_fake_mod(self, name, *symbols):
fake_mod = imp.new_module(name)
for s in symbols:
@@ -114,17 +135,15 @@ class TestCase(test.TestCase):
for k, v in ns.items():
setattr(module, k, v)
- def parse_and_analyze(self,
- test_fn,
- namespace,
- namer=None,
- arg_types=None,
- include_type_analysis=True,
- owner_type=None,
- recursive=True,
- autograph_decorators=()):
+ def prepare(self,
+ test_fn,
+ namespace,
+ namer=None,
+ arg_types=None,
+ owner_type=None,
+ recursive=True,
+ autograph_decorators=()):
node, source = parser.parse_entity(test_fn)
-
if namer is None:
namer = FakeNamer()
program_ctx = converter.ProgramContext(
@@ -141,12 +160,5 @@ class TestCase(test.TestCase):
arg_types=arg_types,
owner_type=owner_type)
ctx = converter.EntityContext(namer, entity_info, program_ctx)
-
- node = qual_names.resolve(node)
- node = activity.resolve(node, entity_info)
- node = live_values.resolve(node, entity_info, {})
- if include_type_analysis:
- node = type_info.resolve(node, entity_info)
- node = live_values.resolve(node, entity_info, {})
- self.ctx = ctx
- return node
+ node = converter.standard_analysis(node, ctx, is_initial=True)
+ return node, ctx
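Taken together, prepare, converted and compiled replace the old parse_and_analyze flow. A sketch of the two typical test shapes follows; the lists converter is used here only as an arbitrary example module with the usual transform(node, ctx) entry point:

    from tensorflow.contrib.autograph.converters import lists
    from tensorflow.contrib.autograph.core import converter_testing
    from tensorflow.python.platform import test

    class ExampleConverterTest(converter_testing.TestCase):

      def test_one_step(self):

        def test_fn(x):
          return x * x

        # converted() analyzes, transforms and compiles in a single helper.
        with self.converted(test_fn, lists, {}) as result:
          self.assertEqual(test_fn(3), result.test_fn(3))

      def test_manual_steps(self):

        def test_fn(x):
          return x + 1

        # prepare() exposes the analyzed node and context, so the test can
        # attach directives or inspect annotations before transforming.
        node, ctx = self.prepare(test_fn, {})
        node = lists.transform(node, ctx)
        with self.compiled(node, {}) as result:
          self.assertEqual(test_fn(3), result.test_fn(3))

    if __name__ == '__main__':
      test.main()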
diff --git a/tensorflow/contrib/autograph/examples/integration_tests/BUILD b/tensorflow/contrib/autograph/examples/integration_tests/BUILD
index 1368ce244c..2a4a0f75e7 100644
--- a/tensorflow/contrib/autograph/examples/integration_tests/BUILD
+++ b/tensorflow/contrib/autograph/examples/integration_tests/BUILD
@@ -22,7 +22,17 @@ py_test(
"keras_test.py",
],
srcs_version = "PY2AND3",
- visibility = ["//visibility:public"],
+ deps = [
+ "//tensorflow:tensorflow_py",
+ ],
+)
+
+py_test(
+ name = "list_literals_test",
+ srcs = [
+ "list_literals_test.py",
+ ],
+ srcs_version = "PY2AND3",
deps = [
"//tensorflow:tensorflow_py",
],
diff --git a/tensorflow/contrib/autograph/examples/integration_tests/keras_test.py b/tensorflow/contrib/autograph/examples/integration_tests/keras_test.py
index a2fc7c550e..73125eb452 100644
--- a/tensorflow/contrib/autograph/examples/integration_tests/keras_test.py
+++ b/tensorflow/contrib/autograph/examples/integration_tests/keras_test.py
@@ -20,6 +20,8 @@ from __future__ import print_function
import tensorflow as tf
+from tensorflow.contrib import autograph
+
class MinimalKeras(tf.keras.Model):
@@ -27,11 +29,34 @@ class MinimalKeras(tf.keras.Model):
return x * 3
+class ModelWithStaticConditional(object):
+
+ def __init__(self, initial):
+ self.initial = initial
+ if self.initial:
+ self.h = 15
+
+ @autograph.convert()
+ def call(self):
+ x = 10
+ if self.initial:
+ x += self.h
+ return x
+
+
class KerasTest(tf.test.TestCase):
def test_basic(self):
MinimalKeras()
+ def test_conditional_attributes_False(self):
+ model = ModelWithStaticConditional(False)
+ self.assertEqual(model.call(), 10)
+
+ def test_conditional_attributes_True(self):
+ model = ModelWithStaticConditional(True)
+ self.assertEqual(model.call(), 25)
+
if __name__ == '__main__':
tf.test.main()
diff --git a/tensorflow/contrib/autograph/core/annos.py b/tensorflow/contrib/autograph/examples/integration_tests/list_literals_test.py
index b8937ce36a..680b6dbaf0 100644
--- a/tensorflow/contrib/autograph/core/annos.py
+++ b/tensorflow/contrib/autograph/examples/integration_tests/list_literals_test.py
@@ -12,28 +12,30 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Annotations specific to AutoGraph."""
+"""Tests of functions that use list literals."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from enum import Enum
+import tensorflow as tf
+from tensorflow.contrib import autograph as ag
-class NoValue(Enum):
- def __repr__(self):
- return self.name
+def list_used_as_tuple():
+ return tf.constant([1, 2, 3])
-class NodeAnno(NoValue):
- """Additional annotations used by AutoGraph converters.
+class ListLiteralsTest(tf.test.TestCase):
- These are in addition to the basic annotations declared in pyct/anno.py and
- pyct/static_analysis/annos.py.
- """
+ def test_basic(self):
+ converted = ag.to_graph(list_used_as_tuple)
+ result = converted()
- # The directives collection - see directives.py
- DIRECTIVES = (
- 'Dict depicting static directive calls. See the directives converter.')
+ with self.test_session() as sess:
+ self.assertAllEqual(sess.run(result), [1, 2, 3])
+
+
+if __name__ == '__main__':
+ tf.test.main()
diff --git a/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_collatz_speed_test.ipynb b/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_collatz_speed_test.ipynb
new file mode 100644
index 0000000000..c10a5741f6
--- /dev/null
+++ b/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_collatz_speed_test.ipynb
@@ -0,0 +1,299 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "aQkTGc-d8I1k"
+ },
+ "source": [
+ "This notebook runs a basic speed test for a simple algorithm that implements the process described in Collatz Conjecture.\n",
+ "\n",
+ "https://en.wikipedia.org/wiki/Collatz_conjecture"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "x5ChBlH09jk_"
+ },
+ "source": [
+ "### Imports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "X-QAUpWdPxUh"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -U -q tf-nightly"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "wiKQu3w05eCa"
+ },
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "from matplotlib import pyplot as plt\n",
+ "import tensorflow as tf\n",
+ "from tensorflow.contrib import autograph as ag\n",
+ "from tensorflow.python.eager import context"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "_cRFTcwT9mnn"
+ },
+ "source": [
+ "### Plotting helpers"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "ww7rc0GQ9pMu"
+ },
+ "outputs": [],
+ "source": [
+ "def plot_results(counts, times, title):\n",
+ " plt.plot(counts, np.array(times) * 1000., 'o')\n",
+ " plt.ylabel('Time (milliseconds)')\n",
+ " plt.xlabel('Collatz counter')\n",
+ " plt.title(title)\n",
+ " plt.ylim(0, 30)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "ESZGw9s9-Y5_"
+ },
+ "source": [
+ "### Collatz function definition"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "qeunWm9m-dT7"
+ },
+ "outputs": [],
+ "source": [
+ "def collatz(a):\n",
+ " count = 0\n",
+ " while a \u003e 1.1:\n",
+ " if a % 2 \u003c 0.1:\n",
+ " a //= 2\n",
+ " else:\n",
+ " a = 3 * a + 1\n",
+ " count += 1\n",
+ " return count\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "nnFmPDvScsDo"
+ },
+ "source": [
+ "# AutoGraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 301
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 9153,
+ "status": "ok",
+ "timestamp": 1531757473651,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "6fU4vlxYcsDe",
+ "outputId": "11b50f28-aced-4506-a743-4b749e9645c3"
+ },
+ "outputs": [
+ {
+ "data": {
+        "image/png": "<base64 PNG data omitted; plot of execution time (ms) vs. Collatz counter>",
hQ\nFBUVNVqWlZWFrVu3AgBiYmIwe/ZsvPPOO83aLxERWYeskLh16xa2bt2Kq1evora2Vr88OTm52R9Y\nVlYGHx8fAIBSqcTt27ebvQ8iIrIOWSHx5ptvYsCAAQgJCdGPcLIFpdLLZp9tDayf43LmugGs36NM\nVkjU1NTg3XffNcsHdu7cGaWlpfDx8YFarYa3t7fsbdVq5+3kViq9WD8H5cx1A1g/R9faAJTVcT1o\n0CD8/PPPLfqAh4fKjhs3Drt27QIApKenIywsrEX7JSIiy5N1JfHCCy/g5ZdfRteuXeHu7q5fvmPH\nDqPbvf3221CpVCgvL8fYsWOxcOFCLFiwAIsXL8bOnTvh5+fXon4NIiKyDlkhsWzZMrzxxhsYMGBA\ns/okPv74Y4PLv/zyS9n7ICIi25EVEu7u7pg/f76ly0JERHZGVp/EM888g+zsbEuXhYiI7IysK4lt\n27YhNTUVHh4ecHNzgyiKEAQBOTk5li4fERHZkKyQ2Llzp6XLQUREdkhWSHTv3t3S5SAiIjtkNCSW\nLVuGdevWYdq0aRAEocn7pobAEhGRYzMaEg0zvf72t7+1SmGIiMi+GA2Jr7/+Gh988AGGDx9urfIQ\nEZEdMToENj8/31rlICIiO8QHVRMRkSSjzU3nz59HSEhIk+W8T4KI6NFgNCR69+6N1NRUa5WFiIjs\njNGQcHNz4z0SRESPMKN9Em3atLFWOYiIyA4ZDYlt27ZZqxxERGSHOLqJiIgkMSSIiEgSQ4KIiCQx\nJIiISBJDgoiIJDEkiIhIEkOCiIgkMSSIiEgSQ4KIiCTJesY1EdGjTnWuBPtyruB6aTX8fNpjUkhv\njBjga+tiWRxDgojIBNW5EqTs/pf+daG6Sv/a2YOCIUFEVuHIv8T35VyRWP6Lw9ShpRgSRGRxjv5L\n/HpptcHlN25VWbkk1mezkBg3bhw8PT2hUCjg6uqKHTt22KooRGRhjv5L3M+nPQrVTQOhW2cPG5TG\numwWEoIg4KuvvkLHjh1tVQQishJH/yU+KaR3oyuhfy/v1ei1IzepSbFZSIiiCJ1OZ6uPJ3I69nyC\nsuYvcUt8Dw3b78v5BTduVaFbZw9MCunVaL+O3qQmxaZXEvPnz4cgCJg5cyZmzJhhq6IQOTx7P0HJ\n/SXeWpb8HkYM8DW6D0dvUpNis5D461//CqVSibKyMsybNw8BAQEYOnSorYpDZHXm/MVr7ycoOb/E\nzcGW34OjN6lJsVlIKJVKAIC3tzeee+45/PTTTyZDQqn0skbRbIb1c1zNrVv2PwsN/uLt0KEtRj/Z\no9F627P+F1dLKtHT1wuxYf/R6P0G129Jn6DM8b2bYx+Tx3hh8pjAZm0jt/4NWvo9mKN+Pbt64cqN\niibL/X29HPrfvk1CoqamBjqdDh4eHqiursaJEyfw1ltvmdxOra60QulsQ6n0Yv0cVEvq9s33BRLL\nf0b/HvWDOR5uOrlyowLrtp5GRcW9Jr+K/TpLt/m39nu31bFrTv0btOR7MFf9Iob5G2xSixjmb9N/\n+60NKJuERGlpKd566y0IgoC6ujpERkZi1KhRtigKkU3IaZpoTtOJsTZ/e+7QNqYlTUfW6vswxFpN\natZmk5Dw9/dHZmamLT6ayCKaeyI2NdpHda7E4PuA4TZuqRMUALvu0DbGVJAa+85tdaI21bntiHjH\nNVErSfUvANInYlO//A2916Cjp5vB5YZOUIlfqAyuay8d2sYYC1JTo5jsvW6OhFOF0yNLda4EiV+o\n8Os1R5D4hQqqcyUt2s/2rP81uHxfzi+S24wY4IvXo4LRQ+kJF4WAHkpPvB4VjBEDfCWbWRqUVdyX\nXVZHHnEzKaS3xPJeRpuiyLx4JUGPJHOOp79aYrhT0tSJWOoXr9SJ/UFyrwQceToJY01Hn+05Z3Ab\nRwg/R8OQoEeSOcfT9/Q1PPSxpSdiqRP7gwrVd/HrNUdM9n/YsiPXHKSC1JHDz9GwuYkeSVK/1otK\n7za7CSo27D8MLm/piViqmeVhOlHUXwFJldNYs5YjM9YURebFKwl6JEn9EhVF6JfLbYIa/WQPVFTc\nM9uImobtth+5gLLK+7K2MXYF5IwdubYexfQoYUhQqznSOPyGshaVym+7bugMNVZHS5yI5QYE0LQt\n3pGOSUs5Y/jZI4YEtZjqXEmTX7v2NA7/4RNlv56PIet0YZP1BAF4zNNd8qRcVHrX6vcaSPWZtHFR\nQFvXdPbkB9vi7X2yP3IsDAlqEVNj+c05Dr8lv4oNnSilOoMf83JHWYX0r3ZXheETsyXvNZDqM6mV\nmF7/wbZ4e5/sjxwLQ4JaxNRYfnMNRZTzq9hQiJgq34OMBQQgfWK25HBLqT6T7j6e/3+fgHRbvCPf\nG0H2hyHhpCzdJm1qLL+5hiJuP3LB8PKjFzBigK9kiAiCWT4er0cFY1/OFbMPtzR1fIwNXTXVFs/h\noWRODAknZI02aVNj+Zs7FFHqpCnVT9Dw61/qikGqicgQby/D/RE9lJ7678uc9xrIOT6tGb3j6PdG\nkH1hSDgha7RJS52IvDu4I3ZsYLM+x9hJ09R2zW27DxvSAz9fLTc6CV6DhvfMPdxS7vFp6egdDg8l\nc2JIOCFrtEmb80Rk7KQp9SsfqD+xu0g0K8lpuzf0eVLrmnO4pbWOD0OBzIEh4YRMzZ7Zmr4KS/R1\nSJ00C9V3YaproU40vFxO2/2DrHlSZZ8BORKGhBOSagrq17NTq/oqpJqFUnf/C92VHvrASDt4Htln\ni6CtE+EiAO3atkH1/Vr4dTYcKsb6NyQyoIk2LgroRNEhmlbYZ0COhCHhhKSaglrTV6E6V4LN+wzP\nvCni34Fx4qcb+NflMv17dSJwt0YLQDqUpE6azaETRXwW/2yr9mEt7DMgR8KQcFKGmk9aOr2yqRvn\nHvRgQEgx1EELGO48lsvRmmrYZ0COgiHhhKT6DVraFt6cG9PkkHr8ptT9CA28vdwBwfDNb2yqIbIM\nThXuZBp+9Reqq5pMJd3S6ZXlPASnOaRCydQU2bHPBuKjN0OdcuprInvFKwknY6zfYdX84fq/m9MW\nLuchOA2C+3ibbHKSCqUH2+qLSu/CVaFAnU4Hv/8fzvrgjWYMBSLrYEg4GVNj8FtygpXqWDZ0Y9q/\nRzddh7ZOBxdBQLu2rqi5XysrlBgARPaFIWFhDz6/QCEIqNPVD+r09nJH7LPNuzNZDkuMwW/uaJyX\nngvCS88FNVqmVHpBrTb8LGgisl8MiYcYegbBz1dvm7x5zFBnMdB4xE6d+O9R/2WV9y0yx7+lxuDz\nFz7Ro4kh8QBTzyCQGucvdZOZt5e7yc809xz/HINPRObEkHiA3KGeD5/YpbaT8/hJS8zxz1/9RGQu\nHAL7ALlDPR8+sbdmiKij3QRGRI8WhsQD/Hzay1rv4RO71HZympt4ExgR2TObhUR2djaef/55RERE\nIDU1tUX7UJ0rQeIXKvx6zREkfqGC6lxJq8pk6mauf
6/X66HXhreLfTZQf+OXQgBcFP+e09Tby503\ngRGR3bNJn4ROp8N7772HL7/8El26dMH06dMRFhaGvn37Sm4zZdnuRrOIWuLpa4Y6ffv17GTwXgBT\n2z188xcRkSOySUjk5eWhV69e6N69OwBg0qRJyMrKMhoSOp3YKAgs9fS11jwNjGFARM7GJs1NJSUl\n6Natm/61r68vbt68KXv7fTm/WOXpXkREjzqbhIQoyn2UjGE3blVJdhZztBARkfnYpLmpa9euuH79\nuv51SUkJunTpInt7f18vxIb9B9ZtPd3kvRcj+kGp9DJLOa3NUcstlzPXz5nrBrB+jzJBbO3P+hao\nq6vD888/jy+//BJKpRKxsbFYv3690T4JIiKyPptcSbi4uOC//uu/8Oqrr0IURUyfPp0BQURkh2xy\nJUFERI6Bd1wTEZEkhgQREUliSBARkSS7DwlzzPFkb8aNG4eoqChER0dj+vTpAIA7d+7g1VdfRURE\nBObPn4/KSsd5iltCQgJGjhyJyMhI/TJj9Vm9ejXGjx+PKVOmID8/3xZFbhZD9du0aRNGjx6NmJgY\nxMTEIDs7W/9eSkoKxo8fjwkTJuDEiRO2KLJsxcXFmDNnDiZOnIjIyEhs2bIFgPMcv4fr99VXXwFw\nnuOn0WiMkXxQAAAKiUlEQVQQGxuL6OhoREZGYtOmTQCAwsJCzJgxAxEREYiLi0Ntba1+/aVLl2L8\n+PGYOXNmo1sRJIl2rK6uTgwPDxcLCwtFjUYjRkVFiRcuXLB1sVpt3LhxYnl5eaNla9euFVNTU0VR\nFMWUlBRx3bp1tihai/zjH/8Qz507J06ePFm/TKo+R48eFV977TVRFEXx7NmzYmxsrPUL3EyG6vfp\np5+KmzdvbrLuhQsXxClTpoharVa8du2aGB4eLup0OmsWt1lu3rwpnjt3ThRFUbx79644fvx48cKF\nC05z/KTq5yzHTxRFsbq6WhRFUaytrRVjY2PFs2fPiosXLxb3798viqIoJiYmit98840oiqKYlpYm\nvvvuu6IoiuK+ffvEJUuWmNy/XV9JPDjHU5s2bfRzPDk6URSh0+kaLcvKykJMTAwAICYmBocOHbJF\n0Vpk6NCh6NChQ6NlD9en4bhlZWUhOjoaADBo0CBUVlaitLTUugVuJkP1AwzPHJCVlYWJEyfC1dUV\nPXr0QK9evZCXl2eNYraIUqlE//79AQAeHh7o27cvSkpKnOb4GapfwxRAznD8AKBdu3YA6q8Samtr\nIQgCVCoVIiIiADQ+nzx4XCMiIpCTk2Ny/3YdEq2d48leCYKA+fPnY9q0adi+fTsA4NatW/Dx8QFQ\n/w/79u3btixiq5WVlTWqT1lZGQDg5s2b6Nq1q349X19flJS0bop3W0lLS8OUKVOwcuVKfXOMoX+z\njlK/wsJCFBQUYNCgQU3+PTrD8Wuo38CBAwE4z/HT6XSIjo5GaGgoQkND4e/vjw4dOkChqD+9d+3a\nVV+HB4+fi4sLOnTogPLycqP7t+uQMJT0zuCvf/0rdu3ahc8++wxpaWnIzc2FIAimN3QCho6pI9Z9\n1qxZOHToEDIzM+Hj44MPP/wQgOPWr6qqCosWLUJCQgI8PDwky+ws9XOm46dQKJCRkYHs7Gzk5eXh\n4sWLTdZpqMPD9RNF0WT97DokWjvHk71SKpUAAG9vb4SHhyMvLw+dO3fWX7ar1Wp4e3vbsoitJlUf\nX19fFBcX69crLi52yGPq7e2t/881Y8YMfZNE165dcePGDf16jlC/2tpaLFq0CFOmTEF4eDgA5zp+\nhurnTMevgaenJ4YNG4Yff/wRFRUV+ibtB+vw4PGrq6vD3bt30bFjR6P7teuQeOKJJ3D16lUUFRVB\no9Fg3759CAsLs3WxWqWmpgZVVfXTmVdXV+PEiRMICgrCuHHjsGvXLgBAenq6w9Xz4V8oUvUJCwtD\nRkYGAODs2bPo0KGDvlnDnj1cP7Varf/74MGDCAoKAlBf7/3790Oj0eDatWu4evWqvnnDXiUkJCAw\nMBBz587VL3Om42eofs5y/MrKyvRNZffu3UNOTg4CAwMxYsQIHDhwAEDj4zdu3Dikp6cDAA4cOICn\nn37a5GfY/bQc2dnZeP/99/VzPC1YsMDWRWqVa9eu4a233oIgCKirq0NkZCQWLFiA8vJyLFmyBDdu\n3ICfnx+Sk5MNdpbao7fffhsqlQrl5eXw8fHBwoULER4ejsWLFxusz6pVq3D8+HG0a9cOSUlJCA4O\ntnENjDNUP5VKhfz8fCgUCnTv3h2rVq3SnyxTUlKwY8cOuLq6YuXKlRg1apSNayDt9OnTePnllxEU\nFARBECAIApYuXYqBAwdK/nt0pOMnVb+9e/c6xfH7+eefsXz5cuh0Ouh0OkycOBG/+c1vcO3aNcTF\nxaGiogL9+/fHunXr0KZNG2g0Gixbtgz5+fno1KkT1q9fjx49ehj9DLsPCSIish27bm4iIiLbYkgQ\nEZEkhgQREUliSBARkSSGBBERSWJIEBGRJIYE2b3a2lokJycjIiICkZGRmDRpEtasWYO6ujqj261Y\nsQJpaWkA6qeGXrt2rcnPOnToEH766SezlNsSioqKsG3bNlsXgx4hDAmye8uXL8fFixeRkZGBPXv2\nYPfu3QgICIBGozH7Z2VlZdn1rJ+FhYX49ttvW7StqVAlMsTV1gUgMuaXX35BVlaW/g5foH72ytjY\nWAD1M2CuW7dO/3CYUaNGIT4+3uikZefPn8fvf/971NTUQKPRYMaMGZgzZw5OnDiBw4cPIycnBzt2\n7MArr7yCwsJCHDx4EIIgQKPR4NKlS/jHP/4BT0/PRvv85z//iXXr1qGqqgqCICA+Ph4jR45EXl4e\nPvjgA9TU1KBdu3ZYuXIlnnjiCZw6dQpr1qzBzp07AaDR61OnTuGDDz7AwIEDcfbsWSgUCqxfvx4B\nAQF47733UFRUhJiYGPTs2RPJycm4dOkSkpKSUF5eDq1Wizlz5mDq1KkAgMcffxzLli3D0aNHMWzY\nMCxatMjsx4icnFmeekFkIfv37xejo6Ml3//666/FefPmibW1taJWqxXnzp2rf8DK8uXLxa1bt4qi\nWP+QoDVr1oiiKIpVVVWiRqPR/z1x4kTx4sWLTbZ52LJly8QPP/ywyfLy8nIxNDRUPHv2rCiKoqjT\n6cSKigpRo9GIY8eOFXNyckRRFMUffvhBHDt2rKjVakWVSiVOmzZNv48HX6tUKjE4OFjMz88XRVEU\n//SnP4nvvPNOk/VEsf5BMzExMeKlS5dEUax/sE5ERIT+db9+/cTPP/9c8vsjMoVXEmTXRBOzxuTk\n5CAmJgYuLi4AgKlTp+LQoUN44YUXJLepqanBu+++i4KCAigUCqjVahQUFCAgIEBym40bN6Kmpga/\n/e1vm7x39uxZBAYGYtCgQQDqp2X28vLC+fPn4ebmpp9ELSQkBG5ubrh8+bLJevfp0wePP/44gPqH\n+xw9etTg
eleuXMGlS5cQFxen/660Wi0uXryIPn36AID+IUFELcGQILsWHByMK1euoLKyEl5eXk3e\nFw3Mh29qfvz169dDqVRi7dq1+gdAGevf2LlzJ06ePKl//rOhMshd3lBeFxeXRk8nvH//fqP13N3d\n9X+7uLjon1FsaH/e3t76mT0fJggC2rdvb/A9IjnYcU12rVevXhg3bhwSExP1U6zX1dVhy5YtqKmp\nwciRI5Geno7a2lpotVpkZGQgNDTU6D4rKyvRrVs3CIKA8+fPIzc3V/+eh4cH7t69q3/9ww8/4LPP\nPsMf//hHuLm5Gdzfk08+iQsXLuDHH38EUN9PUlFRgYCAAGi1Wpw6dQoAcPLkSdTW1qJ3797o0aMH\nCgsLUVlZCVEUsW/fPlnfh6enp35qaKD+iqNt27bIzMzUL7t06ZL+uzJ1JUZkCq8kyO6tWbMGn376\nKaZOnQo3NzeIoojRo0fDzc0NM2fOxNWrV/XP7X3mmWf0ndpSfvOb3yA+Ph67d+9Gz549MWzYMP17\nU6ZMwYoVK3DgwAG88sor2LlzJ2pqajB//nz9VUBaWlqjX+cdO3bEpk2bkJSUhOrqari4uCA+Ph4h\nISH45JNPsHr1an3H9aeffgpXV1f4+vpi3rx5iImJgb+/P5544glcuHDB5HfRr18/9OnTB5GRkQgI\nCEBycjL++7//G++//z42b96Muro6+Pj4YOPGjQDs/6lqZP84VTgREUlicxMREUliSBARkSSGBBER\nSWJIEBGRJIYEERFJYkgQEZEkhgQREUliSBARkaT/AzLfG+oMx+5pAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ "\u003cmatplotlib.figure.Figure at 0x7fc3b259add0\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "counts = []\n",
+ "times = []\n",
+ "for n in np.logspace(0, 7, 50):\n",
+ "\n",
+ " with tf.Graph().as_default():\n",
+ " tf_collatz = ag.to_graph(collatz)\n",
+ " count = tf_collatz(tf.constant(n, dtype=tf.float32))\n",
+ " with tf.Session() as sess:\n",
+ " count_value = sess.run(count)\n",
+ "\n",
+ " res = %timeit -n10 -r1 -o -q sess.run(count)\n",
+ " counts.append(count_value)\n",
+ " times.append(res.best)\n",
+ " \n",
+ "plot_results(counts, times, 'AutoGraph')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "RRENYzLRF_f3"
+ },
+ "source": [
+ "# Eager"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 301
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 5003,
+ "status": "ok",
+ "timestamp": 1531757478713,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "dhDf8LLdF_f-",
+ "outputId": "3de0a5a5-7a11-4b41-8ab0-e4e21ce8d59b"
+ },
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYkAAAEcCAYAAAAydkhNAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3XtYVWW+B/Dv2hshBdSQHaighhwas7Qeb6GFDjIyI3LZ\nGphdJLLMzqSlKaPQsTPm5KhZkZ7moKOnManGK17wsUfIS87QNj2jnEnIg5cQEtyAyDWBvdf5g4d9\nBPbaLGCvfeP7+QvW2mvt38tGvq71vut9BVEURRAREZmhsncBRETkuBgSREQkiSFBRESSGBJERCSJ\nIUFERJIYEkREJIkhQUREktzsXQCRvYWHh6OiogJqtRqiKEIQBMyePRtvv/22vUsjsjuGBBGA9PR0\nPPHEE3Z5b4PBALVabZf3JuoMbzcRATA38cCNGzeQmJiISZMmITQ0FMuXL0dtba1p//fffw+tVotx\n48bhjTfewNKlS5GWlmbaf+LECcTFxWHChAmYN28efvjhB9O+8PBwbNu2DTExMXj88cdhNBqVbSBR\nNzEkiCSIoohFixbhb3/7G44ePYqysjJs3rwZANDU1ITFixdjzpw5OHv2LGbNmoXjx4+bjv3++++R\nmpqKd999F2fPnsXcuXPx2muvoampyfSao0ePYtu2bTh37hxUKv5TJMfE30wiAL/97W8xceJETJgw\nARMnTsSePXswbNgwhIaGws3NDffffz8SExPx3XffAQAuXLgAg8GA559/Hmq1Gr/61a8wZswY0/n2\n7NmDZ555Bo8++igEQUBcXBzc3d1x8eJF02vmz58PPz8/uLu727y9RHKxT4IIwCeffNKhT6KyshJr\n167FuXPnUF9fD4PBgIEDBwIA9Ho9/Pz82rx+8ODBpq9/+uknHDx4ELt27QLQclXS3NyMW7dumV7j\n7++vVHOIrIYhQQTzfRKbNm2CIAg4cuQI+vfvj+zsbKxduxYAoNFoUFZW1ub1N2/exLBhwwC0BMCi\nRYvw6quvKl88kYJ4u4lIQl1dHTw9PeHl5YWysjJs377dtO+xxx6DWq1GRkYGDAYDsrOzkZeXZ9qf\nkJCAL7/80rStvr4ep06dQn19vc3bQdQTvJIgAvDaa69BpVKZnpOYMmUK3nzzTaxYsQLjx4/H8OHD\nERsbi08//RQA0KdPH2zevBmpqanYtGkTwsLCEB4ebupfeOSRR/Duu+9izZo1KCoqgoeHB8aNG4cJ\nEyYAAARBsFdTibpEUHLRocbGRjz33HNoamqCwWBAZGQkXn/9dRQXF2PZsmW4c+cORo8ejQ0bNsDN\njXlFzi0hIQHz5s2DVqu1dylEVqPo7SZ3d3fs3LkTmZmZyMzMxOnTp3Hx4kW8//77SEpKwldffQVv\nb2/s3btXyTKIFPHdd9+hvLwcBoMBBw4cwOXLl/HUU0/Zuywiq1K8T6Jv374AWq4qmpubIQgCdDod\nIiMjAQBarbbN+HIiZ3Ht2jXExsZi/Pjx+PTTT/Hxxx/D19fX3mURWZXi93iMRiNmz56NoqIiPPfc\ncwgMDET//v1NDw/5+/u3GRZI5CwSEhKQkJBg7zKIFKX4lYRKpTLdasrLy8OVK1c6vIadeEREjslm\nQ2C9vLwwYcIEXLx4EdXV1aa5akpLS/HAAw90eryC/etERCRB0dtNlZWV6NOnD7y9vfHzzz8jNzcX\nCxcuxKRJk3Ds2DHMnDkTBw4cwPTp0zs9lyAI0OtrlCzXrjQab7bPSbly2wC2z9lpNN49Ol7RkNDr\n9Vi5ciWMRiOMRiNmzpyJqVOnIigoCMuWLUNaWhpGjRqFp59+WskyiIiomxR9TsLaXD3t2T7n5Mpt\nA9g+Z9fTKwlOy0FERJIYEkREJIkhQUREkhgSREQkiSFBRESSGBJERCSJIUFERJIYEkREJIkhQURE\nkhgSREQkiSFBRESSGBJERCSJIUFERJIYEkREJIkhQUREkhgSREQkiSFBRESSGBJERCSJIUFERJIY\nEkREJIkhQUREkhgSREQkiSFBRESSGBJERCSJIUFERJIYEkREJIkhQUREkhgSREQkyU3Jk5eWliI5\nORnl5eVQq9VISEjACy+8gC1btmD37t0YNGgQAGDp0qUICwtTshQiIuoGRUNCrVZj1apVGDVqFOrq\n6jB79mxMnjwZAJCUlISkpCQl356IiHpI0ZDQaDTQaDQAAE9PT4wcORK3bt0CAIiiqORbExGRFdis\nT6K4uBgFBQUYM2YMACAjIwOxsbFITU1FTU2NrcogIqIusElI1NXVYcmSJUhJSYGnpyeeffZZZGdn\n4+DBg/D19cW6detsUQYREXWRICp836e5uRmvvvoqwsLCkJiY2GF/SUkJFi1ahMOHDytZBhERdYOi\nfRIAkJKSguDg4DYBodfrTX0Vx48fR0hIiKxz6fWue1tKo/Fm+5yUK7cNYPucnUbj3aPjFQ2J8+fP\n4/DhwwgJCUFcXBwEQcDSpUtx5MgR5OfnQ6VSYejQoVizZo2SZRARUTcpGhLjxo1Dfn5+h+18JoKI\nyDnwiWsiIpLEkCAiIkkMCSIiksSQICIiSQwJIiKSxJAgIiJJDAkiIpLEkCAiIkkMCSIiksSQICIi\nSQwJIiKSxJAgIiJJDAkiIpLEkCAiIkkMCSIiksSQICIiSQwJIiKSxJAgIiJJspcv/fnnn6HX6+Hh\n4YEHHnhAyZqIiMhBWAwJo9GIzMxM7NmzBwUFBfDy8kJjYyPc3NwQERGBF198EQ8++KCtaiUiIhuz\nGBLz5s3DY489hlWrVmH06NFQq9UAgIqKCnzzzTdYvXo1nnnmGURFRdmkWCIisi1BFEVRamdlZSV8\nfHwsnkDOa6xFr6+xyfvYg0bjzfY5KVduG8D2OTuNxrtHx1vsuDb3x7+iogIXLlyw+BoiInINskY3\nPfvss6ipqUF1dTXi4uKQmpqK9evXK10bERHZmayQqK+vh7e3N06cOIHo6GgcPnwYZ86cUbo2IiKy\nM1kh0djYCADQ6XSYPHkyVCqVqRObiIhcl6yQmDhxIiIjI3Hu3DlMnDgR1dXVUKn4HB4RkauT9TDd\nO++8g4KCAgQGBsLd3R21tbVYu3at0rUREZGdWQyJwsJC09d9+vRBaWmp6Xt3d3flqiIiIodgMSQW\nLlwIQRAgiiJu3rwJLy8vAEBtbS0GDx6Mr7/+2uLJS0tLkZycjPLycqjVasTHx2P+/Pm4c+cOli5d\nipKSEgQEBOCjjz6Ct3fPxvISEZH1WQyJ1hBYu3Ytxo0bh9/85jcAgGPHjuHSpUudnlytVmPVqlUY\nNWoU6urqMHv2bEyZMgX79+9HaGgoXnnlFWzduhXp6elYvny5FZpDRETWJKv3OS8vzxQQAPDrX/8a\n3377bafHaTQajBo1CgDg6emJkSNHoqysDDk5OdBqtQAArVaL7Ozs7tROREQKkxUSDQ0NOHfunOn7\nc+fOoaGhoUtvVFxcjIKCAowdOxYVF
RXw9fUF0BIkt2/f7tK5iIjINmSPblq2bBn69u0LALh79y42\nbdok+03q6uqwZMkSpKSkwNPTE4IgdKvYns5B4ujYPuflym0D2D65Tv+jGHty/hdFZTUY5ueN+On/\ngrDHA6xybnuRFRLjx49HdnY2rl27BlEUERQUJHt0U3NzM5YsWYLY2FhEREQAAAYNGoTy8nL4+vpC\nr9fLnv/J1SfhYvuckyu3DWD75NJdKkP6oe9N31+/WY2Nu86juvpnTHrYr8fn7y5FJ/i7l8FggLu7\nO9zc3FBUVNRmeKwlKSkpCA4ORmJiomlbeHg49u/fDwA4cOAApk+f3sWyiYgcS1budYntP9q0DmuT\ndSWRkZGB999/HwMHDjTdKhIEATk5ORaPO3/+PA4fPoyQkBDExcVBEAQsXboUr7zyCt58803s27cP\nQ4YMQVpaWs9bQkRkRz+V15vdfrOizsaVWJeskNixYweOHDmCoUOHdunk48aNQ35+vtl9n376aZfO\nRUTkyIb49kOxvmMgDB7kaYdqrEfW7SaNRtPlgCAi6k2iQkdIbB9u20KsTNaVxOTJk7FhwwZERUXB\nw8PDtD04OFixwoiIHInuUhmycq/jp/J6DPHth6jQEW06pFu/zsr9ETcr6jB4kCeiQofbtdPaGmSF\nRGZmJoCWJ61byemTICJyBe1HLhXr60zftw8KZw+F9mSFRGdzNBERuTJLI5dcLRTakxUSQMuMsDqd\nDgDwxBNPYOTIkYoVRUTUXZ3dFuoOVx25JIesjuvMzEy8+OKLyM/PR35+PpKSknDo0CGlayMi6pLW\n20LF+joYRdF0W0h3qaxH5x3i28/sdmcfuSSH7CGwBw4cgEajAQDo9XosWLAAMTExihZHRNQVSt0W\nigod0aZP4v+3O/fIJTlk325qDYj2XxMROQqlbgu56sglOWSFxLBhw/Dxxx9j7ty5EAQBu3fvRmBg\noNK1ERF1iZIPtLniyCU5ZPVJ/P73v8e1a9cQExODmJgYXL16FWvWrFG6NiKiLnHVB9rsSdaVxKBB\ng/Dhhx8qXQsRUY/05ttCSpEVElu3bkVCQgIGDhwIALh9+zb27duHl19+WdHiiIi6qrfeFlKKrNtN\nWVlZpoAAgPvvvx9HjhxRrCgiInIMskJCFMUO2wwGg9WLISIixyIrJEaMGIH/+q//giiKMBqN2LFj\nB4YNG6Z0bUREZGeyQiI1NRUnTpzAmDFj8Nhjj+HUqVNYvXq10rUREZGdyeq49vPzw86dO1Ff3/Kg\nSr9+5h9RJyIi1yK7T2LPnj345JNP0K9fPxQXF+O///u/la6NiIjsTFZIrFu3Dt9++y2ys7MBAJ6e\nnnjvvfcULYyIiOxP1u0mnU6HzMxMaLVaAC1DYO/evatoYUREligxJTh1JCskPDw8IAiC6Xuj0ahY\nQUREnZG7Uhz1nKzbTSEhITh06BBEUURxcTH+/d//HePGjVO6NiIisyxNCU7WJSskVq5cibNnz0Kv\n1yM+Ph4GgwErVqxQujYiIrN680pxtibrdpOXlxfWrl2rdC1ERLIoOSU4tSXrSuLo0aOora0FAKSl\npWHBggX45z//qWhhRES6S2VYvV2Hl9efwOrtOtMypJwS3HZkhcSf/vQneHl5IS8vD2fOnEFcXByv\nLIhIUZbWq570sB9ejRmNAI0X1CoBARovvBozmp3WCpB1u8nNreVlf/vb3xAfH4/o6Gjs2LFD0cKI\nqHfrbL1qTgluG7KuJARBwKFDh5CVlYXQ0FAAQFNTk6KFEVHvxs5pxyArJN5++20cO3YM8fHxCAwM\nxPXr1zFp0qROj0tJScHkyZMRHR1t2rZlyxaEhYVBq9VCq9Xi9OnT3a+eiFzWEF/zc8Sxc9q2BNHc\nYhFWcu7cOXh6eiI5ORmHDx8G0BISnp6eSEpK6vL59Poaa5foMDQab7bPSbly2wD7ta/9A3OtrN33\n0Bs+v56w2Cfxl7/8BYmJidiwYYPZ/cnJyRZPPn78eJSUlHTYrmAuEZGD6uo0Glyv2jFYDAkPDw8A\n1p8aPCMjAwcPHsQjjzyClStXwtu7Z0lHRI6tu9NosHPa/hS93QQAJSUlWLRokel2U2VlJe6//34I\ngoAPP/wQer2eM8oSubjF75/A9ZvVHbaPGNwfm5f/0g4VkVwWryQyMjIsHvzcc891+Q19fHxMXyck\nJGDRokWyj3X1+4Zsn3Ny5bYB1mlfUan542+U1dj9Z9cbPr+esBgS1niquv2Fil6vh0ajAQAcP34c\nISEhPX4PInJsnEbDeVkMiXXr1vXo5G+99RZ0Oh2qqqowbdo0LF68GDqdDvn5+VCpVBg6dCjWrFnT\no/cgIscXFTrC7EglTqPh+CyGxKlTpywePHXqVIv7N23a1GHbnDlzZJRFRK6EI5Wcl8WQ+POf/yy5\nTxCETkOCiKgVRyo5J4sh8dlnn9mqDiIickAWQ+LGjRsIDAxEYWGh2f3BwcGKFEVERI7BYkisXbsW\n6enpWLhwYYd9giAgJydHscKIiMj+LIZEeno6AODrr7+2STFERORYZK0nAQANDQ0oLS2FwWAwbePt\nJiLH1NV5koikyAqJnTt34sMPP8SAAQOgUrXMLs7bTUSOqbvzJBGZIysk/vKXv+DYsWPw8+MvGJGj\n62xFN6KukLXokL+/PwOCyElIrehWrK/F6u066C6V2bgicmayriQWL16M1NRUTJ061TR9OND5E9dE\nJM3a/Qat5zNamNiZt56oq2SFxIkTJ3DixAlcv369TZ8EQ4Koe6zdbyC1ipsU3noiuWSFxPHjx/H1\n11/jvvvuU7oeol6hJ/0G5q5ApM4n5WZFxxlZicyRFRKBgYFwc5M9WpaIOiHVb9DZH2+pKxBB6Nr7\nc4pukkvWX/7hw4cjMTERERERcHd3N23vzqJDRNT5+gpS/RVSVwxuKhWaDMYO2328PVBZc7fDdk7R\nTXLJCommpiYMGzYMly9fVroeol7B0voKlvorpK5Amo0dAwIA4n/Z8sArp+im7pIVEj1dfIiI2rK0\nvsLq7Tqzx6Qf+h591AKMho77hvp6ISp0uGQYMBSouzpdvvSRRx6R3N/Y2IgbN25g5MiRVi+MyNVJ\nra8gdbUAAE0G88NbWwOBYUDW1ukEfw0NDZg1axbGjh0LX19f3L17F9euXcM333yDU6dOYeXKlQwJ\nIiuS6q+4Vx+1CkZR5O0jUpzFkNi8eTPy8vLw17/+Ff/xH/+B0tJS9O3bFyEhIYiIiEBGRga8vLxs\nVStRryDVX3EvoyhiW/IvbVQR9Wad9kmMGTMGY8aMsUUtRISW21B7ThSaHZXUikNYyVZkzd1ERLbV\nOipJCoewkq3wCTkiO+hs3qZJD/uhsOQOcs4Xdzh2+rgA9kGQzTAkiGxM7rxNz/0qBMFDB/AZB7Ir\nhgSRjXVl3iYOayV7k9UnUVFRgeXLl5um4SgoKMAXX3yhaGFErqq78zYR2YOskHj77bcxbtw4VF
dX\nAwCCgoLw+eefK1oYkasa4tvP7HaOWCJHJCskysrKMG/ePKjVagCAu7u7aV0JIuqaqNAREts5Yokc\nj6w+ifbThFdXV0O0sPoVUW/TlVXmLM3bRORoZIXEjBkzsHr1atTV1WH//v34/PPPMWfOnE6PS0lJ\nwcmTJzFo0CAcPnwYAHDnzh0sXboUJSUlCAgIwEcffQRvb++etYLIjk7/o7jLq8yxQ5qchax7Ri+/\n/DLGjx+P0aNH49SpU3jhhReQmJjY6XGzZ8/G9u3b22zbunUrQkND8dVXX2HSpElIT0/vXuVEDmJP\nzv+a3Z6V+6ONKyGyPtlDYGNiYhATE9Olk48fPx4lJSVttuXk5GDXrl0AAK1WixdeeAHLly/v0nmJ\nHElRWY3Z7RytRK5AVkhUVFRg165dKCoqQnNzs2l7Wlpal9+wsrISvr6+AACNRoPbt293+RxE1tCV\nfgRLrx3m543rN6s7HMPRSuQKZIXEv/7rv+Lhhx9GaGioaYSTPWg0rt13wfbZjlQ/wtbD32O4f3/E\nT/8XhD0eYPG1/fvfh7DHAxA//V+wcdf5Du8xL/Ihh2pzT7hKO6S4evt6QlZINDQ04J133rHKGw4a\nNAjl5eXw9fWFXq+Hj4+P7GP1evOX9a5Ao/Fm+2zoi68KzG4XReD6zWps3HUe2w/+E/G/DJZ8QvqL\nr37AqIABCHs8ANXVP3cYrTQqYIBDtbm7HO2zs7be0L6ekBUSY8eOxQ8//ICHHnqoy2/QfqhseHg4\n9u/fj4ULF+LAgQOYPn16l89J1FOWVn9rVVlzF+mHvocgmN9/b58DRyuRq5IVEs888wyef/55+Pv7\nw8PDw7R97969Fo976623oNPpUFVVhWnTpmHx4sVYuHAh3njjDezbtw9DhgzpVr8GUU/JWf2tlZtK\nhSaDscN29jlQbyArJFasWIFFixbh4Ycf7lKfxKZNm8xu//TTT2Wfg8iaWjugS8rljzxqNnYMCIBP\nSFPvICskPDw8sGDBAqVrIeoWuaOU2k/RLddQXy9EhQ7nE9LUK8kKiaeeegqnT59GWFiY0vUQdYnc\ntRkA6Sm6AzQtIbDnZCEqqzsuGdoaCAwF6o1khcTu3buxdetWeHp6wt3dHaIoQhAE5ObmKl0fkUWW\n1mZo3d96hSF1i+lmRZ0pBFquSnjFQNRKVkjs27dP6TqIukVqlFJJeW2HKwwp93ZA84qBqC1ZITF0\n6FCl6yDqFqlRSlIjksxhBzSRNIshsWLFCmzcuBFz5syBYGaweGdDYImUcG9H9UAvd7OvkRqRJAgt\nHdG8nUQkj8WQaJ3p9Xe/+51NiiEyp30oVNb8f+dy69c+3h64U9do+sOflXvd7BXGUF8vrFkw0UaV\nEzk/iyHx+eef47333sPEifxHRfbRfvTSvQFxr3739cH7v53SZpu54a68tUTUNRZDIj8/31Z1EJm1\n50ShrNe1n5abq78RWYfs9SSIlNT+gbiHht2PH4puS145tGduigyOVCLqOYshcfnyZYSGhnbYzuck\nyJrMPRAnd16lVryNRKQMiyExYsQIbN261Va1UC8l9UCcHH3UKrwUNYpXDEQKsRgS7u7ufEaCFCdn\n2m4pDAgiZaks7ezTp4+t6qBebIhvP9mv7aNWQSW0zLf0asxoBgSRwixeSezevdtWdVAvFhU6Qtbs\nrAwFItvj6Cayu9Y//FKzsPr090D8tGAGBJEdMCTI6syt7zBrquV1djkLK5FjEsT2i1A7MFdfrNzZ\n2mcuDADzTzq3zJnkKbkgkDNzxs+uK9g+56bRWP4PWmd4JUHdIrXYj4+3h9nXi6LlBYGIyDFZHN1E\nJEXq2QY5T0i3LghERI6PIUHd0pNnG9rPs0REjou3m0iW9v0P/e5zQ21DU7fOZW6eJSJyTAwJ6pS5\n/oee4DxLRM6DIUGd6sncSq1UAjDE14tDWomcDEOCOtWd/of2K8XNmhrs0sMMiVwVQ4I6NcS3n+xb\nTHw6msi1MCSoU3LmVgrQ8FYSkStiSFCnTHMrnSg0+xwEJ94jcl12C4nw8HB4eXlBpVLBzc0Ne/fu\ntVcpJAPnViLqnewWEoIg4LPPPsOAAQPsVUKvZG6+pa78kee60US9i91CQhRFGI1Ge719ryQ13xLA\nuZSIyDy7XkksWLAAgiBg7ty5SEhIsFcpLkfqakHqeYes3B8ZEkRklt1C4ssvv4RGo0FlZSWSkpIQ\nFBSE8ePH26sclyF1tVBYckfyeQfOpUREUhxiPYktW7bA09MTSUlJ9i7F6S1+/wSu36w2u893YF+U\nVzV02D5icH9sXv5LpUsjIidklyuJhoYGGI1GeHp6or6+HmfOnMHrr7/e6XGu/MSutRY+KSqVPodU\nH1DkhEDFf7auvLCLK7cNYPucnVMuOlReXo7XX38dgiDAYDAgOjoaTz75pD1KcTmWno6+U9uIV2NG\ncwgrEclml5AIDAzEwYMH7fHWLs/S09GDB3lyCCsRdQkXHXIxkx72w/RxAWb3cYpuIuoqTsvhgp77\nVQiChw7gbSUi6jGGhIvibSUisgbebiIiIkkMCSIiksSQICIiSeyTsKGezsBKRGRrDAkbyTh+GTnn\ni03fcwZWInIGDAmF6S6VSa7oBnAGViJybAwJBbWfkdUczsBKRI6MIWEl5vobpNZvuNfgQZ5Kl0ZE\n1G0MCSuQWsNBEDo/llNlEJEj4xBYK5C6YnBTWf7xTh8XwP4IInJovJKwAqkV35ol1m/w8fZA/C+D\nGRBE5PAYElYgtYbDUF8vRIUO50R7ROS0GBKdkPMAnNQaDq2BwFAgImfFkLBAqkMaaPsAXOvXvGIg\nIlfDkGjn3isHtUS/s7kH4HjFQESuiCFxj/ZXDkaD+dfxATgi6i04BPYech5+A/gAHBH1HgyJe0gN\nZW2PD8ARUW/B2033kBrK2ketglEU2SFNRL0OQ+IeUkNZX4oaxWAgol6JIXEPDmUlImqLIdEOh7IS\nEf0/dlwTEZEkp76S4JrRRETKctqQkDtlBhERdZ/dbjedPn0av/71rxEZGYmtW7d2+XipB9+ycn/s\nWWFERGRil5AwGo149913sX37dhw5cgRZWVm4cuVKl84h9eAbp8wgIrIeu4REXl4ehg8fjqFDh6JP\nnz6IiopCTk6OxWNiVxzC6u066C6VAWh58M0cTplBRGQ9dgmJsrIyDB482PS9n58fbt26ZfEYo1E0\n9TvoLpUhKnSE2ddxygwiIuuxS0iIotij41un6n41ZjQCNF5QqwQEaLzwasxodloTEVmRXUY3+fv7\n46effjJ9X1ZWhgceeED28Tcr6qDReGPWVG/MmhqsRIl2odF427sERbly+1y5bQDb15vZJSQeffRR\nFBUVoaSkBBqNBllZWfjggw8sHnN4U6yNqiMiolZ2CQm1Wo1/+7d/w0svvQRRFPH0009j5MiR9iiF\niIgsEMSedhAQEZHL4txNREQkiSFBRESSGBJERCTJ4
UOip3M8OaLw8HDExMQgLi4OTz/9NADgzp07\neOmllxAZGYkFCxagpqbGzlXKl5KSgsmTJyM6Otq0zVJ71q5dixkzZiA2Nhb5+fn2KLlLzLVvy5Yt\nCAsLg1arhVarxenTp0370tPTMWPGDPzmN7/BmTNn7FGybKWlpZg/fz5mzpyJ6Oho7Ny5E4DrfH7t\n2/fZZ58BcJ3Pr7GxEfHx8YiLi0N0dDS2bNkCACguLkZCQgIiIyOxbNkyNDc3m16/dOlSzJgxA3Pn\nzm3zKIIk0YEZDAYxIiJCLC4uFhsbG8WYmBixsLDQ3mX1WHh4uFhVVdVm24YNG8StW7eKoiiK6enp\n4saNG+1RWrd899134qVLl8RZs2aZtkm15+TJk+Irr7wiiqIoXrhwQYyPj7d9wV1krn2bN28Wd+zY\n0eG1hYWFYmxsrNjU1CTeuHFDjIiIEI1Goy3L7ZJbt26Jly5dEkVRFGtra8UZM2aIhYWFLvP5SbXP\nVT4/URTF+vp6URRFsbm5WYyPjxcvXLggvvHGG+LRo0dFURTF1atXi1988YUoiqKYkZEhvvPOO6Io\nimJWVpb45ptvdnp+h76S6M4cT85AFEUYjcY223JycqDVagEAWq0W2dnZ9iitW8aPH4/+/fu32da+\nPa2fW04mDIcbAAAJLUlEQVRODuLi4gAAY8eORU1NDcrLy21bcBeZax9gfuaAnJwczJw5E25ubggI\nCMDw4cORl5dnizK7RaPRYNSoUQAAT09PjBw5EmVlZS7z+ZlrX+sUQK7w+QFA3759AbRcJTQ3N0MQ\nBOh0OkRGRgJo+/fk3s81MjISubm5nZ7foUOiO3M8OQNBELBgwQLMmTMHe/bsAQBUVFTA19cXQMsv\n9u3bt+1ZYo9VVla2aU9lZSUA4NatW/D39ze9zs/PD2VlZXapsacyMjIQGxuL1NRU0+0Yc7+zztK+\n4uJiFBQUYOzYsR1+H13h82tt35gxYwC4zudnNBoRFxeHKVOmYMqUKQgMDET//v2hUrX8eff39ze1\n4d7PT61Wo3///qiqqrJ4focOCXNJ7wq+/PJL7N+/H9u2bUNGRgbOnTsHQRDsXZZNmPtMnbHtzz77\nLLKzs3Hw4EH4+vrij3/8IwDnbV9dXR2WLFmClJQUeHp6StbsKu1zpc9PpVIhMzMTp0+fRl5entll\nF1rb0L59oih22j6HDomezvHkqDQaDQDAx8cHERERyMvLw6BBg0yX7Xq9Hj4+PvYsscek2uPn54fS\n0lLT60pLS53yM/Xx8TH940pISDDdkvD398fNmzdNr3OG9jU3N2PJkiWIjY1FREQEANf6/My1z5U+\nv1ZeXl6YMGECLl68iOrqatMt7XvbcO/nZzAYUFtbiwEDBlg8r0OHxL1zPDU2NiIrKwvTp0+3d1k9\n0tDQgLq6loWR6uvrcebMGYSEhCA8PBz79+8HABw4cMDp2tn+fyhS7Zk+fToyMzMBABcuXED//v1N\ntzUcWfv26fV609fHjx9HSEgIgJZ2Hz16FI2Njbhx4waKiopMtzccVUpKCoKDg5GYmGja5kqfn7n2\nucrnV1lZabpV9vPPPyM3NxfBwcGYNGkSjh07BqDt5xceHo4DBw4AAI4dO4Ynnnii0/dw+Gk5Tp8+\njT/84Q+mOZ4WLlxo75J65MaNG3j99dchCAIMBgOio6OxcOFCVFVV4c0338TNmzcxZMgQpKWlme0s\ndURvvfUWdDodqqqq4Ovri8WLFyMiIgJvvPGG2fasWbMG33zzDfr27Yt169Zh9OjRdm6BZebap9Pp\nkJ+fD5VKhaFDh2LNmjWmP5bp6enYu3cv3NzckJqaiieffNLOLZB2/vx5PP/88wgJCYEgCBAEAUuX\nLsWYMWMkfx+d6fOTat+RI0dc4vP74YcfsHLlShiNRhiNRsycOROvvfYabty4gWXLlqG6uhqjRo3C\nxo0b0adPHzQ2NmLFihXIz8/HwIED8cEHHyAgIMDiezh8SBARkf049O0mIiKyL4YEERFJYkgQEZEk\nhgQREUliSBARkSSGBBERSWJIkMNrbm5GWloaIiMjER0djaioKKxfvx4Gg8HicatWrUJGRgaAlqmh\nN2zY0Ol7ZWdn43/+53+sUrcSSkpKsHv3bnuXQb0IQ4Ic3sqVK3HlyhVkZmbi8OHDOHToEIKCgtDY\n2Gj198rJyXHoWT+Li4vx17/+tVvHdhaqROa42bsAIkt+/PFH5OTkmJ7wBVpmr4yPjwfQMgPmxo0b\nTYvDPPnkk0hOTrY4adnly5fx+9//Hg0NDWhsbERCQgLmz5+PM2fO4Ouvv0Zubi727t2LF198EcXF\nxTh+/DgEQUBjYyOuXr2K7777Dl5eXm3O+Y9//AMbN25EXV0dBEFAcnIyJk+ejLy8PLz33ntoaGhA\n3759kZqaikcffRRnz57F+vXrsW/fPgBo8/3Zs2fx3nvvYcyYMbhw4QJUKhU++OADBAUF4d1330VJ\nSQm0Wi2GDRuGtLQ0XL16FevWrUNVVRWampowf/58zJ49GwDwi1/8AitWrMDJkycxYcIELFmyxOqf\nEbk4q6x6QaSQo0ePinFxcZL7P//8czEpKUlsbm4Wm5qaxMTERNMCKytXrhR37dolimLLIkHr168X\nRVEU6+rqxMbGRtPXM2fOFK9cudLhmPZWrFgh/vGPf+ywvaqqSpwyZYp44cIFURRF0Wg0itXV1WJj\nY6M4bdo0MTc3VxRFUfz73/8uTps2TWxqahJ1Op04Z84c0znu/V6n04mjR48W8/PzRVEUxT/96U/i\n8uXLO7xOFFsWmtFqteLVq1dFUWxZWCcyMtL0/UMPPST++c9/lvz5EXWGVxLk0MROZo3Jzc2FVquF\nWq0GAMyePRvZ2dl45plnJI9paGjAO++8g4KCAqhUKuj1ehQUFCAoKEjymI8++ggNDQ343e9+12Hf\nhQsXEBwcjLFjxwJomZbZ29sbly9fhru7u2kStdDQULi7u+PatWudtvvBBx/EL37xCwAti/ucPHnS\n7OuuX7+Oq1evYtmyZaafVVNTE65cuYIHH3wQAEyLBBF1B0OCHNro0aNx/fp11NTUwNvbu8N+0cx8\n+J3Nj//BBx9Ao9Fgw4YNpgWgLPVv7Nu3D99++61p/WdzNcjd3lqvWq1uszrh3bt327zOw8PD9LVa\nrTatUWzufD4+PqaZPdsTBAH9+vUzu49IDnZck0MbPnw4wsPDsXr1atMU6waDATt37kRDQwMmT56M\nAwcOoLm5GU1NTcjMzMSUKVMsnrOmpgaDBw+GIAi4fPkyzp07Z9rn6emJ2tpa0/d///vfsW3bNnzy\nySdwd3c3e77HH38chYWFuHjxIoCWfpLq6moEBQWhqakJZ8+eBQB8++23aG5uxogRIxAQEIDi4mLU\n1NRAFEVkZWXJ+nl4eXmZpoYGWq447rvvPhw8eNC07erVq6afVWdXYkSd4ZUEObz169dj8+bNmD17\nNtzd3SGK
IsLCwuDu7o65c+eiqKjItG7vU089ZerUlvLaa68hOTkZhw4dwrBhwzBhwgTTvtjYWKxa\ntQrHjh3Diy++iH379qGhoQELFiwwXQVkZGS0+d/5gAEDsGXLFqxbtw719fVQq9VITk5GaGgoPv74\nY6xdu9bUcb1582a4ubnBz88PSUlJ0Gq1CAwMxKOPPorCwsJOfxYPPfQQHnzwQURHRyMoKAhpaWn4\nz//8T/zhD3/Ajh07YDAY4Ovri48++giA46+qRo6PU4UTEZEk3m4iIiJJDAkiIpLEkCAiIkkMCSIi\nksSQICIiSQwJIiKSxJAgIiJJDAkiIpL0f3zF2/hGE4QYAAAAAElFTkSuQmCC\n",
+ "text/plain": [
+ "\u003cmatplotlib.figure.Figure at 0x7fc3af690a50\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "with context.eager_mode():\n",
+ "\n",
+ " counts = []\n",
+ " times = [] \n",
+ " for n in np.logspace(0, 7, 50):\n",
+ "\n",
+ " n_tensor = tf.constant(n, dtype=tf.float32)\n",
+ " count = collatz(n_tensor)\n",
+ "\n",
+ " res = %timeit -n10 -r1 -o -q collatz(n_tensor)\n",
+ " times.append(res.best)\n",
+ " counts.append(count)\n",
+ " \n",
+ "plot_results(counts, times, 'Eager')\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [
+ "x5ChBlH09jk_",
+ "_cRFTcwT9mnn"
+ ],
+ "default_view": {},
+ "last_runtime": {
+ "build_target": "",
+ "kind": "local"
+ },
+ "name": "Autograph vs. Eager Collatz speed test",
+ "provenance": [
+ {
+ "file_id": "0B8bm7KvwJklpMUQtbnVpYkdJUjRtOTRyWVVfSEhpRl9HYm5n",
+ "timestamp": 1531512047714
+ }
+ ],
+ "version": "0.3.2",
+ "views": {}
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_mnist_speed_test.ipynb b/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_mnist_speed_test.ipynb
new file mode 100644
index 0000000000..952ec091fb
--- /dev/null
+++ b/tensorflow/contrib/autograph/examples/notebooks/ag_vs_eager_mnist_speed_test.ipynb
@@ -0,0 +1,652 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "etTmZVFN8fYO"
+ },
+ "source": [
+ "This notebook runs a basic speed test for a short training loop of a neural network training on the MNIST dataset."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "eqOvRhOz8SWs"
+ },
+ "source": [
+ "### Imports"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "nHY0tntRizGb"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -U -q tf-nightly"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "Pa2qpEmoVOGe"
+ },
+ "outputs": [],
+ "source": [
+ "import gzip\n",
+ "import os\n",
+ "import shutil\n",
+ "import time\n",
+ "\n",
+ "import numpy as np\n",
+ "import six\n",
+ "from six.moves import urllib\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "from tensorflow.contrib import autograph as ag\n",
+ "from tensorflow.contrib.eager.python import tfe\n",
+ "from tensorflow.python.eager import context\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "PZWxEJFM9A7b"
+ },
+ "source": [
+ "### Testing boilerplate"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "kfZk9EFZ5TeQ"
+ },
+ "outputs": [],
+ "source": [
+ "# Test-only parameters. Test checks successful completion not correctness. \n",
+ "burn_ins = 1\n",
+ "trials = 1\n",
+ "max_steps = 2\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "k0GKbZBJ9Gt9"
+ },
+ "source": [
+ "### Speed test configuration"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "gWXV8WHn43iZ"
+ },
+ "outputs": [],
+ "source": [
+ "#@test {\"skip\": true} \n",
+ "burn_ins = 3\n",
+ "trials = 10\n",
+ "max_steps = 500\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "kZV_3pGy8033"
+ },
+ "source": [
+ "### Data source setup"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "YfnHJbBOBKae"
+ },
+ "outputs": [],
+ "source": [
+ "def download(directory, filename):\n",
+ " filepath = os.path.join(directory, filename)\n",
+ " if tf.gfile.Exists(filepath):\n",
+ " return filepath\n",
+ " if not tf.gfile.Exists(directory):\n",
+ " tf.gfile.MakeDirs(directory)\n",
+ " url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'\n",
+ " zipped_filepath = filepath + '.gz'\n",
+ " print('Downloading %s to %s' % (url, zipped_filepath))\n",
+ " urllib.request.urlretrieve(url, zipped_filepath)\n",
+ " with gzip.open(zipped_filepath, 'rb') as f_in, open(filepath, 'wb') as f_out:\n",
+ " shutil.copyfileobj(f_in, f_out)\n",
+ " os.remove(zipped_filepath)\n",
+ " return filepath\n",
+ "\n",
+ "\n",
+ "def dataset(directory, images_file, labels_file):\n",
+ " images_file = download(directory, images_file)\n",
+ " labels_file = download(directory, labels_file)\n",
+ "\n",
+ " def decode_image(image):\n",
+ " # Normalize from [0, 255] to [0.0, 1.0]\n",
+ " image = tf.decode_raw(image, tf.uint8)\n",
+ " image = tf.cast(image, tf.float32)\n",
+ " image = tf.reshape(image, [784])\n",
+ " return image / 255.0\n",
+ "\n",
+ " def decode_label(label):\n",
+ " label = tf.decode_raw(label, tf.uint8)\n",
+ " label = tf.reshape(label, [])\n",
+ " return tf.to_int32(label)\n",
+ "\n",
+ " images = tf.data.FixedLengthRecordDataset(\n",
+ " images_file, 28 * 28, header_bytes=16).map(decode_image)\n",
+ " labels = tf.data.FixedLengthRecordDataset(\n",
+ " labels_file, 1, header_bytes=8).map(decode_label)\n",
+ " return tf.data.Dataset.zip((images, labels))\n",
+ "\n",
+ "\n",
+ "def mnist_train(directory):\n",
+ " return dataset(directory, 'train-images-idx3-ubyte',\n",
+ " 'train-labels-idx1-ubyte')\n",
+ "\n",
+ "def mnist_test(directory):\n",
+ " return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')\n",
+ "\n",
+ "def setup_mnist_data(is_training, hp, batch_size):\n",
+ " if is_training:\n",
+ " ds = mnist_train('/tmp/autograph_mnist_data')\n",
+ " ds = ds.cache()\n",
+ " ds = ds.shuffle(batch_size * 10)\n",
+ " else:\n",
+ " ds = mnist_test('/tmp/autograph_mnist_data')\n",
+ " ds = ds.cache()\n",
+ " ds = ds.repeat()\n",
+ " ds = ds.batch(batch_size)\n",
+ " return ds\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "qzkZyZcS9THu"
+ },
+ "source": [
+ "### Keras model definition"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "x_MU13boiok2"
+ },
+ "outputs": [],
+ "source": [
+ "def mlp_model(input_shape):\n",
+ " model = tf.keras.Sequential((\n",
+ " tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape),\n",
+ " tf.keras.layers.Dense(100, activation='relu'),\n",
+ " tf.keras.layers.Dense(10, activation='softmax')))\n",
+ " model.build()\n",
+ " return model\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "DXt4GoTxtvn2"
+ },
+ "source": [
+ "# AutoGraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "W51sfbONiz_5"
+ },
+ "outputs": [],
+ "source": [
+ "def predict(m, x, y):\n",
+ " y_p = m(x)\n",
+ " losses = tf.keras.losses.categorical_crossentropy(y, y_p)\n",
+ " l = tf.reduce_mean(losses)\n",
+ " accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
+ " accuracy = tf.reduce_mean(accuracies)\n",
+ " return l, accuracy\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "CsAD0ajbi9iZ"
+ },
+ "outputs": [],
+ "source": [
+ "def fit(m, x, y, opt):\n",
+ " l, accuracy = predict(m, x, y)\n",
+ " opt.minimize(l)\n",
+ " return l, accuracy\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "RVw57HdTjPzi"
+ },
+ "outputs": [],
+ "source": [
+ "def get_next_batch(ds):\n",
+ " itr = ds.make_one_shot_iterator()\n",
+ " image, label = itr.get_next()\n",
+ " x = tf.to_float(tf.reshape(image, (-1, 28 * 28)))\n",
+ " y = tf.one_hot(tf.squeeze(label), 10)\n",
+ " return x, y\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "UUI0566FjZPx"
+ },
+ "outputs": [],
+ "source": [
+ "def train(train_ds, test_ds, hp):\n",
+ " m = mlp_model((28 * 28,))\n",
+ " opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
+ "\n",
+ " train_losses = []\n",
+ " test_losses = []\n",
+ " train_accuracies = []\n",
+ " test_accuracies = []\n",
+ " ag.set_element_type(train_losses, tf.float32)\n",
+ " ag.set_element_type(test_losses, tf.float32)\n",
+ " ag.set_element_type(train_accuracies, tf.float32)\n",
+ " ag.set_element_type(test_accuracies, tf.float32)\n",
+ "\n",
+ " i = tf.constant(0)\n",
+ " while i \u003c hp.max_steps:\n",
+ " train_x, train_y = get_next_batch(train_ds)\n",
+ " test_x, test_y = get_next_batch(test_ds)\n",
+ " step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n",
+ " step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
+ "\n",
+ " train_losses.append(step_train_loss)\n",
+ " test_losses.append(step_test_loss)\n",
+ " train_accuracies.append(step_train_accuracy)\n",
+ " test_accuracies.append(step_test_accuracy)\n",
+ "\n",
+ " i += 1\n",
+ " return (ag.stack(train_losses), ag.stack(test_losses),\n",
+ " ag.stack(train_accuracies), ag.stack(test_accuracies))\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 215
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 12156,
+ "status": "ok",
+ "timestamp": 1531752050611,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "K1m8TwOKjdNd",
+ "outputId": "bd5746f2-bf91-44aa-9eff-38eb11ced33f"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "('Duration:', 0.6226680278778076)\n",
+ "('Duration:', 0.6082069873809814)\n",
+ "('Duration:', 0.6223258972167969)\n",
+ "('Duration:', 0.6176440715789795)\n",
+ "('Duration:', 0.6309840679168701)\n",
+ "('Duration:', 0.6180410385131836)\n",
+ "('Duration:', 0.6219630241394043)\n",
+ "('Duration:', 0.6183009147644043)\n",
+ "('Duration:', 0.6176400184631348)\n",
+ "('Duration:', 0.6476900577545166)\n",
+ "('Mean duration:', 0.62254641056060789, '+/-', 0.0099792188690656976)\n"
+ ]
+ }
+ ],
+ "source": [
+ "#@test {\"timeout\": 90}\n",
+ "with tf.Graph().as_default():\n",
+ " hp = tf.contrib.training.HParams(\n",
+ " learning_rate=0.05,\n",
+ " max_steps=max_steps,\n",
+ " )\n",
+ " train_ds = setup_mnist_data(True, hp, 500)\n",
+ " test_ds = setup_mnist_data(False, hp, 100)\n",
+ " tf_train = ag.to_graph(train)\n",
+ " losses = tf_train(train_ds, test_ds, hp)\n",
+ "\n",
+ " with tf.Session() as sess:\n",
+ " durations = []\n",
+ " for t in range(burn_ins + trials):\n",
+ " sess.run(tf.global_variables_initializer())\n",
+ "\n",
+ " start = time.time()\n",
+ " (train_losses, test_losses, train_accuracies,\n",
+ " test_accuracies) = sess.run(losses)\n",
+ "\n",
+ " if t \u003c burn_ins:\n",
+ " continue\n",
+ "\n",
+ " duration = time.time() - start\n",
+ " durations.append(duration)\n",
+ " print('Duration:', duration)\n",
+ "\n",
+ " print('Mean duration:', np.mean(durations), '+/-', np.std(durations))\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "A06kdgtZtlce"
+ },
+ "source": [
+ "# Eager"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "hBKOKGrWty4e"
+ },
+ "outputs": [],
+ "source": [
+ "def predict(m, x, y):\n",
+ " y_p = m(x)\n",
+ " losses = tf.keras.losses.categorical_crossentropy(tf.cast(y, tf.float32), y_p)\n",
+ " l = tf.reduce_mean(losses)\n",
+ " accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
+ " accuracy = tf.reduce_mean(accuracies)\n",
+ " return l, accuracy\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "HCgTZ0MTt6vt"
+ },
+ "outputs": [],
+ "source": [
+ "def train(ds, hp):\n",
+ " m = mlp_model((28 * 28,))\n",
+ " opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
+ "\n",
+ " train_losses = []\n",
+ " test_losses = []\n",
+ " train_accuracies = []\n",
+ " test_accuracies = []\n",
+ "\n",
+ " i = 0\n",
+ " train_test_itr = tfe.Iterator(ds)\n",
+ " for (train_x, train_y), (test_x, test_y) in train_test_itr:\n",
+ " train_x = tf.to_float(tf.reshape(train_x, (-1, 28 * 28)))\n",
+ " train_y = tf.one_hot(tf.squeeze(train_y), 10)\n",
+ " test_x = tf.to_float(tf.reshape(test_x, (-1, 28 * 28)))\n",
+ " test_y = tf.one_hot(tf.squeeze(test_y), 10)\n",
+ "\n",
+ " if i \u003e hp.max_steps:\n",
+ " break\n",
+ "\n",
+ " with tf.GradientTape() as tape:\n",
+ " step_train_loss, step_train_accuracy = predict(m, train_x, train_y)\n",
+ " grad = tape.gradient(step_train_loss, m.variables)\n",
+ " opt.apply_gradients(zip(grad, m.variables))\n",
+ " step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
+ "\n",
+ " train_losses.append(step_train_loss)\n",
+ " test_losses.append(step_test_loss)\n",
+ " train_accuracies.append(step_train_accuracy)\n",
+ " test_accuracies.append(step_test_accuracy)\n",
+ "\n",
+ " i += 1\n",
+ " return train_losses, test_losses, train_accuracies, test_accuracies\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 215
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 52499,
+ "status": "ok",
+ "timestamp": 1531752103279,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "plv_yrn_t8Dy",
+ "outputId": "55d5ab3d-252d-48ba-8fb4-20ec3c3e6d00"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "('Duration:', 3.9973549842834473)\n",
+ "('Duration:', 4.018772125244141)\n",
+ "('Duration:', 3.9740989208221436)\n",
+ "('Duration:', 3.9922947883605957)\n",
+ "('Duration:', 3.9795801639556885)\n",
+ "('Duration:', 3.966722011566162)\n",
+ "('Duration:', 3.986541986465454)\n",
+ "('Duration:', 3.992305040359497)\n",
+ "('Duration:', 4.012261867523193)\n",
+ "('Duration:', 4.004716157913208)\n",
+ "('Mean duration:', 3.9924648046493529, '+/-', 0.015681688635624851)\n"
+ ]
+ }
+ ],
+ "source": [
+ "#@test {\"timeout\": 90}\n",
+ "with context.eager_mode():\n",
+ " durations = []\n",
+ " for t in range(burn_ins + trials):\n",
+ " hp = tf.contrib.training.HParams(\n",
+ " learning_rate=0.05,\n",
+ " max_steps=max_steps,\n",
+ " )\n",
+ " train_ds = setup_mnist_data(True, hp, 500)\n",
+ " test_ds = setup_mnist_data(False, hp, 100)\n",
+ " ds = tf.data.Dataset.zip((train_ds, test_ds))\n",
+ " start = time.time()\n",
+ " (train_losses, test_losses, train_accuracies,\n",
+ " test_accuracies) = train(ds, hp)\n",
+ " \n",
+ " train_losses[-1].numpy()\n",
+ " test_losses[-1].numpy()\n",
+ " train_accuracies[-1].numpy()\n",
+ " test_accuracies[-1].numpy()\n",
+ "\n",
+ " if t \u003c burn_ins:\n",
+ " continue\n",
+ "\n",
+ " duration = time.time() - start\n",
+ " durations.append(duration)\n",
+ " print('Duration:', duration)\n",
+ "\n",
+ " print('Mean duration:', np.mean(durations), '+/-', np.std(durations))\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [
+ "eqOvRhOz8SWs",
+ "PZWxEJFM9A7b",
+ "kZV_3pGy8033"
+ ],
+ "default_view": {},
+ "name": "Autograph vs. Eager MNIST speed test",
+ "provenance": [
+ {
+ "file_id": "1tAQW5tHUgAc8M4-iwwJm6Xs6dV9nEqtD",
+ "timestamp": 1530297010607
+ },
+ {
+ "file_id": "18dCjshrmHiPTIe1CNsL8tnpdGkuXgpM9",
+ "timestamp": 1530289467317
+ },
+ {
+ "file_id": "1DcfimonWU11tmyivKBGVrbpAl3BIOaRG",
+ "timestamp": 1522272821237
+ },
+ {
+ "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
+ "timestamp": 1522238054357
+ },
+ {
+ "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
+ "timestamp": 1521743157199
+ },
+ {
+ "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
+ "timestamp": 1520522344607
+ }
+ ],
+ "version": "0.3.2",
+ "views": {}
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/tensorflow/contrib/autograph/examples/notebooks/algorithms.ipynb b/tensorflow/contrib/autograph/examples/notebooks/algorithms.ipynb
new file mode 100644
index 0000000000..bf824e2760
--- /dev/null
+++ b/tensorflow/contrib/autograph/examples/notebooks/algorithms.ipynb
@@ -0,0 +1,1512 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "b9R-4ezU3NH0"
+ },
+ "source": [
+ "## AutoGraph: examples of simple algorithms\n",
+ "\n",
+ "This notebook shows how you can use AutoGraph to compile simple algorithms and run them in TensorFlow.\n",
+ "\n",
+ "It requires the nightly build of TensorFlow, which is installed below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "TuWj26KWz1fZ"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -U -q tf-nightly"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "3kudk1elq0Gh"
+ },
+ "source": [
+ "### Fibonacci numbers\n",
+ "\n",
+ "https://en.wikipedia.org/wiki/Fibonacci_number"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 197
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 7512,
+ "status": "ok",
+ "timestamp": 1532101577266,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "H7olFlMXqrHe",
+ "outputId": "472dbfe0-9449-4f93-e908-1a0785188a92"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "0 : 1\n",
+ "1 : 2\n",
+ "2 : 3\n",
+ "3 : 5\n",
+ "4 : 8\n",
+ "5 : 13\n",
+ "6 : 21\n",
+ "7 : 34\n",
+ "8 : 55\n",
+ "9 : 89\n"
+ ]
+ }
+ ],
+ "source": [
+ "import tensorflow as tf\n",
+ "from tensorflow.contrib import autograph as ag\n",
+ "\n",
+ "\n",
+ "def fib(n):\n",
+ " f1 = 0\n",
+ " f2 = 1\n",
+ " for i in range(n):\n",
+ " tmp = f2\n",
+ " f2 = f2 + f1\n",
+ " f1 = tmp\n",
+ " print(i, ': ', f2)\n",
+ " return f2\n",
+ "\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " final_fib = ag.to_graph(fib)(tf.constant(10))\n",
+ " with tf.Session() as sess:\n",
+ " sess.run(final_fib)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "p8zZyj-tq4K3"
+ },
+ "source": [
+ "#### Generated code"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 541
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 103,
+ "status": "ok",
+ "timestamp": 1532101577412,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "UeWjK8rHq6Cj",
+ "outputId": "73ece895-12fb-489a-e52c-032945d7ed7a"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "from __future__ import print_function\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "def tf__fib(n):\n",
+ " try:\n",
+ " with tf.name_scope('fib'):\n",
+ " f1 = 0\n",
+ " f2 = 1\n",
+ "\n",
+ " def extra_test(f1_1, f2_1):\n",
+ " with tf.name_scope('extra_test'):\n",
+ " return True\n",
+ "\n",
+ " def loop_body(i, f1_1, f2_1):\n",
+ " with tf.name_scope('loop_body'):\n",
+ " tmp = f2_1\n",
+ " f2_1 = f2_1 + f1_1\n",
+ " f1_1 = tmp\n",
+ " with ag__.utils.control_dependency_on_returns(ag__.utils.\n",
+ " dynamic_print(i, ': ', f2_1)):\n",
+ " f2, i_1 = ag__.utils.alias_tensors(f2_1, i)\n",
+ " return f1_1, f2\n",
+ " f1, f2 = ag__.for_stmt(ag__.utils.dynamic_builtin(range, n),\n",
+ " extra_test, loop_body, (f1, f2))\n",
+ " return f2\n",
+ " except:\n",
+ " ag__.rewrite_graph_construction_error(ag_source_map__)\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(ag.to_code(fib))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "eIfVy6ZTrFEH"
+ },
+ "source": [
+ "### Fizz Buzz\n",
+ "\n",
+ "https://en.wikipedia.org/wiki/Fizz_buzz"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 125
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 233,
+ "status": "ok",
+ "timestamp": 1532101577681,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "33CAheYsrEQ7",
+ "outputId": "82a493ee-15b5-419d-8c9c-5f4159090a05"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Buzz\n",
+ "11\n",
+ "Fizz\n",
+ "13\n",
+ "14\n",
+ "FizzBuzz\n"
+ ]
+ }
+ ],
+ "source": [
+ "import tensorflow as tf\n",
+ "from tensorflow.contrib import autograph as ag\n",
+ "\n",
+ "def fizzbuzz(i, n):\n",
+ " while i \u003c n:\n",
+ " msg = ''\n",
+ " if i % 3 == 0:\n",
+ " msg += 'Fizz'\n",
+ " if i % 5 == 0:\n",
+ " msg += 'Buzz'\n",
+ " if msg == '':\n",
+ " msg = tf.as_string(i)\n",
+ " print(msg)\n",
+ " i += 1\n",
+ " return i\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " final_i = ag.to_graph(fizzbuzz)(tf.constant(10), tf.constant(16))\n",
+ " with tf.Session() as sess:\n",
+ " sess.run(final_i)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "Lkq3DBGOv3fA"
+ },
+ "source": [
+ "#### Generated code"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 1081
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 289,
+ "status": "ok",
+ "timestamp": 1532101578003,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "bBhFIIaZrxvx",
+ "outputId": "d076a7ea-e643-4689-f90a-57f5d086dedc"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "from __future__ import print_function\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "def tf__fizzbuzz(i, n):\n",
+ " try:\n",
+ " with tf.name_scope('fizzbuzz'):\n",
+ "\n",
+ " def loop_test(i_1):\n",
+ " with tf.name_scope('loop_test'):\n",
+ " return tf.less(i_1, n)\n",
+ "\n",
+ " def loop_body(i_1):\n",
+ " with tf.name_scope('loop_body'):\n",
+ " msg = ''\n",
+ "\n",
+ " def if_true():\n",
+ " with tf.name_scope('if_true'):\n",
+ " msg_1, = msg,\n",
+ " msg_1 += 'Fizz'\n",
+ " return msg_1,\n",
+ "\n",
+ " def if_false():\n",
+ " with tf.name_scope('if_false'):\n",
+ " return msg,\n",
+ " msg = ag__.utils.run_cond(tf.equal(i_1 % 3, 0), if_true, if_false)\n",
+ "\n",
+ " def if_true_1():\n",
+ " with tf.name_scope('if_true_1'):\n",
+ " msg_2, = msg,\n",
+ " msg_2 += 'Buzz'\n",
+ " return msg_2,\n",
+ "\n",
+ " def if_false_1():\n",
+ " with tf.name_scope('if_false_1'):\n",
+ " return msg,\n",
+ " msg = ag__.utils.run_cond(tf.equal(i_1 % 5, 0), if_true_1, if_false_1\n",
+ " )\n",
+ "\n",
+ " def if_true_2():\n",
+ " with tf.name_scope('if_true_2'):\n",
+ " msg_3, = msg,\n",
+ " msg_3 = tf.as_string(i_1)\n",
+ " return msg_3,\n",
+ "\n",
+ " def if_false_2():\n",
+ " with tf.name_scope('if_false_2'):\n",
+ " return msg,\n",
+ " msg = ag__.utils.run_cond(tf.equal(msg, ''), if_true_2, if_false_2)\n",
+ " with ag__.utils.control_dependency_on_returns(ag__.utils.\n",
+ " dynamic_print(msg)):\n",
+ " msg_4 = ag__.utils.alias_tensors(msg)\n",
+ " i_1 += 1\n",
+ " return i_1,\n",
+ " i = ag__.while_stmt(loop_test, loop_body, (i,), (tf, n, ag__, i))\n",
+ " return i\n",
+ " except:\n",
+ " ag__.rewrite_graph_construction_error(ag_source_map__)\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(ag.to_code(fizzbuzz))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "BNRtprSvwJgk"
+ },
+ "source": [
+ "### Conway's Game of Life\n",
+ "\n",
+ "https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "r8_0ioEuAI-a"
+ },
+ "source": [
+ "#### Testing boilerplate"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "7moIlf8VABkl"
+ },
+ "outputs": [],
+ "source": [
+ "NUM_STEPS = 1"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "QlEvfIQPAYF5"
+ },
+ "source": [
+ "#### Game of Life for AutoGraph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "5pCK2qQSAAK4"
+ },
+ "outputs": [],
+ "source": [
+ "#@test {\"skip\": true} \n",
+ "NUM_STEPS = 100"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 308
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 14892,
+ "status": "ok",
+ "timestamp": 1532101593030,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "hC3qMqryPDHS",
+ "outputId": "8405c0e9-e518-41d6-f5bc-e78df6474169"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\u003cvideo width=\"432.0\" height=\"288.0\" controls autoplay loop\u003e\n",
+ " \u003csource type=\"video/mp4\" src=\"data:video/mp4;base64,AAAAHGZ0eXBNNFYgAAACAGlzb21pc28yYXZjMQAAAAhmcmVlAACZUm1kYXQAAAKuBgX//6rcRem9\n",
+ "5tlIt5Ys2CDZI+7veDI2NCAtIGNvcmUgMTQ4IHIyNzk1IGFhYTlhYTggLSBILjI2NC9NUEVHLTQg\n",
+ "QVZDIGNvZGVjIC0gQ29weWxlZnQgMjAwMy0yMDE3IC0gaHR0cDovL3d3dy52aWRlb2xhbi5vcmcv\n",
+ "eDI2NC5odG1sIC0gb3B0aW9uczogY2FiYWM9MSByZWY9MyBkZWJsb2NrPTE6MDowIGFuYWx5c2U9\n",
+ "MHgzOjB4MTEzIG1lPWhleCBzdWJtZT03IHBzeT0xIHBzeV9yZD0xLjAwOjAuMDAgbWl4ZWRfcmVm\n",
+ "PTEgbWVfcmFuZ2U9MTYgY2hyb21hX21lPTEgdHJlbGxpcz0xIDh4OGRjdD0xIGNxbT0wIGRlYWR6\n",
+ "b25lPTIxLDExIGZhc3RfcHNraXA9MSBjaHJvbWFfcXBfb2Zmc2V0PS0yIHRocmVhZHM9OSBsb29r\n",
+ "YWhlYWRfdGhyZWFkcz0xIHNsaWNlZF90aHJlYWRzPTAgbnI9MCBkZWNpbWF0ZT0xIGludGVybGFj\n",
+ "ZWQ9MCBibHVyYXlfY29tcGF0PTAgY29uc3RyYWluZWRfaW50cmE9MCBiZnJhbWVzPTMgYl9weXJh\n",
+ "bWlkPTIgYl9hZGFwdD0xIGJfYmlhcz0wIGRpcmVjdD0xIHdlaWdodGI9MSBvcGVuX2dvcD0wIHdl\n",
+ "aWdodHA9MiBrZXlpbnQ9MjUwIGtleWludF9taW49MTAgc2NlbmVjdXQ9NDAgaW50cmFfcmVmcmVz\n",
+ "aD0wIHJjX2xvb2thaGVhZD00MCByYz1jcmYgbWJ0cmVlPTEgY3JmPTIzLjAgcWNvbXA9MC42MCBx\n",
+ "cG1pbj0wIHFwbWF4PTY5IHFwc3RlcD00IGlwX3JhdGlvPTEuNDAgYXE9MToxLjAwAIAAAAPQZYiE\n",
+ "ABH//veIHzLLafk613IR560urR9Q7kZxXqS9/iAAAAMAFpyZZ6/h5MpYA5/oqv4s2qPbYpW3jfK6\n",
+ "zQ6q7WMrNj7Hy8jZzmBpfHCwAAO1W4riBNsrapcCk+5V1W0XkkFULR4Qe+H3uGA2HgNW0zFAAUgt\n",
+ "W4tdpXv2OEg0Vuy5W5l/xGRmEGKDyeXyrM0S6q/1EKbad0x2mcHseUqNmeOGLy1N3b376XZKZcPY\n",
+ "IXC5F2332tNMj8CwOQiXM9PiCLyCVfZ3rQSkKBTZErkpS5kXUyoJG3FdIqLjRFKEapbUjcW64HIo\n",
+ "BeIbtRyWV9FyZfcTakx2KW3eB4ZI//MDykSe8CRgN76uBEqZFXwO63wmUREhHOb5AdaLV3xyGl/I\n",
+ "RV70rU/3t9t1aq5mFD3hy1aLTAV2U7nG072dyX87F7NgCxZHT2kFxu44fxf6gqVzE3PEbGr5fx9x\n",
+ "7TKXtmY53VP8UaeCd2HJiZ/sd165SutTnfiWvaLuCnmmXGF0AGqbj9S19kgOhTubZIJBydTTqQOV\n",
+ "YRlxbgKn2nzvunv9+NDG0/2ikyyp73W15QClmjyt8dUeynoN8CwtEQ59DdrAPZe4ARZTwWAfsRXw\n",
+ "1vcZ6Gr1nCNWllQw5IyZyxQtXrfc5p4wjPvGaltciG7d3FG1SGk6HDsZy5i/PsnkjRXLUvGbzYp2\n",
+ "2gs7ZSGfSJbEifctcMGeSqhOOYORKy6f/9omoieCVEEkniBXwWZ/eImb3nxF7SFIaBjgG2j9w5ut\n",
+ "BY6zSuQ5zRCdajzJ1loNO0havI8mp5yViAeAlLKYCxeK0Lha1FskL67W1YsARZVZ5EkhqAYEeTNI\n",
+ "M38Og48OXmj6QBN7c1b9uDUTacYEXO88ZQ1gCIREIMnm2Fgkir8pN4gtSeQ12sfOVz5x5KX7sa95\n",
+ "L4LyFQPDrFZcDBr4PWLeEEv8yzk0cYHE97GmAlA6WQ0HlWsS42cnXefvTPXnx4vcq8pbEo/slAuH\n",
+ "IBsrJEN1+aMCc9FNxwUPVbZVaWVjwLY0qh+mNWEaiNGRmacDXrYWw0NjqMPiLiFHacY5oGELRgym\n",
+ "S2mSo6zhsD1wKQ3EUQtwrjKPiDYc/HCqhkVwoWKUdI8xTS60kn4f5UqB0L77Yevh/wt7AnvQKQAq\n",
+ "QAEEevggRl1uigbOBTtscnYRnAj0edW4QExAzdo+RwLWXTzW/l3cBWTrh3ORzZQlxJ8jQTvPLB+f\n",
+ "bLazJZWFQQDcWhuhQ3gYcP1ruNwIroINRIr8px0UOgAhnk6CllxMN6gA5S0YPhFVFKd3n0AAAC9f\n",
+ "vYgISQAAAltBmiRsQR/+tSqC8p1IAOZemTPutEfx0mzK8zG8tdIxonBsDpoLZ+NnIOp4qK6idP1s\n",
+ "vbGvZz/zHM86Bg3q0yx2atmtgoo/Trt3YRy3se4HTjou+tCi7oJt2d7A8vEhVDu33JNJx+WCOgP0\n",
+ "03nVdg9lBs15v/0w7qMc3zqqJXCOy/Whl9aRhcaeOEWcD7uK6mCV8a6MpDJ959xBRfv2i/qFOFbL\n",
+ "Grs58WiGJcq4MQJI+rVWuFN50oiqBgiunfUrRmdviPYpNN11V9pwcOJwssWfIE3agnor/RC7vfLY\n",
+ "YoXzaJjtWLEL92OOaHLZT0j555xfb4FZcoJee+RXovB9IaoDdYRusngtBXPMUvnO+g2Z5Qdo9P8q\n",
+ "Zb8ItBAeHT8IBZAD/Z2nEA6qbxqOBSBtQNW6ZFYLtCTIoP/bLjCDHgtZk3cf+N1CpXs15pUIYWDW\n",
+ "elZtlTkM4w4EJlLdjLZyQPAeaBx/qoLmKyTKAEhm0hU8EcTq00f6fwkWgz2J6GTGtL/vJXgC8u4o\n",
+ "nTnf+Ou7sVJGVaouXxrzx+yGVHEcp/eV4gaFA95rInngQAOZWbA3558nK61JBPZl3NjEv5B9r9pg\n",
+ "2+SYY3wBAUeu2fgAB2+yYGw82pkoJJKpzYWORs6i1vn3GEgUTcwlYsdJcraYC5SnGvqSZhX7KM72\n",
+ "uE1e9bkpvpVyG/mkACn5R4jwX3xc2utCjjZgM101rirIF/7VfDtmJsSTDes+UVhbSr3SeMSI9ixJ\n",
+ "+fVuFZ5bnQPoRIfPc+Erw+K99JiGN+HE98/eq4pPlMY9oCfVPSdNyOAAAAFfQZ5CeId/AUuqOi5D\n",
+ "jlKfxuJGZZ1+rVyomjOIykvxtsjsuCiGtElbraCSFWcn3aIYWLrF3fPovVLcOnroBkiRMsdf5yJA\n",
+ "F87MQuoKeTaGOrxojCCCS64RiHrqNsE+7mfRRUDuB4sAEHFQHxBorgTukPSvrdFr5QDq+BhZj/6H\n",
+ "KN+IutwFWKX3ZX9pO3sI8My78TgRY5AA6FEcT91WcvnMypB/OWXzK6M8fYuhVVWipAZigjVOYhcF\n",
+ "9i6GweQFX9AV9EUQOp2qFbkrT5jceBRFLX6j4JUQ781/UGTekv1fcpCmzlpNpp8GdSeWxRL4gasp\n",
+ "F5uO5KW63rlhYccBo1cFwIN8txHNnwyQNiP00XC0PWDRZfaWSxsACRWrISow71IyUfcL7JNhjTII\n",
+ "rwDYATS0xZ9ep8siFC3JTxg1eNaroYfeI4tbkRHok47Vk+CUOQPuagVBtFMOOcy2OUbw8AWlAAAA\n",
+ "ugGeYXRDfwHM79ghzBo9nMnzfQPPIuvorxBb6AC8F4fYGD/t93kNSKNSEuhUXq9FKGtxnCkxN880\n",
+ "BPb/uTbjLTQVyPNuYlGl/gTlyLcVA/cDoLrl5TvaR/AcSLFE7C/t3kLx0STNibmdAf4TsHWKSblH\n",
+ "VWB4X7oQHrrDdhwIivRgUZf7f63j2XaGB+cbp5aHCCwJoovY51YTqsZZTz70FlSnypPHQBNzif7h\n",
+ "uvZkXhtEzpu9rYMo3YECkgAAAXIBnmNqQ38BDchAitLfY16mYQAQlVmv7062W8KLpIS1/zhS50Ib\n",
+ "b3ERigmkZKZMPaCsAi+zsLcku/gHGHnVZpuCZMFs72gmyuL4JFo6VjWcr5FtBvzIgD26rBNvP73P\n",
+ "nJjl3JImmFHiKjNez/gG3zTuYyCACuJCEYXyuEmzCM13hdCPHKg5GZtso0Z1qk6T1k2oiqF/3RIn\n",
+ "kyjRWuxBlHHmJ46TXULiUY14G+RAGoXI+u/G6muNclld2bq+6Zztuy+5ynaDWNNjuN1Ag9KUIx2F\n",
+ "XwNdepmp52/rOvISNPbMJ0U26OvqplXi+qHTbg8MLpUSIGCY8w9FZ5woLAENgvgu9M79yGlL20e7\n",
+ "ypJ4RMBqHYDpEz6Z+SSjXD8LsJ7VKlwo22A5Yukp1vTp6HHA35nV+PXK09DuRWKKdQUzmXVihF51\n",
+ "/+bB0PEFdoNxGdbbM7WveaCJN8XI7JgQWvw2nPlHX8M5QyPGSJ2HEexumoFrABvRAAAB70GaaEmo\n",
+ "QWiZTAgj//61KoCPNGHq/MxnjqmxxQAEHvTwibmyMZGX3ES9Abh1tMR+/DjR+6dnqRr/VxCl6gEP\n",
+ "wJ/5EYCYfGaGmQYsLOeM3v2SZjdvqQBwrwKk5A/63kFm8fc3QCLe93Mldv3KWXHdFT7/mudSntDc\n",
+ "vJwStG4jgi5LKlWdSrVaAxOmElsF+zWNzaCIQ1dOiZqi3JKj64hOeq1XIWyGvRvh6OLKBpB4rL6W\n",
+ "ugf7H/IPbSQuF5jWV7zL5LhxWiTiI+kAZTUMfO2YOLzmhCUSN9GAmNzgY4D2awYB4V4QTDjI7kdQ\n",
+ "tL+3Pmfl1HVilu7nC9CzQSvWIosiwv4btyHTL7IPT2gusybyNfW8QO133L6KbDhhXSDWUtcIFCgn\n",
+ "QUm36C9hvgGjorpKYr5VnErpJX6fRJm76fFYs8/nt763alyqdcSrqaTOLaf/72Wkkmlwbq3nLOIw\n",
+ "ADFDkkAPwzaM811K11iK/3HaYRT3nEhjJQFk5v4WBXwIVLAZeKdtC8YoGN9K6isN142fOG3s6fm4\n",
+ "J1nMtOEZHIwep8In4slLmHh39qBzhGZO3igiVpgz7u+JMBeFkVHe72vduBjIy+1dqvxL/TPics3s\n",
+ "+alwfTMNQKave1qW+5Uj8jZQTjcLAtKvzoako9VMIOfQUQAAAQpBnoZFESw7/wC9ZU4P+UeGsidW\n",
+ "4n5tFkXmtxppYvKQ+WGj/x3AAdl6+9c9x7N2b/yJykTvVggfpMnFUWtxla4sr1ouwANom+Uf4IBJ\n",
+ "/zXPovndpGdy98nJbZxFU4rrWpr8aI4YmRX65+IGTn756CZWwXKY5DyMgKnDcCtk0HEuoHgdGhh7\n",
+ "1PG8+nue+pE9pBHqiBNWAjPd90qfMtABmMShLoXtUObqYbqXhJvVjjFhKdPS03IF24fu9Z0ax15V\n",
+ "DnkiLmgyOCvJmcdIX70L2ZEECd/hxrSq9JUVjC41OX0F/ayI6GtkPMUuZ2xWkMFo5rqOAo7v0Zlk\n",
+ "ke/79TjeY13FNiowqcbhMwfDuwAAATIBnqV0Q38BDXNpg2t4nJdhAA5ru/5Co2KbB/AnQt7fa959\n",
+ "0crOQgtTxL36jtVyKPmfuQMYuWbJ/7bYTEV8sEjceHvN6B0CSEZzVCjaPLzOQJZMQpQ4K4WKPlGc\n",
+ "lnEwYAC9Dsejj7Fbk2RyCFiJinyU2HOscjUR6fW2jRsAFpVq/PtZDVPvesPG3AqooVaKHp9Ex+Da\n",
+ "AH0OvccSugyDKsRBAEiYR8645aXxbFSzraQsELDsIIr6HRN8F3lUNVBvzNO3mxBhq4th/kgZSjjJ\n",
+ "JZrYmg3UfIUO/jn4xs2XQ9Pa7Uy5K3JhuIQwAOUKDmAMC0p6fgz2on4ceyEcfiCGDPZpPyL3391F\n",
+ "dXID0ctPQ1a+Hk7UcAc9gSDL8CZKz59YyO0ACPjfAKV3Y2dbTAKdWBsUU0EAAAFEAZ6nakN/AItk\n",
+ "aaqbMCcBE0iEIDnEBfRZN0neHQxaz5DPSzK0ZSL640q0AA5jkP0YAYAumNCN0MxJYpWFoQ9r43H0\n",
+ "i9SZLdv1UbgpG3aX6KESZW7AgdlevaBngH/w8xYsqWx5t90zzi7x9VyRYpIAD+XTrxvgBoFILNCs\n",
+ "gd+zDA9uvbAPlLMwG/qFltlwvLokMt344erv3a/C/ySOwZHFzpakInpJ7MQHkmKi1KHZB5KrfqwF\n",
+ "FnglZJwWbe7LtVojTdwQnAksziDNlEWCkMQQJwziY1KYtlXMNX8mZ3MtYR1KNf/CNin7/ys9ZQyx\n",
+ "4Zlk//H5KDc/8O2+JaxH20CAaAABxgSxo+yJal1LnRHYfOQ1TygNueW/rPAA37g/6fLS7mbYKz7k\n",
+ "dsiSiy1mAV7n/qq81UHJPShQSXK+E4Y5XKuXEWG4AAAB8UGarEmoQWyZTAgj//61KoAW7kO9JCjl\n",
+ "XSE6nAngAJVxWWFl/YDS0gZ32xjwUFed4hmI6rj18z16nS3Mz1iMmFblrtaE4zGXS046COODiIwH\n",
+ "QG5lRmcBExMKlnynQruQtA8n/NitzdP/ysLrucGyp5nKV+XyJURULfxk4kwNp0a5TFlJ1fusOOJm\n",
+ "y0hvsvEg+d4Jz3anvWT6M9n5A84CGucNifV+WlN9gI9gs3qSoCZdU/gglcFYM5u8YchzhQFyMKxn\n",
+ "kpfWK2LU7aaZHt6xLbqjuv74523K9/dtrrsFq/LySiv1P9Wk6/6d5RC72z4cyaUq6hMMn4IWWRo0\n",
+ "zJIM1/lSYsWxt5/M1Mkv00Rt8OZvmLxuFfd1BIVlANlpgZ39RYhqqzU6v1HwaW0EudelFBGhr5mf\n",
+ "GaDE05Z8ywp5rN4Qq4D4GNAGD/qgEjtaDDf4ZBAD/TAHBwxfNjm2nPAdbbbIuWSkkv8NK6EMlKqH\n",
+ "mOktd+CB3P6Szd1+HPnUsyQ3659r3XLnoi0cvM4usfW+BgxqT0mgHSgn/F6ajdTNM+a8xJQnT036\n",
+ "7195r0uF5vwi7PIviCQ2E4Vs4Wx80/8tBDEJS4qOY1YJ5aNV1OV82fB3HOimLHd2vU/d4Cv7OBh8\n",
+ "k3gNFcjeBGh+3lQcDCLZrG1mAAAA3kGeykUVLDv/AGVBMHxAlJYGEpFnv2bb0ADrwvVKxe7+SIJI\n",
+ "g0dPJdL0s9Hd2mGX7rpdIiUH9ZgtnBO+m3uPNae/YtN3u2p0kkCez2KiPNqgSoEcHM+ePgq7afkq\n",
+ "0HHTSZl/+QbjsyfbI/0lv1mLAJUd3u7VZPPHSdXK3vwLfAwOe3Nid72slU892DijWVvanzM1IzDQ\n",
+ "XfN6x6GH2qfaLrHePrJTJxXC/RSxcAol7x2JJ5OA8VjN8jXu0yKirBiYqgcdFf9odG8j4bRmE2wD\n",
+ "MG0SKuGrJfd91b6B7hbRUwAAAPYBnul0Q38Ahz7YAbwPIqnkAA5sEIcKo2/sVUP0LEeFOLjKjaet\n",
+ "5YFAjDbL5BIdGqWouG/H8ozoec2ZpUbIZu0ELtG5yXc/5opSZlnqbOpqdTQkLs6gr9dv5GbFvVjS\n",
+ "Os1j9FIMQsdc8pttosNtygWB8gLxr65El6umAZE5CVU9Mc8Xxg/tenmTduGK9Cd7qRDiu1sLYR2f\n",
+ "or3KBMo8ebz5q5EmWucvREbYSziQIIycIwJg9OG+aH+ZUEQbjbfHfaiX7yoxGJGP78aNOHP7GvC+\n",
+ "JwM6DxnSyowUBAqkW8ckgrhet8gYYrt8MIe1MPJQB6sv8hHuAXkAAAFWAZ7rakN/AI9XvmYGr0rf\n",
+ "QEvrPPTQWEAA5ru3wBCXPJiC8OaE25OBvVl2wRXqp61wQU4HxGJCAxkSOz+G3Yzvg36uCK8bPZTq\n",
+ "avaOG/H9WxjsuwAl/bIYJdnyD151CiUZ34aErVIixKJ53oKrLeHr3xLgxuH+y3w5uH5lQRsL0Pmp\n",
+ "0jQItTBkKwlPywxFk55pROuYZWi/h/N19QaFlF7WPobUElLlr+nCH+pVt1nW9/YwVGz/cO8zwmWe\n",
+ "Fb0OnFji7CYSsi9ScC3a50GjUP7IpaY5NAHv33V57bkO/BD6dnreymTbSmQdcj7PAJkvz610fMqn\n",
+ "mDGTMB31oxAIE5eWeH7mBZouSgmtxEamul7sYaTPe7mP6FqNCz0h6wLot/zAFwx9/D2+XB0x8mmS\n",
+ "b086o+gqkoYoHQeQm2Sb3MU1Bz0KHDGo9jCmsBmecxs3oNHV4KaIoLKAAAABrEGa8EmoQWyZTAgj\n",
+ "//61KoAcdmk2P6doyaR4wEHxsIcmssCD5f+3/v8PGtlbWZ+A0oGGFPTAdgmU2TFbrRxlmwUCouNe\n",
+ "8freV7blHDodFImzwP3saA3AZT6NUl7vDGH/tw5n9y8rP4XGnhEXBHK+6jIhoAYc6G1CDX0mqczJ\n",
+ "7tbei5I0YSkDjza4rJSbAF6cRoJQH3s2Q+ggBQR0BfH6N3QlPVwd9YFvP6++J+XrbNU56Pxu6Wey\n",
+ "51asar4AaARXHregTXL4xn/VNt8Ppk2xD3/1jXAVXdqMlS0tYGM/TtrcuTC63Lx21RQtklG6k0xA\n",
+ "eWm6W0oL0KTvxuyegpC2ySp5v6zpSEYvzWR4IYirfT0RYU+jLtX0t4M/L/0k8xOLTHbouoUPD6DN\n",
+ "dYYLYlVX5noJzjCAVCiS21OCcIKqWD/YiU/+dTZpdFFNdHEa/MPvUEq7cJD7ANJ0YUweepq2Eqdh\n",
+ "57SC4Tpg6jyEnFgMaHQLSz1nJNh4lxM1TPouGZ9bmQdDr9WY+nwzRBa+ZLnaqBSYKWSKEs/TNtNZ\n",
+ "ev7d+EnJUf9G9CAmmiSDlRAvAAAAz0GfDkUVLDv/AGU2nAwHHyQlvUxuENDSO8vXFIAPilnMlQWb\n",
+ "nTHwb8wkIo6JKOaIP9blrrNXcWeeQDVprB1Bn//+nbSDHls1apJcUyMHUmojA58P91gutTiF40zp\n",
+ "fDaF096G01gcvpH5Za4+DfUvxQpt/wH5PntJzggww1tLhP1NyH5U2TTgrnA/BevK2aCa9xCuCVgA\n",
+ "JJZF4uqHE//COeWbJ6LIFJPoadxAxbrAcxPQQHMzEG5G5S3Yfd+YJBLrdO35JvVrsUTYO4AfvJeC\n",
+ "zwAAAe8Bny10Q38Aj03WPPyvISnWAC7KM5WfLH925SBeAKcvJaYOa5WZCzX9H5nU/7qAFTCgAnl3\n",
+ "rAoSnKk1337XDAnLfPYAAOSIcqQwF++e4HouwNVAWCEsVyl7Y6DnBaBT2mD1H8560KoMvm3kKNNC\n",
+ "oxFCc4BdAIXk45JUbGFNGYAjCbBbJInMjwa41HA404yKnJG7rNXdBctnsSL/36UoXvVx3J2tGX84\n",
+ "+FHk7e72CsAyB49ajd62idmFQji9Jj1GaiqtCIjWs5o6Mz8s5QfrvipNYYD0YZ7gBBGm4AEz17d8\n",
+ "isscgsp4QI2odbuEJDq1nfJbW6+1HGcN1XfDC1Xfa5IptM5UYHm5zIT4rSPBIDE6l8/NhVxlFP21\n",
+ "JPQ0DZxnZFvxIBznQbqkhaGZjMafgFoRzC9Nl17x+K6e75RlplRZtXaUIbjAUFBJIQPkoIrT6/O9\n",
+ "NtkAmnl8qqUC1RktW/RjiJqOyRTTITHqNKvKy/0gb88xEvvGPgzcSs2KpkbHJWmCGIlSWEkuqcCE\n",
+ "jBn3Y8XOQxMUxEYeLPJ/9s/F2fT5NAnko+RFlv75fWLekZZP2s17yJ5ccFGhZyrkGX6u7xXK7N8G\n",
+ "Qlz8qfOHvgMQrlB8p4j7qtnPgBPf8mcsM295CuAZxkK+sut074W+0hM24VMAAADaAZ8vakN/AI9G\n",
+ "UrhSy/Rrhc/LGXguupji5cAHC2DVoxU1gWUkKeMT366GcmuxH5O8lBZJeHl8r2KNT0EaVARyW7pN\n",
+ "L4uNsKKl/WAzLJ1OZWTQf4NaAfodQGO9KzZS0j6oGvr/urKiQwbP44Tv//glYQyyCFeq+8nnrHBj\n",
+ "aACu2w1otySh0DYMX412uY6EYcx3GtQaRpNPiKQniWdVV2KH48fVxDy0uLS0SmCZEAWLVNvtWqO+\n",
+ "q2OwCBr1m50s0i8eRTlSP9xoKtxWC4ZqL77eAW3kYEBJOAywYUAAAAH6QZs0SahBbJlMCCP//rUq\n",
+ "gBY3NzYDjVIwwAKbp/vtZn3NtK6t0V/4sA0MV4ijJVoTZ+e36T0E9eQ0LOyzsqR0ULZJUDRy41oM\n",
+ "RdsBwM4wyEJC67daWmuDEXKhZo862uqAH8A0QJ5u5RKBPFpngChYYJdWzP3onEWImG8Yryy/SXt0\n",
+ "jQ5te76AagLius72bzwZ4AZfLm/04ID6oXhPwqkf1cNsu4/kIt7oCOETiL+lzwHLEnEsdPSz3DxD\n",
+ "uLGkH8o6jHofDxEXcB6cOS43aUxGKPYPtHCj2gw6RzcRoX5lD5mwqtoCTxk6N8TxyipSUyNnbA2b\n",
+ "G5NuBUVLHTce3QKY3SdkbyH/wzdOpT3YHUE+FYQwMKCF6SMyMBxp2gI9k4yUZYljUiekF2XIFkfv\n",
+ "TFy1RUmikOycLKkTYTreTarsMD5JfjZ2FJWrroj/YX+uNeGtKNZl9Zyt+k8u4Htq1bPYEjCrLHds\n",
+ "qeIuFWmvxTYEQblStjDXmWfITtxy8KvOgn9iV+KlidrnVhlE7Dz30fuHXxxFZvIzhgU9uv6sSC7T\n",
+ "vZuGMsKGBGTYmSe0P9hLI2VyM/8GUWwG/AITiU4a7OVDjUNRPaiIEt8jt2oImPIY8qcrJ82CVd+P\n",
+ "mSjoppoeHUTHmeo+koGqjhwT7ueVHNT5VZ4yuGKEDdFfEIkAAAEMQZ9SRRUsO/8AYrbCELHs5dcg\n",
+ "AyOPuRHZUWtdXLx9XaNQixO/8Cc4Q2MgEa/wKETsHiR8C1XOv7rI3JB0rg46JfjEArbHaTHmANKo\n",
+ "+czcI/sIduYNFOE3TvObMh/KtGpZSdF+qnDDtY8zD+7RQUdzmkG5zeDj3u4Vq+f3qnKCwgbU+U0R\n",
+ "dQR9Q60wXqL03p/iYVxkI8jJqvkECuxT7efJI+5rmzyP1yn+WKY2EsjjB7bwwVfe6RxBmzR9Ed/9\n",
+ "CA95ILUJxNg4HsmCO2Ko+MqZAH3wMlG18kUm2ogL3cKIkVXogjofyKhbsSpKLpFFk71DzB6NrY/3\n",
+ "HfknWM2yn9yeQB/joufGEf/bvMAS8QAAAN4Bn3F0Q38Ado97WJWiqN4XS53kTA5YWsnJBdebpf+9\n",
+ "lcN5zPySAC6fH/XzBsBKbxdm4pTiPFVrmGXyhaRiB6dxtlwj8MyI40Do8AXHq41BAunk4K4PTgzR\n",
+ "rFycWqaL549wB2C5jNCLXlq6Tuytik3ijlMSkx9noeIG2Lc83eWkRkQieksQSO4xI1tzzkdqaNhG\n",
+ "ExZARu3MauZwrBopslb/ZLdR5ZS0G6p8o9DD5cphJjxJoSV/70/0Gr+woS8Zj0JpVvvpygE5bXQp\n",
+ "/YBCqjmq4uOCyt9SvCzPelUEwXEAAAGyAZ9zakN/AHZ6+HiwE6fxvgA5rqP9zmI+FShvhJS43N4N\n",
+ "sc5a7qq0DK7DHadXkQxf+APmeqLrIGM9X5aCQgeyxdoAlcQoyNsm6ol85w5z6JV8A3YntmCae+s8\n",
+ "+8/Yheg1ctJWrSharoeypUyemQeq9Rm5cIkSOS9Ej0hbIHyFhPQW6K3SawgMNVKQ0s1BpJvXDQSY\n",
+ "x3jIEdIgEtwe7zce/DjcO3RNN3g+SlPoM7cl0qJbM44NIDG9JGXcwVrY/YKNrpChX0yegP2ZHDI1\n",
+ "MzOs5eWP/2l5loJrLid2mK4Qhw6EGFrIadsV8rSjzgHRNuzJ4U3JdubidEobU0ehkU0P6MYRK/XM\n",
+ "58mVywGbsw6LPu56h1S4w3zHGYMd1zPKOsnCUhaRfrSZTxvjerNQ22prVPqBstk4JgHdnSScrwGw\n",
+ "eQcqvIw7gKhonPDKM4fJtO4n2EsI5Cd0iGMjmgPw/PU3FL8ZP3QbYLMwZ81Wd7BLLBDf+ngKiFIe\n",
+ "it4neyhhaE/a71b8TxeM/ZrgH9+D76dlgPI1ZJW6CCVyIs6Y5gK2plkcgRYa0MwWF+1A6zPtBEgA\n",
+ "LOAAAAIIQZt4SahBbJlMCCP//rUqgBY9we30eRuAA2kMf/9/gX2SHKs8Uq31+W7Vx4LugxILnhMT\n",
+ "6icG5WQzdpL8yjIXjBq99nVaYweUdJE3LrdOpsVxNJ3kODVBkposYOoRuOMi/SNhcjrJwShp6ljG\n",
+ "Qs7tSeRJSYDkvm+SI2ckjbManbEesw6wo2ZffuryaLuWkU9SNALC+2QbPJD4bFy7sTmB9+6VOdMm\n",
+ "rnLvYN4ZyAJz7OhQG85P+JnxdgXgvSv66sWBs05p3vOE+53H+HQCMTLVgvoYmHNTIYtZ5CIln4hA\n",
+ "GrjLg53unVVQTiYlSzZrRE2vmtsqac+v6CrcbtgC4HktflvPTsvgqWNHri9NWa+EuXgx/AgGkZVJ\n",
+ "r1n6gAd3jtjLtv6YvbPiBBo2AhBUxCbYyroAjcvjwUBtRjXTdDEvdYfItmTKA7W3+KvVi/PCtod6\n",
+ "/3gOoaA7zRdO+8+MHlGl/c2xzQhj2O1n8eJkOu+NcsBkpmxyosDi11EOEaiQ6vfnOvH9MSM+7D/v\n",
+ "k91SLlwv/nF+5eDPHSLZQIoFUjHjwVoSGCdOLqmIe6tsfTERCeAhC+1bhRhe0612KIL6izjolsR2\n",
+ "nUgrl1o39HqnKAVqQ/HguEezLTgmGW27Df2kp4E1wRl/EQgEcsMfBPga1ndY4uHPYq84ArNCWk+c\n",
+ "YwxlHAPVC3PK3Zp2kQAAAWFBn5ZFFSw7/wBXFVHDEfqz5TAg6AmqzzGCl9B1ICKhB+tKz4Y9Km1L\n",
+ "/vZyZ1OR5rO815FlrTgGoncUDKVNjpKrVerCm+HleHb1b4FhYQG8B61zGq10uLuoQHIyL4Cv2/mm\n",
+ "s5Mi7ZftErBt64oWYphUyh0Hmn9dYYheGFzLdE9gvqcAEGJDyLZq+nfiK0Px8pHIgaIfsEdSUYcC\n",
+ "8Otyxta0EKY+Dm2m8AtQ8jjuDmkSHm/uLhgf1uCnztOKFhkR+ydRCeR9tnIlTfiv3gJbsPT8swjP\n",
+ "0OUm6yT8LhwwCJU0AGI9hN0/kTkz+NeSHjSPaBx26MAfS2Y5NEtva844h4B/RttjqxMsNDiDrfB4\n",
+ "5xn/Cl/3XrcF40eivyUSC+FHzx3M4BoLQLOKf7iz8hKiUrqRGVkGToUMxkr5192x9xCjbuvLRMd8\n",
+ "9Pel4WIOhSi52xuSf1eEhC5VVAp4lHpZmHCbgAAAAaABn7V0Q38AdnTaV3jxqK844c19uepGJJSA\n",
+ "C7DQuTz6pWfCzxcMbX5JwHItpyM9y3YT46z61a7h5Lyukp+nSKoO0zQhT0EB/u6ILUCNvVbb/89X\n",
+ "7TVI5UN6EFwYYfi4uoFmqb+5Cd0J/+d2405yTsK/f6WH/T+vNB1DYWrW67ctgHOgMHAWDLG9mitl\n",
+ "16bXmPVSi2sWzpWYg3147nlnaD00aZHqQlrMPzYTLLFwWHOLNqCoWpNLMMEevc8AnQWeykk9VNTU\n",
+ "NXzAXhrKDXl1tLQTxZG7GX3K9cQyeUnjfH3rMBGDD2zCLGXrMfPVl9EJ/F5M49Rjn38sXUf2JvF8\n",
+ "D9r9tV1APCHN27+egfFIMDg9OhrQMtjAe3WEfpYS7pl5yHh7ZZ2CedEo/Wf/ygYTAQFI72AaUTrV\n",
+ "n47d9OSqAdYs7lkgV0864auRyPQeTKK1Sp3ADeIFS134VGBNG1VnrfyZuznYkI2r0FVkGFrAXpUu\n",
+ "ZJmyKqqILhJ1OTBM8C0VBV2QXBYa2aSn2jj9t40/wJJWc9IGAVR0vj/u+wFocjwf4QAAAZYBn7dq\n",
+ "Q38AeUc/pR5QUuADgu7/kKjYlIf8yn+MfKKvFMJ4eRJz/DRqteBIBJsZW3T3phi3NzuSw0zOvEhr\n",
+ "CHz7xEUteyaR+fa6YCBeiCtangbUerW/UGoCobzV/74XB/lXH53NcEw+6x9o3/ZgwG/7l4psK3P0\n",
+ "EqSwtCrcKAAv8Wi0Z88mFp3Sp19shMF41mqYa8pNsyefrruQONS60LHg/1GySbrTeTWW74lCDwnt\n",
+ "BGXpwghp/QF087PP7hxkE8lvu8APh5F1FTiOCBSvJFm6yFC/tz24gmveLoV4Rq/qtYWRE09VDCDH\n",
+ "yjftToPMsyi4DoCtXsPRk5Jxr9Mn6xDxGjfz8uMmOKJ15ejPi/Sx9cR1QrBsU9dhcYifdB+c0AMF\n",
+ "PolB3N4pBZAASP6m7EzaTer6yZ2sIKcQdlGt9xsZ0SHtS2313gpdJkLEVrHpO5/BTcfUTTcK1+bC\n",
+ "PwRYX+iIyInP1m6htprdy84ySZ5IaGCpRKFxMCf5w22wXyyon+dlMPKACguyEPTCCZQ2MqEuC+sa\n",
+ "uB/hAAABxUGbvEmoQWyZTAgj//61KoAXgR9s4tVmwJ9HTza3s57iAAoQf/wjqzjlXnP+29f12EfR\n",
+ "S7B+4I2epG2qM/uoQ7VlrfXFlhjyX/aTq0n55QXAKa2xUKolKsuMfmZFFc6+GP96b13JiSidvPgt\n",
+ "2SSGnq9Yw4MfceFmgOaZRcwoMnpdb0UpI73YdP+DfypKyrkDqKWcBc/BGhrH8+XdnpCNDXfg5rMl\n",
+ "b0uFlQ11yUxnDYOfRwLbdjJA6FYddawSEVorFtY7jkSQx+OUBUgWkKC9rhKB+uV/yqQsvbuFiyYV\n",
+ "MviBpsZgSSN0TOC5JedQ5H38ENVBLjXnWZD9PQyueLoT4qwtI+7lodFSnBG3zboWdj6P7XDbgKT/\n",
+ "zKkFObUjwhstiQtohzxd5AXhBH3DQqNv6mRzuMxFDcTEo5ut/0/1HrPGOF4R3sJ/eQT+YnYseqvc\n",
+ "0m5njpgI3qkLmn8efBB4q3zWGpHCxBwC84HKjuugMICuXfcJHKn0aWkn65aEjT8AdxDWE09InGyo\n",
+ "EM1wsU0JgJ/qq/6MdHWfQW6+bt5xWlpYJ4axi9wZc3Aoz+Rixn8UVM2e/bd31+W37ucz9udquxnL\n",
+ "2JdNUAAAARlBn9pFFSw7/wBZVXkLa/7xg9HEtDOpc+GkSv0gCD3x6eQNkROUaCyL6QH8m/0USPLW\n",
+ "nllgC+uXg2X8kUpaUiErsLvwKd9y+trtKwV7xlvkAn0JqEnToCvptE1Sb8eF86DTi2ywy7WE/imn\n",
+ "jNBYQny1cV38ScnZp/V3phWQAYBG3kUdNNuj/FyVB7DgbQbTLK48AO5nLYv8B3LvBNBfBJ+ym1yg\n",
+ "YJXKwjm8kt8xUjO2UGKeggZOs7YHWr5Fj8OX4jV/B3/cMzP+f6YyrayA/80F6f9vgrbTlhWdlFQ8\n",
+ "QtrHKjmrl874OSSPJYH5wfQfF/1NrQd6soxjmSWYI9/FqOPoy6ujUPxQvg1fUda+wK31Cv8gD96H\n",
+ "LPqpgQAAAXkBn/l0Q38AeBaU9hYCjxV6lA176iBcJKIHTfhwkqkAB+a0LmdvcgdK3vyEsSkCI+8U\n",
+ "up3OQ4OQId/B45+Mf5P4Fc2VsfnQAACxyzNkvgEEYwZk+TyOR6/VZmeFNYMrBdqc2NNBlh56ISK/\n",
+ "h5V9lagvsX7yv0p9Hk6RXo3uoMgKhKOv/QgBAqhUvAKDw4DS7G31tehd/myRMmCPxIJ79bZsQe2/\n",
+ "iq7Nquzc/VDpPXFZHPvOmiyfyrt6Fxc2jLHZJGpvacPTIeLJiSaBxgRTEKBr/xXaKQjc5nLhlwgc\n",
+ "HSz1WRlyOsXOkob3rY8KoGVETaaIvHEl7sVHsV3QN7iR2rIGzf6YHv+c3l8OW1b7tAMShtcCLifl\n",
+ "8k1OtS8Z5o7MNTObuLXIONSPGo1fC97qRzqHFEfMZntEMqsFjjWPM6JduvRiAv8p/h0kRdcTeRox\n",
+ "t4PEdFJikYgCJgtFa00LDpNvd6Vv6MImiivCAgL9L7zEaNCr8p/p5ZiDugAAAO8Bn/tqQ38AfAnX\n",
+ "r+Rl0wYAC9kEZglKr0YEZPxbFiynbDVLyUoB5/4mwbggJCKqWcWLXkOc702XkfuMANGy7OD7QUCV\n",
+ "nopFHkp77AuzGvvM2JQndhYVkdbX30/kmHQDID1DcpthKQBbzUjm7wgAOqbulxKDc1OUw1plN1OA\n",
+ "iXs8Ju+zQDtZelKPfekDEF5iPA8IQMn3LLocZ168PVHW73hdmgfMFTsqduJxZ1oiezDuUBPUKdNQ\n",
+ "1lGg5KUsS5A9iNuo+n1shJKCmk20FfXGeNEywAjYeaq4bao/dd8nZn//htlIayY083IymAgdHbKW\n",
+ "UQAAAW1Bm/5JqEFsmUwUTBH//rUqgBbB5O6qXkABRezeefAxp9PjwxeDBuTTFSUNk2voPSz0T3Lj\n",
+ "1K/LmQtEI6YkskJKgxvIXHGf8LHTV/h2Mg/qV3IQ4zvBygOQs98iZyR5jgV+hQ58R6xIcus/6y5a\n",
+ "HrkViRrv8Sk7So3LYWmfkLzyR6vcCKhF/sCJsY8RS8BK5OOGU2Ll4Qs1n4jPQwTLDELf8SF2+07z\n",
+ "zB5hexERnOHmWZ9THKXS8j6NXPrj2p32k0gvmlI4b/Of9evEX9mDBp5GtQHOvTswQ/VYUajAUXz4\n",
+ "5w6EHuB/k+FBz9pe+B69syJ2X5MYn7Qi9rKpCl2kZv4uAWXuNo7oIaU7hr6elcFz53tdL9AEjCAb\n",
+ "BlT3p448134hjvo9lj95CHF5teK1w+R310Gc3NQ0eeJcsiYD2EoVrHHjVDF/m8I8JtTUFdJ3xm+G\n",
+ "muADOcIpcqYbeqyKWwHmgvRze+DMQbkLo4AlgQAAAR4Bnh1qQ38AfBSmnoPKZzTuFWeZOcrkeWeU\n",
+ "yVIALsozlefbqRZf6f7w7fkPoFSkdlxkJJsnO6qzfbc/Kotbm2yeFrIQw5yspszQL8gAAvMHKSnw\n",
+ "f4CTQ2vfLY55MADj1baDD7LZtn0UK1Eh1HnwXobc+mdHd/JEl/a2Tszf/EZ9+J7oMl+BYsjWKwNY\n",
+ "vOv5flnnPLcex/hWFIF4n+hpBybvasl5hI9mV0CeAAyAclftj8N9n7hadcpM/TOVmHbSkJ3cr/k+\n",
+ "StSwI8gY9k3tmbMSZc42caMpFr6YdNCCIj52zmNBccPNFxW+UT/4qCqtX1gc2j7obKDaWzC1yj1A\n",
+ "td8/VAjqVn+FzuuEokhhvubRT3RCdxeWnBTCG0CxwC7gAAACMkGaAknhClJlMCCP//61KoAXgkIw\n",
+ "VJpvAgAqN7f+5rJJcY8tkjj7p4LozjswOy2dTydK33mOBGS+NojRzBOlwt3ro+/vdQIUTIVrXKwh\n",
+ "2SrHPCPJXQoCjJUPkRODCmqbZeBHsv1r7iIOZPpX66HYYhWgPLvPzAb/Nqu9nQqKoyphhNy32+S5\n",
+ "qAFvjRKLSjPAx7GoKGUNMbYduhsBsrvVTwhrV8uWAls2mxYggJzVuRUZSL9cSt+tjl44BXjlbo1a\n",
+ "I7ybNHG97GCzcbSNcg0RA+iqwDsdnrZCO0zsNdWK1qVmER0PsSf0dicSrZwIcxZWy6JbkwQn5TnO\n",
+ "kAah3wAs6pJvW+a5ZiJHl6sVlU3yCOlrECAESqWu0YR75WfiMXgesBOuXGGNsC3icmPYNzM93us1\n",
+ "7GQTI6RmmFHGo+B2yAB2YJiK1YN/T0ltUuXfFAvL4UdHgEXOVIqVj+S+YpITMKy740IvYQ5zuZPD\n",
+ "ahdXF7HIU7xE0W12w+6qkuyZwxUMXLXdgx6svudMor1GNfDCdymcKIidhuuXh7vdQrgbivH7usVC\n",
+ "zjMqgjGahkW1YlmytCooEIoULx5ux9DK360iAi4u/nAomESdiosanRfQ9jQdJSpo4rurLfeCLF1Z\n",
+ "XsQAQRTcezHlxp1tz3A3WsYMA9urPBB8pUlDdB63MfZDCBphVx/Ddv1AMvPXFEPu18oREsV3BdKx\n",
+ "e3lxLWWpytzF3zXttYGgBb90j9DgRGE1uaAWyEAAAAEiQZ4gRTRMO/8AWVV6uU/hFqUNYqrP23yu\n",
+ "FpB+ECoAQNVnJ92i7ZF1i7u1D6K4L4gxm2RaiGsRDmf2iYWEjO8yGHAqwpcDep1/+H221WMh98AE\n",
+ "VV9Ferf+hy0D7Zu5rX4Hp3s1TpcNcEBIKPHVSHIzaZKKfPXkqE/ga/eepp8Bzdc39OW6g91hVVvf\n",
+ "WJxrnf77rapWbmivuJFfeO9u+RRykk/agdEi5E/5a475KGQprA2yl390PNrCvoamPyXbETwtbYAQ\n",
+ "pF9uDZkHdN/NQ1P4rz+zQLJx21eQsP9WBLswpDFYg9BjPw+3VrVEzeid2j5wJBlq+56Hw+Ex6fI6\n",
+ "1O0GbWSAC5/5Zg+kGX0Yx7/We9PseMWGwXWIVwqI7oHPEnK6wUkAAADgAZ5fdEN/AHk02mburIzA\n",
+ "1V5U+8CauxZABexQ9zxvy3GIkNn2+19EyZqnRm0DMMsXP4ZwiY8vW/qdBTlATfbmIFDxCTzt76+L\n",
+ "X3WaNfG+rqTfzj6gLFFHl5IJDtQmIC9KAmTgQM0Lp8TEDdYJnPYGFybq0Xdyl74+130DteV0SYTD\n",
+ "hgB6230zJvCx8ZW04pZHmYvtJ1LZAxF3BAWKPXcstkh7/Er8zYdPblR7K6t0r3b/sIHpME53VRBk\n",
+ "ggj1uN/p+iN4KwToxjP8kZ1opB7xpkyOQpicygiGnwjU7EpZpywAAAF2AZ5BakN/AIdka2Wer/IA\n",
+ "EJVZr+9KNmiS7zXHA/5uJU6D0CbJOrsLPWcfwAUCZZjhlCsnAlgzrrGOONmuxU3En1TfTKb/7Pu5\n",
+ "1R8PfIYkV/dZFitvMyRPMvzwXX1OcxtjbhM+M0LCh6zNEWJFi2Pi95t8cspIknD4iXNUblA3oEFp\n",
+ "VGuXt+8S3Upf64YqAxWADhb5zxXL+O/gnWiyawM9fyRrYcExecMkEiv5MHRsJs8Euzdps1vwxzNA\n",
+ "Zu4bu6ic2K2ueNja78qXGaHz7xLoPIVJv/T4KAuseyOhznfFtKf0Ey0eSBVK9qutGGF83lfe5Wtv\n",
+ "xb73lHTKLAyiyJassoDHBSQLAcUPb4nB6xWNr9G9gWtqEIp4Or9tKJzZIZ1tnIKZFZGb0ELAlV2+\n",
+ "pKKDz5nW+syHi871Soc3HtgomT3Y1cp83yQG1GdKkcJPkU1uJVzsVPzbXbSU7/z2Q7cikc4seN2D\n",
+ "ryQ1l58HjUs0ikCXV/V/CDkAAAH6QZpGSahBaJlMCCP//rUqgBbmS0XBN5gNQAaCJTjyhVwVkMwl\n",
+ "GF6KXnd0XUyzqjFCJEv0D2xQiJu8if6sKo6qHl+BP/MZw8ss5OKq407INzCjWOsjf2HTKyC5fNLK\n",
+ "wiJv+PzieOozn64ZK7RRud2QUaDe0kuhk4uCClSYQBImrxmWeEf/X9zH3+ilYhfoZigVm0IoMiuu\n",
+ "YX1ERVdg0Ld9E6wxbYMiQAGJU1qeeTwc8vb3w3kiJheTA2PNXtrJ98RwtpnhN6QxMe1dw+aQWI7S\n",
+ "j0oQ9iNx73N93RuNVRxXj/57S9VltjA0RTZBjLvYS81QDA3fBgaNHNzOBZ7dztz/rTxxOpumjTTw\n",
+ "x9FgnvlMsjx7FYPKUcXD5quVKd8lwTlOiGVI7X1HEv3Hh4EvpYVt6azhUBI1qGunVb3X1lyMhWJ9\n",
+ "p3muqcicwInEt+BuHY92HoNXaaJJbbQmNX5s3QJbI28Pg4gc2gaUF4SQRcBgM8uwcYUzxEkBS06L\n",
+ "0moZm8bwMsLYCLj3fgXOyFudpfg6jkYPDeVK811WbzEz8Hcd42XVL0EwE3bwDc+i2I4+NERo6J6l\n",
+ "d4d7nOIvqUuorZnDPtlYcfSWgBqdP0tQHvFb4Sv9QUCBvXlH2IEiNzo/daaHVtbFRNZ3cag2HOiP\n",
+ "lMxyt8xYJMnG7di2JiwAAAD7QZ5kRREsO/8AVwwP3fRRACC0tQoY45xe6yfL8KMHlR1wbd4HcPUC\n",
+ "+4PcnqOzdoNv80ufRyOopFYryJahX+qWFUVKK+nDtdvegTv/PqvENcT8ykEwwQ7z2oNUdaMITYi5\n",
+ "4tC5YA9FaLSBorMGx3aocAbiF8065MBqyaTkiW7FtGRHVSPubGixAl7hiQRoBoEipfCxkE/EBoII\n",
+ "omSCNrFRyjd8oY66cDfZt+iBI44uLDeP6eHMEpBALsV0FY7iWjBLaYO1t2PsklOb93SAExoyIX1I\n",
+ "TiPXiUgrCYe7dgepAF31BCnOuxiIAPWKLDHZLhGOJBLqdemk1EZoKCEAAAE5AZ6DdEN/AIteG4cJ\n",
+ "hGXgWAAHNd3/IaNiUh/zKhTXYgf+UKkbUvWJoLo7whMXByWkvy3MotNcPaSHeaKS5vKy/hBJIgk5\n",
+ "CWcdsbd5QzFHyjOIZiaEAA1AziqRPTDRRVYKhcrm181rAlAdaYmvKZAOu92pmI39/PSQjhiMouSe\n",
+ "XVT3pg0s+/zN7WMQCHqTmey2TTctwD0YnAH9CK4EMAw1jPCCTXgop9epuL/iXjup2S+LS3pGE3iO\n",
+ "oIHon+1ERGRC2Vp3b2QAstSXzK/2zI+bVnxf0PhgKqa/NeuEaF2SBGZ/TyqGPDnQfJRorCp1s+mw\n",
+ "tm/3aVbjKRTXeSwl+OCfF6rMqjf/Zw8/4yrjLNmiyOgD8OWqATkM50NFqOShrrTCaHdcxgVW70ss\n",
+ "cCXKxvzAUCe+4nK4C3zP8QAAAWMBnoVqQ38Ai2Rc7ISR6q0L0pberS7nbElvP1eAuajd6ehFPCEk\n",
+ "va4007gA4DkP0YAYAumNCN0kma3A2DvFPa+NTDmrilkXNhiNVTFRLzynsy8rdgQPBH6k5DFr/4eZ\n",
+ "jmJjfYPWB5+2eEYYc9uJ5Ni70hsVFfV+T8zp+ZkLZnd2wv7AZ7A8baF9R5O9oQlCkoVPxkDHTrmt\n",
+ "rElQhX8Fi0yj2+BVP5O9UNPGQU0+M3KYUTg9yTBG2cCw6Drt49/5M/86NN03F5R9JS9KGOfJjIlA\n",
+ "koCavGpTFqq7OYU0RM3ilfXBmxvL5QoIK28Uvs71J3h/IvKmg4v/14n3/eoSpqNUCC77ty2SgAAi\n",
+ "rxQNIHz2GF/lpTynlwsORrYNT1lJMVud8AAQb+/SaHWQXmhJ+8cZTt8XuMgG/t/hdF6GqyG0A/Pn\n",
+ "hWRq+asN+zBaeyQUWZrjl8ry0h3WPkAZksFb/gV7ABWxAAAB/0GaikmoQWyZTAgj//61KoAWw9mB\n",
+ "34Nmlq4DQoTYIkneVdOFHxDDrFwsv7yxZXXwNkGuLMduj7QGT/7lr2bNfzApMJfo9/ffM5g789Cz\n",
+ "1Mn0zxePHMHBL6IHHRVXWyqDMhVLYnQ9xFtc1jml18If/8STBCOf+AZjMnARcFmX1IwLt/ziVSoN\n",
+ "e4GPKKZqfZWytoW7461OuaeZ9dvtxrCL+W45zobgR5vOrVM+Opl+w/eFlupHlgpQBWgJcPy8sZC4\n",
+ "/O9laiYA63xx6M701UUvGFsRI+RM6anXyjKc7TVrmZ/YQKRjqB6Mejs2G1mTDkBn7T2ZURI2vZ3u\n",
+ "VXRNsQnGYDxRUokS3YRHs9LEF/gxKSdLEEiHDqcoIHyS2FPM+cIJRSvB7sxIA3hgfN/O4qDK6VO+\n",
+ "t71oi1H0Bkz1ugONnVTpQr+WeMS5AtXXNBMXU+ycO0+R9eRe9BwSk0V6tHm/HJ45oIYvyWTj3yZa\n",
+ "JQ6q+o4isbf26PsTbuSAcvQoMnzEXJkqElGJ8Z3rZtdkIzQW0DDnXeNRbj2wQmuUNBknMsWOw2/t\n",
+ "fD8BErzYLXI65PwTY+6R5c6RWYzF9HNMLBaO1c6cI4yEu1DMKtZW5FrmVuc6hg7VnWxgAgOdFKFA\n",
+ "QvmmcrbHsqCH4rkez1y5GoMlxeOuW5WKa/JdcefAflYgakEAAAEQQZ6oRRUsO/8AZUEtmg0dqwLy\n",
+ "ubLYtABfXw0ri+bvSnwBqWW9hB3/jYP94x5LyZNY560IvuBe5T4EX3/71Gbqj7BS5SJLQ7X1JK0z\n",
+ "I9iR6McwRU2BDEhu+2JQm1RA2fBVxnzCyNr1JVnfyyuumlkNzE8n1UgnkIbS/FMxc8DghB7zqZzK\n",
+ "rkagW0hHwSjNf+LJf3DnbXyvnzmB1lcv8Z9QlsnPKDef2giSgbZeTNWRMfeu91kckRy0SSKkaYVK\n",
+ "KUUpf450Vl2TzPLRaNhk7Du1IJzIJRf9supxssXD9v31LAVibgyznyLU/cS57Vr8KEXG+WpKysV+\n",
+ "6iQmQ/hCoRg82drzuniAPltxm8MMUZwVMGAAAAEzAZ7HdEN/AHUKF3WsfCAA7NAZyuGlRySXJzA8\n",
+ "WtPYIqCp+udF6BaVoG3w794kSqeP3syNbVlr+uFhruNMOOzTsNGrbATFZMl9DU6mhIXZ1HEAskmI\n",
+ "VVSgXlz4sVX35JqYrDPP8r9Bsg/O9tAp7LnTMjWlqOdgOPhHpyqf/hmokPsCwqtKfsDhxP/tmX60\n",
+ "fhM4KsfvpygzK8jmUmY/GDBCISRQeW6U8uaq8guf+cvy+sP09JLJ4HsULhIsm6kyYO04HBdOFUDr\n",
+ "/8IzlOKX3w/FCxhimlJIduY8iySAFQmALOuag1Ry1Z3p7NpGIGhZp/q5hzsMAsH2jpHXQPdtFNFH\n",
+ "4VkqDlRDeGqieCr6gwu3hPQQfF9yauq4qf5R+bfPha9tZ3XjpRO4eqNaj2xEQrcb5cIJOAAAAUsB\n",
+ "nslqQ38Aj1e+ZhXsJE07lvgA5ryx/X3Tt1hQ2T/wP93u+Km2fQtCsS47kHT/v+BMMbdxEWzwYvcd\n",
+ "d3NYalS7o/aUthPBRfYGmx2hUIQijLOXN4leC3SONeoCputIRor3Lgsy985K8UL4nvf1+pFmRQg0\n",
+ "eJgJ9ubt7jVqU4S6enDDZ82+hYwxDWOROomkxsOv8nlizRgAHHE1n42Dq5sLIu8oVYp/4M1h4rCy\n",
+ "m7AmDrR9dbHlpV6pqPLshIJSKr7R6XCF5H/mgt+78ttEoS2XxbrmVQj6DQtTzcYF1gqzE9DaiXTc\n",
+ "rKcf1aBAFclenBiNHhbAMEE20Br4FIkr51a0ynzJocMgaUhstOH+7gKJGCsTPkykOiVzQeIGOfi6\n",
+ "AmLkbzIds0NOnV21ExFbxIFAMu1BymG8Kjwvo1cLb7372R2f+Qt5Z8LjmGrBAAABxUGazkmoQWyZ\n",
+ "TAgj//61KoAWP/AeMmkxh4qDG8hcZFMZjYIY//v8PGtlbWZ+A0oGGFPTAdgmU2TFbrR0QmwUCouN\n",
+ "e8fq+V7LhZ4IhSGjAEZXRALCc6lvXQaVk4Hy29vGup69bTfpCSIWWGXFW7WfQjL50GRbZZRZHQ2m\n",
+ "pjAJ2N9/bloCCNQEfrVxCeDkKfJqKlRpIdnOUaiQpsnEysqkLqMfxaCLAtiv1vFXcLPLizzlMPs7\n",
+ "NIiiAuhD4+CMokPsODEut5yq6fM1zRym2P9iids6rfyvN0EtWlvUXkAIdmS8HfE5DlX5rtipWZ2i\n",
+ "d9rb+tQcwCfWN6erokI6tARQJu2c+ZSF/sI7qofDkfNVCHii2Msza0cnJEbLkEfdF+gBET2KrdRv\n",
+ "E5mgO+6ICEAI6O/h7r7DxvTQ9Wxzo3mHNo6898yojVZYUAEyiEUBn5+alz6XfA0d5GcOXFRjv906\n",
+ "SVSt5h/ZyjXd+HmcrubYPlDuxhjCrkqyrKcbhfJHp/Mq+DI065H9OXdNO/+uDSHvPcKkibqiAVhI\n",
+ "DqTA+NZM5+PbtXMsqU6iKpSzqr3AN5mBITP84n9JoTkmCR2U/+5h8eajZc3UcAAAAOdBnuxFFSw7\n",
+ "/wBlSP3uCsGGoV8bqfG+TF6JTvUuRSAD4pZzJUFnxrFOJYnshFJtjPOw7rAcguf7FPJIlPqbN5qs\n",
+ "fqCPl7TU74m2w4/OJHMnDpS1+crxo620hZORUqqaN/UeMSuSm/KKx2/MSsIgkvOy0fYS1MAD67Fk\n",
+ "Z5FUhBYQOPZatG+Xc3Icj+kvLjp5v9fX+nJsaNN4CCl0quEK1R//8eZO87p6DKKxlnRfV62uCNE9\n",
+ "o2MWYwf9qwHYbtyqG6I4xWPTngQnrsOmiw1Sy0bIvHiKKw6nsCsKdLVPqCFU/q5rppy8Ah4AAAIT\n",
+ "AZ8LdEN/AI9CIO0JMMhrV/0AB0HLuqwUdobO4BdVbPV1Ioua5WZC0IWTaPE/7qAFTCgAnl3rAoSn\n",
+ "Kk1336t4zGyyPYAAOSIcqQwF8zee7dn7XFk1tvgy6W/qOMTmkEiEdwceoRsnhNmrNp/TK9OoMIUg\n",
+ "ShyIuwXG8nP6tDCpAEYSuvpzo5kchXf9jICMUEGqQZjLulIdzbNUEecLTDRk1r3gpdToPPcXdXTM\n",
+ "AElxf3acmkXSo1kx4tBmKJrXm4kNQ2oDIaqLOc1dGZ+ccoProxsI+jQiCldj17rGF1/E4alcIa3L\n",
+ "dIofRLGOPkev2msNj9eN+tELiQktxoUq9fKnDsRx9Nbc5IkysRYA/KsIu02gpfPyisLPQwjLSjpr\n",
+ "jTxnZViCfPC6UCMSLVKUvso8AB0eV8Q+lldoHmqd+EeBeeJOkPU3vuU/GQacMWsLnKmVt/65Nw0r\n",
+ "y1AnL9+YKkDmvNgpqgQANfZvj5NhddHche/p4la1cXWhY3W/jmtWxMTkOC4tX16bao5sNwcVWRvt\n",
+ "UHjkDIOIXB+3akBV5Lzaef6YjjT1MeUeFh/FB0tOMV3Bhvdw35krP/ItZ1RF5hRCk1oYqz0ykGZW\n",
+ "YkciBlvCsweWM2wXwX55h7SZHtxiKM3rO4Aff+TOWGbe8hXaapPE+4wKof+j5KoQ530gP62KsQIG\n",
+ "BV49pf0LYkAEd7yVzO9dhYYFAAAA+QGfDWpDfwCPWoxxjdaiaFtca/OwfG9dSAC6jYuqYuZmzKSC\n",
+ "kzbTtnf9idy9v7frgKuFjQymibohZCHRXBQdujo9Laqcw233I4Za+//Mdf06kxHe/IBTsCsxcSfV\n",
+ "ksVUEdqCe9dEwWwg//4Ee8Le2gLXqz21e4jiFyBOjP5GsM1hpupcfwZtr5Mo/ou28BY4QZExXJ0H\n",
+ "FzCqK0jKq6c//ut1tsd+kiOyZUVGRAFVkS8bi0vvjrj3zga9Zaa6Mt7yQii43DdcrobbVIWdc0QI\n",
+ "3+rsc8fgmOnJ+GJGdWYzpFLd5zMjS5ofw5IMBt0GmHVcG82Z6YQkqKJHzQAAAe9BmxJJqEFsmUwI\n",
+ "I//+tSqAFjc3NgONUfiwAKbp/vtZn3NtK6t0V/4sA0MV4unWIJlE1N72EjQeUPmvxOpceaVXIrAK\n",
+ "21oMRdsBwM4wyEJDPiji6fXmMlmmsCvOtr78Aj8gA+xKnVDFjoVlH7PPNvnMo0iZJruZeFy1B4T9\n",
+ "/2iVnlLy1r3LZhoykeyNXqaKEANWeqYl2HjpH92g+fHSONko5D2m4SRKJwFWFllUBg2RTQ3etVYS\n",
+ "PdQGNCLeaZwhH8zjnIe5Vuu46VBC79Le/PF0x5A18FileZQS8Adcvcamp8leUQ9dML537b7ARaSt\n",
+ "9Lyu3Sdke9BouNe3+hTyxzxAi1Setn//aNMjVtdKZIT0wLvPIMCsfe3gvhpNMtez9cWJYRUO4qU0\n",
+ "Dlg6h/pUIog+BzidDDvn6SZ9WUgEXhGZOFeOBYowQfwTGI3ac1V8O93aTpJwa/om7scQbOrwAjjK\n",
+ "gaYt9yqViBt3FWYRIoJJGYqmGJkf0tLvcymA+Hyayho8kg3J33tLzi7Gkd8xVzsn0AbjvoJ9u5le\n",
+ "OKsB4L1kcStddnytXouu9GStBCQSRLPeb+iGeZTwQ5uYY8D5fTAcb3C6Ob+B7IWRbbytzq93Kz0y\n",
+ "yYvbeUq1qJCNW3/zJeXeH+8yV69x5FRyM+55j6UAAAEdQZ8wRRUsO/8AYsUcQvOGOSSADI46r94B\n",
+ "/W+PEO3biH5wUahFid/4E5wZcJb1S+5KPsyD0qQEL2HibG5BPsDLysut2eDJfU6ijjP6zrYmNEWR\n",
+ "huQfgh9NsMVuoggiphkYt9ccXxVhYHn++9K8YAnkm28Kzp0jUWHgD2VeIoDjCfJPNnBqH+CERm3s\n",
+ "nubUQ9LmttVf/+MNJAJgtOFW5A6IBAcBpJtd5kPS+zJ8VxzguhOiD6Pf/zfgjMDUsehmT57QUanw\n",
+ "gbdNgBf1mSXZw3Czfs4swXmaj+42V39PQblTRJ5hVxxBfyBMHdtD+eP+pUlQP8pBAAnf3v75+Q0T\n",
+ "L19oeS5dx79IIwiodA3vtFf2KOiU2gODZqY3kJGizWNAAAAA3AGfT3RDfwB2j3tYlaKo3hdLneRM\n",
+ "Dlhayh8NourV4B4kYRi+kgAOdUf8hAGAI5XCPTeroAwXn8G2yGEphnv3FPeZqmLNmvgLgUkPciaQ\n",
+ "A3x0WVLvMk+lZn6cJdklOXHEnjNKsClw6wU0RbMDBk1zQUzYb/75rZ2h0N0KqL096XGATDutyhUZ\n",
+ "RVkyTgfbEgHdPAmzdroStgpcOUEN4xVVZX2E+XrryGs2/tIi+iUaglsBszkGSHUeEuoEpHc8PRHH\n",
+ "tDc+6s5rO2oABm+Gux/PUd+4yoXEBbF4DtdMIooAAAHGAZ9RakN/AHaNgkMVTymoPnXABzXUf7nM\n",
+ "R8KlDfCSlxubwbY5y13VVoGV2GO0t+vExf+APmeqLrIGM9X5aCQgGSaQJX4OQoECqyNRzFZQDLhW\n",
+ "KA4dfYJp7oYRPF8AMOzGYqm7AO7w7FtM2J0yD1XqM3LrKYS1dGZTAzMM0YXyhFuS7+8HWwRTCnl1\n",
+ "B1MtLMYaA8qvJY/AATH13D2takXBcx78I1sCsI+P57X6Q2Nh62/bggQuV3uhAAN0tyrIgbNQYVBH\n",
+ "gFwoUmXrxaEApAv0P2E40tM9SJDDcZe8DyE7ljCyxGjQA+gKJHzTkZCCQsmlxDg5It6wsdQ6cusN\n",
+ "DyWnlyoq3MMo7ugMYcm1YMEY73l36Y/R5wo4wUzuNvV2tJ3rSYBCfXsVjc5o1oA8OllKUpgpBG5u\n",
+ "9AavXOqCqjA07sUF9WlQ9JPrhiXa9bThYRp0lNBazKKlKwsBPK9zJ1/OayuptCCUOtFLyDYWpp2k\n",
+ "qNXWH8r0IpnJjxnQFcNmI3LKk+rH0vqX+48vd2BUqTcJ4rwX4e+V6oU1+lJyU8fmS4Kj/iQFUx5A\n",
+ "ntiGKLVWwqfkoYN2YexrEPVBTpKi81wf61aU8NAxYQAAAjdBm1ZJqEFsmUwII//+tSqAFj3B7fR5\n",
+ "G4ADaQx//3+BfZIcqzxSrotcVc8CLm7cBBc8JifUTg3KyGbsl0UtvUGR3t77PRffuzjjVfcKeiAp\n",
+ "EmDpLoqmMXTQU5wmHksjapt36fasfEiGyN1dOKyOI9nT0TFFL0pzQSss7Ux5GajOaQUF29zSIoeo\n",
+ "7hOusjWiFyZylISVuEBU8nCgDYn9P601XpFko2u3FAuYp/svCLJOzc9W7b14FY05eVZdhfmiv0Wm\n",
+ "d+i5ZPIv9mhB+8Cb50V0LQeFfsyfPeAABtfp/HIPaN+amWONE9vQ2YbC1JsqKljPbi6Vrd258gHB\n",
+ "PNyXvESqATfkK1Gnk0AWxo7XFr5y0Ce95pJr1n6gAd91M5RV5lL/XAgE7sYG4524aA+cXAa2XPdd\n",
+ "1BugfbN6YGWbktwAoVIXoUq7TnrmhBrw2FHa1aE9uMJerl9x/Rs847iKP+iuBUD2VIUOVa/G9Po0\n",
+ "ksPo1bHVIsITIKnrhXV1NabDgHAc5kIv+PJk6IroGA19oMw2I1d4rGiaYQZE9dmK1VRARJ9VXDBJ\n",
+ "Vlz3aoQhCyQZvwzvxWhVA1iU1RO1TWnJsppajNeO4Vg4/b+BSviIvrSwwqmjaRr8iuCpVTgz+ZJ6\n",
+ "95zLiSdnoIFqQJA1Hz4YR/KIOmAfhTTnHcdDelso1m8Bx2oHlzAOiYwR4NhSSRD6EhhCU2kXf5vn\n",
+ "vYdShk1Y3/pp+Wd9yZwIwTneJB0AoI0bbmfrtbbWj1oAAAFQQZ90RRUsO/8AVxVRwqizyog1fzvw\n",
+ "w3oFk0s5kH60rPhj0qbUv+9nJnU5H1hbksC+yivmpdt3FAylOp/Re8NoooEKQr4q7MX/kjNCB5zj\n",
+ "aCmG5E3TxVGWGCYMCsdEF1I+HuXX2a3wLCwf1iqCfznNMRG46GE6nIgxc91oY/zfMduLLCzyb8AQ\n",
+ "b20W2eRODsXd4+7XC1RndLreJ7Km543AdL1iUo99hYdoASXjyWRNv6wvJrmyFngIDlQOrLluZf/9\n",
+ "T8Y21pcggXpfTtvdj+B+3lZv29AFHkL2xGPZvyL4UyVUgb3U1DWd/iySeGzlK1IbRNu7obP1czi4\n",
+ "Rchm1nI/pS+cSuamJbhlQHIreF0u2/zcrSGkuOpbObSfAY//5j6RVfcQovw5wL1RQN0tcA1GtFxu\n",
+ "ZpovaLthGUkeOPh8iV5bEpupJR1R79Ew1sEkTDugAAABwQGfk3RDfwB2dNpntdq7wHtHkfExb8Mi\n",
+ "4AOIW+6weDVD4WeLhja/JOA5FtORnuW7CfHWfWrXcPJWyNJJfpx2maEKeggtR3RVEAdA1a1truYO\n",
+ "N3PBvt2C5hri51AyWveiUQtRNh8OhcT8b+NVPo5dLHlfN2wr8ZipKDuUP3k1md+EiPqVCrK5TuMQ\n",
+ "knvfHHEV8fXqrrFiHhWYrAGbSJdOrXgrQTN4JDv0LMwXs1Nl1nmEdfSgT5BF3DohYi4r2xGfiJcJ\n",
+ "KMZ1oPHaRBjgxhu40ZP5HqUG5rQWHD92UCH/Terh0cf4e0554mxHgDF9CBXD2Ey6LaV8LB9Jb9nA\n",
+ "f7tFFMQRIVaLiP+uig+B5OoeaCY5+GdEeHuY+ZE9jNToZ4yOUwNfysZaXJBrtfqEkQosI3EYRZQA\n",
+ "COu9BHjZjXsKjEmWe9Jj9yWusbXq4WMANyEJEPNSeDcqy2nLsc2OqSE4CgyCqy8blbRZqycUiZt/\n",
+ "3NpFflI5dk/7eeQ8Uo727U5FhceNm/3Tv/0N3CZNlPGV4f+3/HHJknpIjibzMw4AkTq3Lkxy1XZ+\n",
+ "FA9yAR3cZ0/eN1EscyudULe5dTvs1EvlYMWBAAABtgGflWpDfwB5Rz+lHWcxYALocP/IVGxKQ/5l\n",
+ "P8Y+UVeKYTw8iTn+GjVV8vbhgCZ5cI/70wvHdrfJYaZZyRIawh8+61+/vwo8HAkEyAQL0QVrU8Db\n",
+ "Z7+ORIRATWUQyS/LIyP8q4/O5rf7OuybqgrrJ5JQm3dvb5EYgnYLHCULt4xtpfvTsT5gEynxu9HL\n",
+ "Km20sO4q1oqcF4MPx2dj7xETa3veUfVJqfvwop/9NWsmPrdhY/wz7rinYt2HcWm7+ulSBZtWIRv3\n",
+ "yMRoNM+lyCvZDr0PaN2HfwYWOYr/NgyLM3qvI6TujkJkGWBIPuiFK/SHsSPx7iAMcrZ3CQvQC1rq\n",
+ "psLEx1Lx0vtWsdQAcjEYe6l7VHqUFbgcjcHAYPQIIgi8NauIxLhxUOQnkJo1mXO/e5w2N9AAHA22\n",
+ "RlXXsFU92TGe3GmYdLlI4OC3IklyabPhxs95veQzY6n0a2BnyANXxWrQG1vVVVAYgtb88NEdo6By\n",
+ "gCh1aEE1VpUTP0of4shaZpNk/2gd6T34r4uIClLqdADAAdaA4/epPc357p2Ro8OkrT9okATGaQDM\n",
+ "AYBiPC2kAQBkyn5ImAAAAdBBm5pJqEFsmUwII//+tSqAF4In0o7iUdIU6DQAMu59v/f4eNbK2my3\n",
+ "LFfU4bVvmOXvurgANJp+yhdNshfKZWyf1yiq02eNo25TtXkBg+c9UZquU5KtxkSr2wTyRJb5fWbg\n",
+ "+NL8Fosje7XYkSxYEiB3sVwPhHSvNWh2d4v6fN1lP9qvuUnfb1Bn+TdruqmJdM2vx9efbO5Th2CP\n",
+ "KiH3jeuRzoCzSIUG7cY38FVzT4nUIJdz+2KjjjJ0E7ZNKQ6lROaPqjFN4utrXaZfqGFX2nWmlL+h\n",
+ "PxS7plcEcSC1oWpbRWphWgodqD5c2VmFV0yO9NkxWYeDoEeaPVORAB/gqWAbIHdoZVHMBBV6fLyv\n",
+ "D3u5FppjGB4tzB+WC5jnXJKg0Sk3SkInESay6cwWUVJt/G4Tfg6wbMdEkCvCKlRosg/RTpp5P6wR\n",
+ "Z2iZfctuN2EQi36vtriULh4PVI/bw9ZXWlyhMpAYPlW3C1NvZrlJMNaSqGSSnh5cJMfrxHquXcAN\n",
+ "CTgojRhZ3tMe14Ny/HV3UfnpEJgrqxN8KZxlRpYS28Q96uqEu6NBBsBIIz0ei/Mg1x57c0aguL4j\n",
+ "dVBDXATm12Zi0uXfiRBRiIror0O2CDrlUQAAAPNBn7hFFSw7/wBgSQL3wIE2Tv5B6OJXPcoXMcSb\n",
+ "cE8qv/1v/uy5HaAJNUQCTSWlcVovOwe/GLZOdN2BNEgb1OlzNEinzyASzg3GuZ9zFeyJHe/zvxXW\n",
+ "qHgQlhmuH8QdE1M1s5tXy5mwAyoAiCrzupaN60ez6jWL/yRvGdGiPt3qJJLeMG60zAMKa7QhUJFJ\n",
+ "FMWUFrcLW6iQXx7VTZR7Qo0gz/aCe+BxT2h34J4bdpQTH59SHjOd2X4DMr2kpW5buE3EQBEKSUD8\n",
+ "yEiNy7MVRtsZHXt1V4Pb6TljTGXtC9pzGwEXtgadiRP8dhtDjxgpVN3IyoEAAAFOAZ/XdEN/AHkx\n",
+ "u7J3fsEfo6cXtbkNOd4swcOB3voAJyKHu0c0/MGiiYXv+2wca3XUwSOEG+s8df2rHPxj/J/Armyt\n",
+ "j86AAAWOWZsl8AgjGF9fWv1mQf9jrWNuA4APvfeLBFbZJZm7otp6Fc0DFqB0XCbEvLTkRU5ySc7e\n",
+ "Y4CD3ziWyxgWkLgxNxAV0V3rzOqUGhFxcTbBCJI75knYyulzgB9+SazwgLVSR2N8nND844Y7GLCN\n",
+ "0aeRWZgNIAWJkPPhP1VnSRo1jOpV+axgAXL8ExpNwIvLk+O8lekZ0/1o7sI+uJ46XyI2SuA6uJHd\n",
+ "bwUKNMI2qDKAM6f4kKlJLSQWqzXAi8hAQzI017i25Vpi5npQJ4TsJeyOHRvmO1wY5ZnIEZHyhgB4\n",
+ "IoLWrdA5opbAou9XxH6m1F6osqepeJLd97Dr7+5BqWzoHoOLhOxNwAAAAQ4Bn9lqQ38Ah1fDGltb\n",
+ "SoFNBABy4LNe514R+dnaDTYn5E46OmsRrJgYyAm1lSXdflAXI1+CFQXE0A4eKb0poyZSLaaXfRBJ\n",
+ "r/tA3jW8xYt/UxFDszVrqnPHP/Ny6pw3mJ+pwWr+YYAHxNaLyZj85nxRNPFMUkOr96iCB+MslYrg\n",
+ "cr/vUoZCrrFka9nw08yFJlyN4Ky9KHUYJOXDrBIiz8KQQaHFalCe3rENKk9raHLB9E2PdI37xydW\n",
+ "9R3Ktqa3KW5rMJCOoArO2/3trkkCh+/FDlbsei4VdbDQ32DjCaAkDFjCyuqOJNsi8nSI2KDSRFCB\n",
+ "83l81kCObhPemVMTlMBQzSDvOtDFUtuVwHtirD8AAAFqQZvcSahBbJlMFEwR//61KoAWweTusUEY\n",
+ "AFR7WLigAceU/KgvW9LBBRTRioW652v1Xpv5tYMFhkRmmlUca4/8lM9NJwOZFgbdLq3dhRjr1SQ+\n",
+ "iitgTnIKVe77qt/yWy3INzcVxffYfGucVy2ypyvLSUZVvVzu37Ufe4d1uKQAC1EE3Wwzkx7sEK4N\n",
+ "QwJyCdTZZnLiyrlEXcLAMbB36CvMtmCiaP8XPpa1U2RaJxnBB9qYeP0+JCORflaC8m/hyWfMppd0\n",
+ "XeCFuAYTEakC9vO4HVF02QH4GZZigg7j7bXnvstEtP5QgYZViZcOoAaQGKtWm3PCHoS8mKWfCUk8\n",
+ "ZLC6z2a10V0U2DavVH2m02W1Lc4/2WzrwUTHr66DOaP+urnPdabeHdXruv1HJ087InGSipJtxGko\n",
+ "4rppNbdlP4z6g2o/ksCKcSZ76uS1diKM/39wzVYDu1tkCD1lomve9NoQwUToKqCn30PDqMAAAAEr\n",
+ "AZ/7akN/AIdka2XuDkeawxOj/BZhZtP+kNbRABb4RmWT8vSOMSH2HVKuz5/n3pn38gQM6YQqY5bV\n",
+ "v8KsLMWKt//3BpX7BUiSjA/GsXEpiGachc2o+KqjjRfujy3SLc+TvzNfgePwT9w0Jj9Y8j6ORxA7\n",
+ "13x9/iM5Lx1s2OQQyRluiOYKxXDE9QjNulPCcMLJFKpvAfnZmzl0pzzHw/ANcBEDhABHQ9ftCkUs\n",
+ "Q4pQOQF20mJ1++bXoRcUz/lR79ACwohpzpGuaQCknCVhUL3lnnyQzloB0PAIRq1VnOd+y8D18t8/\n",
+ "IEva3L9FTrRi90eT/2pNxjMaqrOmFzrhjd2kmSd3YBlll+A3KrjDn/HtXx8SDjztM7Km7BEd2LVO\n",
+ "U1pVGn0+C8gCov9gxoEAAAIMQZvgSeEKUmUwII///rUqgBet471BV4xl2QAFRvb+6Uilj9hVaCt9\n",
+ "oXOXB19FM5G4bNDJAOl9w7HrxMOF2dPOUf977Rp9NoBObCR9cN42Ht77Y+l36qfp5SrWPFz3DG9k\n",
+ "Uks1s5yfRvMME5RxPYk9+qohbe5TR7z2WNWBJjaTvhnu4485WU3BaTyIbA4BRRdj0/JwsbCXRVZy\n",
+ "OMmFdXnFdxhNGZ5JMCQy+ip435WTv8KevLzG3OUTxX5d8x0gaiQZdaPwNC9GVrgmtqTc0z7He5Hx\n",
+ "p/UnXiE+WgHU095CwXga4AbeOtQbj0tjxKUoS9sAoJ5fyTlHv9FnU0ujgUuoA3Kj0ma5qF69zgnv\n",
+ "MTXEIqf8zuYuInk435YB6s5Aa1W77q49/ZLR70JdKU9F42nWnuaGIFvaX8JNp0NTGvA0s1VSOWIl\n",
+ "YVdpY6hSPbDqLYXO/LE7X1D3sWpexh+/kcA2B6pYDzx14bD7OD1f9pMDWxIrW6BpNH75M54gOMY1\n",
+ "SxoTsfh6KVoyFK4Yqd6lPKCLY4O17tm0vzqLEva8zNeuM7b2yHKwMHpqK8FV5yaEer9Zd+uSgIqd\n",
+ "eftECExc0GDPrda1mDLPyRR8iDjZRvRS/EElnceTaWiUEonB934ThxItQqnJINdKSyNdNwx44Jgq\n",
+ "H9/Zh55FLA3sdVDr+1aesKMfNmYnbwaje7GN0y0AAAENQZ4eRTRMO/8AYEUc98FD5/CYkGD6VZTK\n",
+ "7qaMD8JeD5Yvz1s+LaCSFWcn3aLtkXWLu76WBTjEp2boTz2lISGgYIiIhTqGBdSAvn4GaApcqQ2+\n",
+ "sy0LjwIg9aZXDdjP9AWFTV1H8wY3dWCf+Rn8X8p7dsAFRxXZ4015PG0t6STtIq5DOqARSPJ32oCq\n",
+ "OenP2L2rQhT0bU7kBXZqDOvuedMFko4K8dbR3EOKtstAjt1gHGNubjQIVeNhJsdrdMtXEY7juX3P\n",
+ "NuPteAILXrR8S3R5mIOtuZ+vWEUdS+Inr7FnZsbQiIv9i7KDzU2m3LJLNdjmArFBBLgFXYHDvQmL\n",
+ "9VT51Mb8gx1TyNar/CPWDggAAADyAZ49dEN/AInJdfYNr4ilmYSAMFB4GADpypoeWWXE3q20mGL8\n",
+ "wfGmH6ZgcbtTXJWZn5/uB2IPeQFG/rqNYZ/bmIUcKhccFRuPa9wOgu4Qnm9oi81y+ChWQK1KoKDK\n",
+ "TWWDeg/SDhV8w/q9dFY0rcekgnjPKbKFgzK+IO7hoMF7vhpMoVCqvwMtBaesBfF4bzxIufyftMba\n",
+ "VRaJWuZpM22/FtH8FxujQ6EjGNr9PHZg3rsxXbkYHRqZvH6RGypNdfKRL4serPMKtCeuCWEKaj1Z\n",
+ "h+pr+ULdNvwpLLHfA3OCu3Ql8v/sLDD/O1LVB9ug+l/wHpAAAAGVAZ4/akN/AInJdjcgUcZACEqh\n",
+ "GvWiTtr19IbQdv8WE1dBOa+lNipi00vM+C9W8F7IDH0aaS+KKFaekfOwUNG520lVemVKNYbjnPl7\n",
+ "LimE+s4N2NJ5SYT5+XRMb+vTvKCkG/By5wQO/WbZo9HorEm10+Tu4CVIj+2Ky5hDZl+kA6mkBK7E\n",
+ "3LwAW+4rGYiO9JH1BLFQj0ZOJq0ybrdVynOYOw8TudsCI+I3fiT5nmYCkIO1N7h++s67fASBLfgP\n",
+ "CYo7yLNwfifRM3ay+JhoRmwX5tGJ8l9w676Zo1wDaqZ0Q5guAYSxSJk2jHShR6LxlZmIVJnq7S00\n",
+ "iBOM0mxomzMhjpxeX6zqy/aA2SEREi4ulxZsEvlIWhLQ5YFv6LMkVEh9RITRQOsKGEls7Y4eSRWc\n",
+ "f23FGWOVxL2MZUmPGVh++Xygx19XCiXwoatt/s2T7zGfLkQ2IBiMKXoeDb7yiR4q+0v6UjACWT2H\n",
+ "kOIRMpG/B4KQPsfMRT0Rk3cAwV9dNnKm4XTlo9P9TmyT71B/Greq+KvhEBDxAAACJkGaJEmoQWiZ\n",
+ "TAgj//61KoAW5ktFwTkgtAAhBassVgP2a7WSOTniW7GlpUC5YARIimzpboyDKn/53KIxVBS+A0NS\n",
+ "3NuuWMzq53zfHvhoSdYO4dYooBUDN2VkLpVK3v3kQo1FoE02X3cyV2j6ziOTJORgWGzqU5k0XKJO\n",
+ "1VCPDS1gJclQYem5NlGAENmSiR9I8XvNQLGvpLGF/2+aU31xCZzIPp4tUxyLu/gVqq+6L5DezfDz\n",
+ "gPP3+vv4JFttE5Nyc7LysmCaQfUhi6zPymHmdLjs3bZdma4hV61UMMsGBNZfYf2GUkV1dVZ9kkfz\n",
+ "RyUYJPFdwjA5S++T8sc03o81MYXnXYkO9hGiG6RRLRRV2fPSgGhghnaqxRhYVQiuVS0ENIpjxqqc\n",
+ "KBEaAMs1VoaLKEOrNhZ8yB1VLLV9KSiM7/prkkNKRuNLp0WeTv2eHtXhIdAfhKb+ic7Pb48CqpOl\n",
+ "FnnbgphlxDaS1dplrA4VxMNzEL/27xNMQzhuRvnSDNb60j/kSJHw5x2JG6G/VwCoVAfFrZll45AB\n",
+ "Puajv4y9+7flMd/pR8Rg9UAn+cey+vNCcCbbn7FNSWq2hl9cymk4fwW6iqBgiFEQ7YZtyDoNCyYz\n",
+ "KAnW0gvHCg+5n6+qxC+xDS291Y4JfSW927ZZudU0tXxvupwcKf6fDXxz/bqsOMvxj6Y81+e6Dezh\n",
+ "B2/8nCpk1Qc7N5s0JoStEQ8+K2ir0vIXayhFQIgAAAEeQZ5CRREsO/8AZTZTJbuKD3PiQhYpzA/Q\n",
+ "3Iqsld8XUz3sHppFsAHZevvXPBLN2cIUd+YCbEEH6MplVFEcbuDDV0dnlBcrCNrbp3+CAOdBsr6h\n",
+ "0YfLGDPxHlFlUCi4qTS1o0TT2Jzkq8/O+TU7SSImG1EjEmOGpKvxjn7KxERq2Pbd/0y1sNHk5hiQ\n",
+ "eJwHwc7Z19aIrWes4h3UYQqHeU6kfCpUHVgnGubU2A0Xjg0UrouNSumFogz0StLk4fuhL5slF3Bb\n",
+ "3NpP7YhgiVLV0FNM21/pfbXvRQFzmliOaZuScgePqa02nvOdEHEpGVRPLCGL/tvzSkZqhXResmQg\n",
+ "1qZ/TxlvqjWYqPRThBIk2nP66jbd6NLagdWz1BtbrwB3TQAAAVkBnmF0Q38Ajz7dDL7wKLyRAA5r\n",
+ "u/5Co2KbB/AnQg3XvWeaImUuto8KuobiZ5Rpi0jf/+r5lFprj/mYxpQ5OwqjQqFG0eXwqi1D6M23\n",
+ "HLH/3LvgYXkbAAGr9uWkQaEU+TeJ38WNXodDC29t8Y0uYEpwNzyC6FqtgkCyDYDpd/nESpdVRRJh\n",
+ "15SV0TP88AKwZsT7yWH2r5gpJv8AhXnnWmKJ/WMwiS/2+Kf3ikj614P+BDohXhMYGO4GSZ19EkRI\n",
+ "RjwO1zoy3Umd4iOMuBBPzevAs74sU7IUdkUF24rNAstoyqnAUgY510L3SgPXbZmJYMv+tRpT7ZuM\n",
+ "oLxE5ACIQ+eHStmGZgh2P1nvrIaZRiBxoWZ1B+DDOtu5OZpc7LbajGP/oy8HbEFyJIcGXHGB5VXY\n",
+ "HnskMmabuu5xyFIJcVaqbGg3TlqrbBE29OX6xO7K38oavU/okVlIM+AAAAGEAZ5jakN/AIdXv9ZL\n",
+ "/wCpeCQF0zyG8897iu+TVNq8xXl3pE8eXm424VBKoADmOQ/RgBgC6Y0IzpqUKPVKwCZafdEIuhUv\n",
+ "zhgtxewRpr3F4VdMy9NUqqvPfGroLPxDW64Af18RtCEv8t7amX9ezvEWK8AgZjHjHXeVi2k8dp4r\n",
+ "TuMjdngEOGe6y0V0qXE0vJudyGSblaiStnW6rV0e34JxbdN3Qbajy6ozlLfOkq7Wqx1iLXxa4foY\n",
+ "IPBIjzxdye8gOjZW7bP0axd+wppVHkXrrvuxUf9dp18AanJIIFv6MCm6ujRO2wyu4ZfSbZp/KVFm\n",
+ "xvxpBAJyjKSdCoPxWylEDyms9NAmwAADmUiy6WUOIsiAC130X9MRKfeLHi3miJh/YDGeINuX+P+e\n",
+ "NWBXxp3RqAzo1eISPcPztmgXUHCSN2VRpnCOFQoF4yyryK4v7s2U4a7V5e2sVJBhb7kguiVFACK3\n",
+ "rbLSCnWI4OCs6u017nghnGW3Juq0rF80iqmo5QCt19S62wAAAkZBmmhJqEFsmUwII//+tSqAFu/w\n",
+ "HjJpMYeKfGxaFh4NwH9VzFzipiNnWLhZf3lim8qQP0NcWviT9hCfSjxxrnYEE59yPQn7u6+tCr/u\n",
+ "vn8/iyWB73TxWIDTyqwOWzo0R8Wj7McP4QWP8yE0svd//Wkug5+3cHmcpP/ONbeBn+TAQ0VzErlc\n",
+ "2hXFLnmGW7EB004qvGi/S7JfG21T+V5Sx9Nre0PuomioWltV0uJSYiMg18UwZktQhoyeO+qpPgky\n",
+ "U9/xX6NUrUyAfCz03v4wSV58lpzV7BxftApX8ZGWBx2zWQV/YeOCEWbmbHqvN18Jd5FxK1iHRqe+\n",
+ "nBGg6SyBQEQQfCMxCo37AXM212ulRN9X2fE3P9HkhvkaOxQZ5AElyFJ4BlaM9J8bcUgOX6NS6Cqb\n",
+ "n7IHMcCIPjAIJ36atWVr0EheDYyrwatT/sRxqfSoF0RgoVqtGqstMXZF7XACu2N9LDV5Ss0B+mSl\n",
+ "kJJqGxc50wazbtpofP341QOLrRCoQigLO2IFkJyqTpln4FgoWIMbx8x6cKkFmIESXv7mZEx6LOrL\n",
+ "ggZa/EdzllkBPCO/+zBjmey1Y55MrbMpoidNDpdQ6yZ4UDU0ai3HtghNjtrUaVDC+dCrSCASLB02\n",
+ "bO819PX27qwUTWW1MCrVhUzQkUkht4Xa4bdnUW7zTudPa++EPxUMVY36vPDJoCGilCgIXzTOV6S9\n",
+ "OVTh4+OA6S/XkcoA6ZjbQLERX5kZSQMoFJs4bPot93titzpDSKAhc1QMx6eKK6Ol2IEAAAEkQZ6G\n",
+ "RRUsO/8AZUEFdKFRxHYcrgnLV1IJewAc5dAL6/Pr5YWcZb4ejev9b/lpY1ea5Xk1AlTe44c3rPkF\n",
+ "DXI6yAdEC7kxPh5StAse03AARSF2nro+Dr5bfPJyYF/ERJ9NScPmUIVihvTCsyh5qmuoAH9P7eCu\n",
+ "Y8rdH1hF/pTSa+Z1tzZc8gwGtgV/YsMtlWLs3VbLWxt2KTDW5Y2b0HA6zgNn25rXu72r6iiN5aw7\n",
+ "sjFipq/8rjgHE9K0EK2Opn+0SPK2Rbo28aoNdC9V8VxW1CpMNxKjFOs8YmQmJE6Qtkw+Uo5mh3ic\n",
+ "7Ng6Xje5wAF7a8Iyr8DMIwvMZnnVp6ilQ1B/LSGEPncviRIHH8w83Grtt0CsL1L2isuyMboY11N9\n",
+ "lxQPpwAAAUABnqV0Q38Aiz6zZgMl5b2XXQAXQ9yHCqNv7FVD9CxHdTnw5pqRTLAoFiba5ss3lqXG\n",
+ "QCf4/o32jzmzNKjZDN2ghdo3OS7n/NFKTMs4yX0NTqaEhdnVRvrbcGvcKo0NYMgzE8UNwneueU22\n",
+ "1vpuKbOkae4P82iS9XSi8TlOPcF8mmD+n9qfVTXzL4r0M/s5xxZempvnxqhz38EgmSM/Zw7kEyiv\n",
+ "giyuP/YjNhFl3FVcOSLiQTCj+F0nLUE7lia+UkuO/YNBXwUKZKD8Add8BG6ZTC4bD/RSktc7uv8w\n",
+ "NB82AXgnpuELTB2xZFOLAYJncjo03/3uAK678Cl8cw8fzlbnSpp5eUkHacCUtAY9LPrz/OMf2bA9\n",
+ "vBE2eUwrxz/W0Sg0tjzkUrpnJSF+xYsA2fgRolT6A0NA++mVN8PJVhaGzQAAAX4BnqdqQ38Aj1eg\n",
+ "HO2BrhbSJp3bjAA7Lyx/X3Tt1hQ2T/wP93u+Km2fQtCsS47kHT/v6cxSu0EEWzwOVr17m7uMIt8s\n",
+ "rOS2NL0s+wNbNsQiUhFGWcubxLdtukca9QFTdaQjRXuW15l7gz2QnuVPe/r9SLMinrQ8TAT7c4JB\n",
+ "GrUpwbYY2wvPKUw4NOIKdjGz2TGxM02Yhqm+YQD7nu+MPeXg/5dBf+XeKfPK+RchTbfnRfx28pUm\n",
+ "+MUq+ynmpWVmmfO3TbD8gZCbZRUeK4LOH5lP3nvVvkbZlQVhN5vPlxxNouZsDfsmprxmWrHzH3vb\n",
+ "E+c7VsDA88L9wCH+ZmQGzxFjyOQ8cz4P9rsZSuU8vQS1h6fmk4XXUosrmweEGKJT/Sv5qb0OG8e9\n",
+ "voRxFaPrroiqkALWSnA5n4zcQMwfY/xXX1aR5rslt9ItB406qJIsbsrkl8pXUe2CwOVm9B72bhd1\n",
+ "lqsCRNktqyPMF/Ek4JsxscPvDjbSqbQZL+uT8zjgAAAB5EGarEmoQWyZTAgj//61KoAZQB+OVG5p\n",
+ "SZHABUb2//v8PGtlbWZ+A0oGGFPTAdgmU2TFbsuJ6mwUCouNe8f1I2ythN04JSJ5lx+ik6KpnC91\n",
+ "1FD3eD5Jit+kJIg5holbnldcijL50GRMV+Tt0L65TPBxqSAUdrQu+eLUTHPpJCL4CV5RJau8pEIv\n",
+ "uK3a7QA/UMQ/nrDjeZ6jqf1BF3JjbyaeIc5drvnYbR6lQ0gBIzp/QRU9xrHm8FESnIe42aooWDJ9\n",
+ "bVMccs59QBQd45WisW0MXV7NFtyepgfK7biPJN57MDsWL2A4LYHAXH6f6In3GVsSrYQ2HUKGlxpv\n",
+ "Yf/Xvk0pBnHsuIEsslXTjxwTTzuRb2YT7QCJp6yHiUVL67n8RfvHMNoHfUzP4rVgPSXcPL8FOP2d\n",
+ "F8GxovHNOmsOSUyc+t9OZXQFF+4FJNSN23FsgARohBEJ3c1u0ax3ACLYlwfCd3/U1mT29ftZkWMR\n",
+ "uj01t9v2AGHvgKM29X2Vs/ALzLNDd2OM9z+AC4TlcpgcRujIhnjHf17Je/8RMBqJCZtdfrFmz6AW\n",
+ "Z/aNIv/p/WX6adpvStFWxoDAnf+Tai9COS20TO4GHDviQkpMo6tbNTk4tiYWsmvBNq5u/aO08r2y\n",
+ "Bs1eH2kAAAD6QZ7KRRUsO/8AZUj9pUTz7rNMoHjJ4gSsLw2wABNFEVCVBZ8at73oa3C8UmeDMVba\n",
+ "M3uHP8p2EFDXTkl9EiChbxZZgpuvefKfc50lYhoTJ/7H62X0Z9NX2I7S32WT1XJeJtD32zfVBu3K\n",
+ "VmE+30x6+W2pKnyMM0ZejDKLq8WyIyi+9rC0QVVyU0N739nDCyt6aqRfMfSdljqTnwOmgDB5pHyK\n",
+ "U8Nf/BZxnIET5uBVX/VcS4bjmT9sCYYwmAz5vBy8cv5J53FYPh0/wF7kP2myhm8SfTnmNtpTej0y\n",
+ "JjLbrdGSBUAu+lwbCsr/YdOCYrxvvrklZP4j4s5VlQAAAgYBnul0Q38Aiz6zZf6skuDOogA4jl3V\n",
+ "YKO0NncAuqtob34dJ/eVmQtCFk2jxP+6gBUwoAJ5d6wKEpypNd+AlIf83kNIAAC8trXyGAv3zzzV\n",
+ "tAa7kzCHOXS39Rxic+qZEHcHH0Hx0iIZnH1UNeoS6dQYQqolDkQpOXG8nP6tDCpAEYSQsJzo5kch\n",
+ "Xf9jICMUCBjMQXeVS1i3FdA07mrKCBowVzEdee9WvqvXV7KuMTufiL0hA8BHvtD6VFvEZ6eiqgvN\n",
+ "8RNM5cYXQ2i+4Lx4R2QlAIN1NNxqM8GvSjSh/rgipqY8DwHJh8p9Jbu0Zs+w86pgxJN8m/cvWxRZ\n",
+ "yFAtI7sBhDbJnNXx83ll0o93YVJhxi0TxWXPf6PlHZeEyvr6QOF2VVafQjsZUg34P/p6tj3lkAer\n",
+ "aZouLIrbfbTrpoGdtXuXR2qC418s780GZsUBVTlvppC7dgGYqQzB5daoV61BoiIg6tQyG20Yk/Ib\n",
+ "TtwSJmeU5Eiu/zRo0bpbU2jgV79WVCB/SVzxsmoD1jJEhzN1FHxsbajOijl9Vp76GofsezNr+37n\n",
+ "UWWhPPzCk1rCLQgaI34ekcMUWq/vBK2WDe7wKACe/5M5UglN5Ct9Orsd3SfYPc0336usW56marFA\n",
+ "xW2XgVLc1GludnoFyQrT+oASHSl68jJc1j3I4WTIeU/p+eW8RtUF4AAAAR4BnutqQ38Ai1egJmdK\n",
+ "YqnGBlYUAF9obzNVJ+s4Wyt0Rq0YuZmzKSClvCu/741bUzMW9+2RqBxHf8xROd9WCD2DFO6m3iiG\n",
+ "ZOgLMC6WQsGlrWDKBATBQkW8M70y/ztO1ZzNQj1ow5FREW75+T8qWeYnaEkP0sDPfhS/8A++EHpT\n",
+ "ONUZpoNHugOpCj8EFvE/MnQhkWbqDB+V4zYJeD+V1h9PGTTPeM5Ykyq4ZMi+8E5Gka9dd2CFXMaQ\n",
+ "M99mRo+FOH0+y87A4U4JusoMgrnGwBHn7tNdR1Jgk+wKYqmIwBj2jGPnQFJXhHhE3ZkpIjaeakM2\n",
+ "8MH5c8xC359KRjK1nfiZHGSkxS98YPps7lGGiAJ2WdM/l0XaVpItX1VPHy/wAAACGUGa8EmoQWyZ\n",
+ "TAgj//61KoAWNzc2A41R+LAApun++OIZUz7EikV/szjfxvYPLx+f9K2/F/he8DHawkBMdV2wRLxA\n",
+ "t50GIuRUSWE/39Xo4nAQqkjDTJdufKMgNIx0erMAcY2QA5ejjVo1tlzncJOxCqGpuGwA+5/4IKyu\n",
+ "bmTzdPecTw0ZdpVPq5j/sb/uUTmyS5oriK2QJUn4uMhurpWU0pM90BFHxmx/55iJQnC/E4AiRjGv\n",
+ "TSfvy9eol7L6q3/AmWDGKQmta5h6TQecJSS7keMMTmFMkcgh+dQEUTFbphGIZpTz6vxfkWPPyqpQ\n",
+ "VmS0gectGBeLssajkGiu1ivhXeMUvGnpqjpc6XSD8FJ8sVdfwdsse9JozsVq/t5YFq5+AnEYcopl\n",
+ "mlIiLVwif6/glDa/FvPVZyUrYuYY9L3TA7eEHe1IcHWSOPxpnafEFBrVGoeZPrbfymiVcHOQ/3CX\n",
+ "aGrpVwdWrmOHr8jLuajUxWOW37ajHobcyT1hYWMxRTx80fZmsfvsrNw/Nztdx7LidHGE8jPZ4gQZ\n",
+ "DABlByR/bof6mTmjqkfbsR1PCXy4RDNnn9nCnaSnb8pCApsF6YsDTv0+UmVzx2ZPSdm2LhZIqOim\n",
+ "mhiXHWt+ZE1dnYkLwTdsgNYEeAUTjY5XG25CAykSMfKGwGWeeOwqKmLAqTmb7mCXXxxpy4+bbELo\n",
+ "RAxOLFOR7z+Rlt4VIVMH4QAAASRBnw5FFSw7/wBiyP2mEJvZyVx6ACpM7CM8ZBKHKR5j7ndOem+L\n",
+ "X5lQTliSlHrc19blDxI+BarmPxVVRFr/CorqLGvI+vHNUfF9L5rOth1seL+LchCRD6bYXJMlctoQ\n",
+ "KBnrSfN8OsFA3rCX0rxhgXIKgdEDuCNRYd4XCiw0AyO8VPwgQ3UKQOwN4T9AdwOVZht3xWSjlGSY\n",
+ "LTfR+DOcni9vpFUI/V99yTFNeriW/Ezi0Mmb4Xp+UrrTAn+/oqePQryHATZ97i1I4TzdZJ6ol421\n",
+ "ZZiGDIa6I2z+mz36WJISXYfn5PcaqZon5evy7wkHdXdLSXQuyy6RoW3UMK1kv4eYGMx6MEUBV881\n",
+ "1DxJ4Az2tfQhJ60iq3lK6xGARpoGTWiGA3pBAAABAwGfLXRDfwCHPtdry+v+2nyY2Sk+gF5YW5HN\n",
+ "XoAL6QRR4alJgXnPRJGLu1H/XzBsCOVwj2OHZ7/Befz18ioG7PdTUWTo/DFmzXwFwKSHq5MESJ/K\n",
+ "+czoaBaMU0SilMUvvgF9NaNkzEcYOJjCpUUkl+lvc9iWY7aNcNT0YkO2YuPLl1ZJa6XpXyzgvJfC\n",
+ "YABMMMlHP4hWdgac8C4JyYJle4OEiXwhanMhhDIkpZpmZqqPP6iXGzuSTb+0ZDMJHqoDGqJmkb8S\n",
+ "IJuvyZGNE4panvJTPVd9f7g4/aXxMPm3Cn3wfT3mTthI056NzanOEWKjM1qGy4olpTOi0cV3zUKu\n",
+ "VGl1k7sAAAHXAZ8vakN/AInJcXImIY9AsY+/nZAB2XUf7nMR8KlDfCSlxubwbY5yyAvaK6FdhjtI\n",
+ "iTEMX/gD5nqi6yBjPV+WgerMVdQiwmsTWCh4ZDRMTEvRNiTK06p6H4BM93iWfwAaKh8Gz9Gaukwy\n",
+ "InHLEZ0yD1XqM2twrrM9K/zMIWUOeN0Z6Qpdges4mCaPjYBUMA0KTxEuHmES85gUYlt0s0Ks9Nu+\n",
+ "2hfyb2t0rmyvRs70WgBBgYrdeTZMCwmoCbRHPK4oxsSlCang/p1gu/DmbjnwYRln/v7ufz7R3gdP\n",
+ "Fr7XrHKEZc+f98DBxQMF82PBbmDGtLAQXHwptz6g5mqHfaJhvvgj78jkqTGrQ4WXMBaKzHGNvGYe\n",
+ "XIR0bHtcMMQd0uz0UHs+NS8bhlZ93PGBn0DI4S7X4qFOiND2PCIg5ogjbfFqU4Kuh5oLH4L3vi2E\n",
+ "bzWP7DaofhwjMqjCqAvZAgznNJDsvnJzQxJ6Pqjj2ny04t1drdQRUisSLN+PcLenLQZbe401Xg2H\n",
+ "yhW845ouHrITGSqb9EOEeoN97gj42PjsdYRMVLRDVvCV2BOAqdLbEmICPHZnyy75qPsejK7duPuc\n",
+ "fJ9rEnjynB/HxYz7zf/RM6xyYbzIoc3AAAACEkGbNEmoQWyZTAgj//61KoAbj1lLPyvb6PAZgAh9\n",
+ "7f/9/gX2SHKs8Uq31kdycpXc3bf6XPCYn1E4Nyshm7SbxYTXwR3t77AgzFtBuE6fBgZeY48yXmAW\n",
+ "rqOr3iMlgArjVOjemrjz47grY/T9rKmhvhaqPi8pvZTzkzZCl+tV6nzXVbBFw15yZW9xk2z611V7\n",
+ "GITjv5GH4Oi/06B5IbjEMVKEcRpvt893HwIyUBXniM9I90uh0TBxOedvsxxE2iLZsr/m/GNXryb+\n",
+ "9as6btju6GU5FfXHAHKy97PxI2Rac5Rx/FoPiuKEecRx7EQrDfRmlggPPP63oMY4jkBeTzC7Drwp\n",
+ "8ik2Z4rhoAMWlcRPfXCI56oe4Jt09oRInuaD3ww9/jGDjhHIXGbNYM/s5UG1XuYLCqaLxESIyPG/\n",
+ "eNnETthXX/QZDvDCFX3YINANkqDvHlUQ+vcUvksaWF/g1aVcMu45c8BoP1coWBAVWVE6iyDMwfYl\n",
+ "RYTcnNfp26mpOfqiSJnYH+AFj0qGJttgeZBuJCzdV4F5EDreo0WWAiq/0jdXljJ+ZxDij/UazQOM\n",
+ "0ct15Q7rTOqLKy+lpOVa/koSWj06e8eyy0wY1FBSVaROGYbDgXze1QzYiVyP6+WTk1fjz+Do+J+/\n",
+ "TxVlHJsfUOz0tbPJ3R4cSjRVigTxPg9VAYynpzzMlIr0/pCOGd4XYyl3SGTwAAABOUGfUkUVLDv/\n",
+ "AGU2ltMhgssRVFnYDYHdfwUIOpARUIP1pWfDHpU2pf97OTOpyP7SrW+j72yMHgCy10/KQJvVenOE\n",
+ "eMrSHUfyq6lVIsdEDgl0M+/NXx5VMpg+IZB+I7xozsY2f0ARjiAjA8ZSqG32YEqaGwpGp+vfKL3P\n",
+ "hav1CfnyaUmopPCa0Y5ww/PZN4YINPOwE+Gg36kaKP/ME/B0d8v00CzvLXmI8pIa3TqrGIa7PF4X\n",
+ "8miGO6oXkRH45ag0gFdgkGj+BD1PvtIptIkuqTa5jzG/NewDN9cCfws/hjc474K6NoCTyr++7Tth\n",
+ "LSIM60DcVje0csuhEMwOmCNob99l/AJp/9hMVsVsEaxUNsWBZFMKnZoLJU/ljkNlTtF1zcUwJoZD\n",
+ "oLTT6FmWVzlFnyfjiJdVIqMAAYsAAAIPAZ9xdEN/AI8+s1VkrBucudR5tN1L4cUDsugAOgW+6weD\n",
+ "VD4WeLhja/JOA5FtORnuW7CfHWfWrXcPJlwit0rQdaNL8wYmpMOBxVMKErdopYTnWfb0EZST9ZFP\n",
+ "kGeAI5wBNyE7pmk7U/hz6/Uncd5yONsvInzdtLdlFGIUuwPsZsiC4nxcPKJ4ER73zqMcPC62dMwB\n",
+ "YeP2JTSzcWxmsY8AuUeSUMff3wugzCWo2dZWIqj8MEevc9dnI6e4RX4rfqOmeKfJ7QFxuPllAOzz\n",
+ "FkyERujhdmr2mdRExctZgI01tg+iF/NwBCqP+hQ0BZaq12BgDPwBcWyuj8PXGo/75aroqbic3atK\n",
+ "78lcQoP6TccBH3q4TpJbdFKZCXZFrS7Hh71ZQxzuADlZ8DDRzGHyvFJs8+7LX0Z3SVEeli/7hzNR\n",
+ "3en2BovQV52x/rwTox00ojUHS89/I6QK5rr9xZ5z1Evdog7ewBETCofR8FQPxE+2X576ofb9SYpa\n",
+ "RU+FFWJ4WPQBj/u1ljXdmoINHOgs90YcpGG37DHSgRaxKh3h9samVWdsr/7ZPH7Krx9nfE8zJoXc\n",
+ "5Frf0sUOO22BhUTf6MatKarbA54SuNAmIi3ejRZKQJ4XCjhpsLBrmw33yy9Nk6OT0LCi0ELysL29\n",
+ "OvbOK/J+/iRz4bP6v+/3ppYXG9MzSEeggmS96wm6yOsevJy9wrAAAAHWAZ9zakN/AIdXwVSZADwX\n",
+ "ZeAC6HD/yFRsSkP+ZT/GPlFXimE8PIk5/ho1VfL2NNL2pqViOd6YYnwc7ksNMs5IkNYQ+fdC2XMm\n",
+ "GpZcBQdS+anJcAkZpOHFxqdIo1pLhI3h3bcsWXXBd+BTXZhbA2JSmhm8EWBGqSBNaO0U3Qcdcea5\n",
+ "428f3xthr08dSK0oFN+HNErgBuKfL3JZNShDHaW66u0MaG1B/cF2Go8z1F6LGKUAmsy0D/C2CM25\n",
+ "q38c827dgYTnZjZnTFxlPuxm+JuWvYpOeWyy3J/wjV/USVL+4BKz61/Ccy+EH/JkQUqRmUOtvYei\n",
+ "XxTdexyug9nI6kyTGc2H3hy0C3uFxKKFKo9PfiwDCQWhQ1+vZIsII4FYexn+pQbkz5kmdlWKB5Lx\n",
+ "ONpNVggWvIuTYEFI34NTLTOf285YYkebB68ywIJ5f1uX/OXMZ5RxH3gjNZ8mKLNX9suvs06qOt/Q\n",
+ "e2ZfZ7Orgt/l3O7GLxwWvzugIsO88I1KhpZhgYDdYZ//1lVBcwG/tKVYjF1obqjtyFctY9LPGIag\n",
+ "318ehZmIvkhW9djj90e+pnWknudbQDv3Os17s3l7qFADdqSGqYyGaSU47a6O12HCRSwmepV1bewA\n",
+ "AAIrQZt4SahBbJlMCCH//qpVAC8LE+AX+ndLRI9AAL65x3/f4eNbK2tvWi3seP5qm31GHdf4edmk\n",
+ "0/ZKv9BuxjUGH/qoYxXDUlaWZFHb65x0lomfbckqRBtklU+1LGTmYtvnPAbKnUSAh/jTBATZpFND\n",
+ "l6V6ofQ5PTBcFjOWwgI6YqalXUkmqnN6g77O4xvodhM7XQWhsA44ADmvatn61wvReF9d9MqoCN9N\n",
+ "Twpkx2kbbrSoHJrSyqidCsv+e2gnLoWDEdLGn/42++dseweQBj40iKRQ7paDrpDRwTZVjGQJ+52c\n",
+ "gaUSUp5A/cAn4FgESmp/sZ0NpfD9/7ZAmCbSUfPUar6ndxZ3XG2DXWcNFu473rzFQZNpJnXg/Pfh\n",
+ "QCQDuu/iX2Vi2NjGs1QVI3BReUxvD8Z/YeLy6w0jDh9dcJGJdKoNjb9Epdy5r0lFeFb9L8AWhdEd\n",
+ "sGreMPdTiMRlq+JOqjdogseyQTcuDo5iesxIsb0dhY+P9VqSJtTxyPO42dn6TXPZDgt1vROlp+Ic\n",
+ "VTutbib7FY5U+jSckVQsLzLRwDuIoa+HpEcHjzuwHMaHrKVljgiPeRI3Afdpqx3nHgy0MFCOhGEr\n",
+ "Jkw+Dadh5qrWjCGOX2K5HPLV0E5qw7krTDhpWX8sTsYsIqvxr/V2EjIFiKwnheBvunmhlbHNUKTl\n",
+ "ykWRC9Afa8QE+vO8sLJHYNqVh5kOrsn0+NP1Mm4JPbYiahSDJa4o8TJzkXFBAAABAkGflkUVLDv/\n",
+ "AGBJAvfAgTZO/kHo4lc9yaSVZkgaxkXEQAgySaAqoJy8U1XmJXFaLzsHv4KqZnckX0gP1AYFUr5X\n",
+ "3Zof5zltHp7OQG87KhkyMuJLOz4diYjf3ctsH2KA3/S29L1hP4qjZ9kfgNEsjrH/nSlX3ikiiFcQ\n",
+ "/2mu5vwlzQMTIUj5/0pAslvbULpI2rwxcgfjtpeW3qe/Q0sCZXyJ3L7VhEaeyKZo/ALUAi114xdn\n",
+ "Gao6fyKpZhWohGCsI53i8XO3Y7Dq+aD4ONx4A265BL770fTZiNNw+oM7dwTK1vcPMdOTVjz4fi6j\n",
+ "bCMBPzMCGM7CsAz7OQTIKiUTlOi8YAAAAakBn7V0Q38AeTG7snd+wR+ioRwfka+slSBm7w4HiigA\n",
+ "mYoe7RzT8waKJhe/5/xyHdk2lI4Qb6yur2vWdYx/k/gVzZWx+dAAALHLM2W5kE06MD+/WY8W9vMg\n",
+ "jgsWx+NCob+sUo3r0m3kC7Z6vE5pa/kp8NVK1XizBU/gSaY6/S/NP+nzZeAUHhvnb6LPnQnTmhI7\n",
+ "+CLAa1UiK6P+lwPbKP0S0Q5RWiopmhls/AKTmwxXB+WRWyrrFglLMCCi/H7yBlZCPn3f1nUi1WXW\n",
+ "txmtCNftDVTPLfu3fbw+YSszpG0LQoe/d+Hn14JtNEXcVveVKgdRtrJ2SZSzkDZoD5uTokEopKbG\n",
+ "geSmsxJSe6mDenK/tstnSjFiozTKWgyJb1mTK9iBWStV+uPeceDypkgatRgkwgz17Zgn457UL8xo\n",
+ "RIb3Rzvhn1PaM6KKHv4wQMqvpqRXKRm+SScKgBhgUzc706tHx+sk3QXrFbfmTj3VwEqpASdMV8SQ\n",
+ "Rc7Pl7VdiwexHM38nPcgZguGyvH4NF1CZay1mT9d+wee9MfU3VHZJgMp057sUGFJIJZNmQAAASYB\n",
+ "n7dqQ38Ah1fDGltbSoFNBABy4LNfpqaOuQiA03rsvInHR01iNZMDGQE2sq9jRvjWYcCsjv8TgHDx\n",
+ "TelM9UgK8aIkbW5xZBO7YH31DMzHB/HcoCKmBUni45/7i/CIo8gF1pGPr0DAA7wV6D09MIgWLTIz\n",
+ "u2RlgzWHXLOhQSqpesq6gEgghz4eO+szzJWiaji2cgnbFYV7gS1iXMpBIisJc8i3U9gywhFgtGxt\n",
+ "IPW/7TiYEwGOLwxyjZX1HkROuSI8lAAdZBpungwbYVpPKSngzu3PnOIcBqes7c29MHD8jRPn7Zrt\n",
+ "720E/jZ4jB2yT62h5AEs+TCYeJmiY6lwGwXm58hIVqeMFafCwAYhd3vDCtfE6mymrvYwtLYQ0YeE\n",
+ "Ebj2MbA5+zEAAAFwQZu6SahBbJlMFEwR//61KoAWx89GABUe1i4OfaowcQHQyqHCv9PnwkHOB5jh\n",
+ "ZaY1nqaJvfgMHLxnx0HRU319XsFiIgZ3fycxZ7MoTbod+V6rFy2y2Qtld8RvCt0Ug4PVQuLFLU9x\n",
+ "N6gbeWntqj92UVkXYHO8rtnoyHbc5vkyDRwK85+1rEknOmV2fCPAJQWJQHZKzqn/akJ6R91HlWya\n",
+ "u/8GgP8q7KTtX0XyZMALsB3jT/UhmW5AlGIwNHeW1rtDiMG/Xy+69i+m2kTOjww4y5o0/8WfwLLR\n",
+ "RKlhEE1LYjJQjoy3+hNy7YguxzdtR0GOg0UsPQLFZIBnnCwGmFharg9MSkzKoZck80tBnNzVcu5F\n",
+ "Ot8W+bdDLv2E/9UTXci1RXlM26z5jearPa/9d/CciU6kElsImbzJ5J2YpzVs+pvW89XbvAJMExZq\n",
+ "wXD26iUkefzti1p2cc2CbM5qN5CGCTCmR13du1Y9J/JQwXkxhEAAAAFiAZ/ZakN/AHwUpp6Dymc0\n",
+ "2L536BR5shJlFypABdlGcrzfdaw/6f5GB/atQKmEnLjISTsAvG6zfbdBMs7bm2yeFrIQxXuK81kC\n",
+ "9pAAAXcBlvswH72knWeKBsU0Ht1g5h3YcKtQv4e82ah693wXobc+mdHgPA3TBKIFWUv/iM+/E90G\n",
+ "S/NmTeZC+lgt/zT/+HMt/QSFK9C1+AMdH9l6Wmy5eJzA8pumBNuqAArwclv8LW1AC9Ryj7J7dIqZ\n",
+ "2nhKIYQ08cavMFAGExrDHt7RiTs4Auer+jpijDT1MWhCFcQjNZn9nbOp1MdYUZ3batlHR94YKH39\n",
+ "SB9iaEe1H+vDrSDRsP3b0PfVLevCUtQQ7tTMju5YxLigI0SkXHby6oMGwH35DOmYdZ/QEHihEbbH\n",
+ "ljlaWypqm6TR7b/zNBCPoaZiHS0IlbTr/gzMbXxGasP7GssB89XtUV2jZihKJYcij8456L2VAAAC\n",
+ "WkGb3knhClJlMCCH//6qVQAvW48vGhnpxPcAFRvWsRQfCH0ZQNKlkI/Fmy/VFBZqjdqwlFWyRDRU\n",
+ "ATa/x8nSCThm/LYIboN0iejGj3Uchm8nyLv3P3+HOOnCw7+XGsyycSpaT/SKI8hu4RwjrdDxqaYn\n",
+ "k6pZ6qjZtX+IZ04XS8X44piBkZKHHklQnddyez3eJG0JjT0fN5b/c72jAD+sOeXlR6iPKkSUzu0o\n",
+ "3ha2oHN6UEDmISbP1cbB3piI/SHrisHlFNjIuHiEdkqSzG95tlcEE5RmJMFHyIZtmV+VUnHUg//H\n",
+ "WOVjyT0+oFlaS4c8th8dtoQJgchjo9u+OPpSDxEJgWI6zeeh28ogNTGzlwRqjfRSsrTItvjA1MD/\n",
+ "oBFhKLk5Gm5LLSkMpDHu9T5I2IaoH3PKDFRJp5FswrHAqK+C6EMiKJRw3UfQ++e71IzTL0xpDNJL\n",
+ "z6AeitOHT7WHH1q0lcaxtRKIXyzlri2FOeAU+zEh7DbcM3wvbzCPYrbD4ePmP1flYALif0DM+F20\n",
+ "woqO1ciEp6KvfcdLwkVhOi6HukmunTXGsruYaqjkaLT2QlUIMJVPTAaXGvEAsJSG/0vfsDXKkk6Z\n",
+ "sB3ElNrSO3yHej1aIEgW5xnCNisEQsWn6TKnOYGilPN4ZN8EB64V0F8PWNB9Aq0baX+T8kKesmFw\n",
+ "2y/668NRP8ypn4s+0TEew3V5nLH+An+XxWolypflMoVnWhEhG2W+IIgxfWfPuSgDmqBKtSemnfnO\n",
+ "mj2z1HJ4yEmqNoBjJwYnWfK8e0PHHb381Mk1zGGJOgWAAAABUEGf/EU0TDv/AFlVerlP4Rak+BQA\n",
+ "rfH1MAekqKZtO9rI3YpPu0XbIusXd4D2mikBBjNWCs5ZCx1/nIkAW78LpHSyCScRX686DgqeELvg\n",
+ "+6gjEvz9oPv/Q5SyPMBeMNrb/QJ3ato+Qw19nLJWjl0bduh+HilMsrklIYKHCWBaC/dNC4s7Xl/r\n",
+ "RCzM7ZJuRKmUY/D5sEAdr/H6TIVmiD0u2jiehC8y8Gw6flB5fdlWyz5ArpMes88RS9cHH1n4Dp5A\n",
+ "9YiKoxa6XsjMVtwy/Q1CE1CcjEE8nX1x2wi3FF+AiuFwqQsSRlHtfUsVksDBdXLvE8zjbyOIuIMV\n",
+ "pnJU22cEHHqRAVAAAQz/a8I3JUwtCYefKDlHQuITIdlhxtkj1S9/MOKY0At1R1tnioLMWN7HUVCo\n",
+ "b6XS9uoGwS6oOJgKcTFbR1vNa4wchWq0XCPds0DBwQAAAPYBnht0Q38AeTSjvudgsbkOLNHOwJSE\n",
+ "7MIAOT4Tae/DlzyAOhFcKHSt+XmND2K3krM1WAe1ksxoXOx8R5ib25iI4yoXHAvjcPvcDoLvQIYy\n",
+ "rfzkEj8FCsgVqTty2M7mcrrsvBMmGI/tSEAq1Wpq/wSUg2I4oZj0GjiChzewD+uw3YnWAi/Ntf5Y\n",
+ "Cv2dU9qEo9e3jPCavhxnj6HVQyqcvxekJ6cEcAGQvRh8PwiQyys4LYMz+Th6jmnZO6zDQlY1h459\n",
+ "aXiX/1NPDVjhvbOibPxdXy1nW8ZFN/ZpmMtUtTAz4mvuGfLCJYTZv8r0n1cztBPRieehovEAAAGy\n",
+ "AZ4dakN/AHwTrqiSAEDVZr7cfUIfCi6SEtf6z4BBmn/qEvCbGFYoG0hJzipIIEfgPxGLOPb5hgYo\n",
+ "3EqlxYfhyi3ADlPB0rSvUe/2K1c1bOHHkBdbN7v2fRCe6cTgBUViIyBzKbW8+YVzs1NjLsftvDLF\n",
+ "Jws+AVbFUOsz2XZO6+tJqS4okplORVfI8Zh8pjE7ly6+HI7Omo301kEp6VZks8VHiVKJOuTRsuFe\n",
+ "1lak9cDIgZS7IV3MkEjdmu8V6wPVTOui5KhgRegdKpe7dvKwiZROacSHUyEpgoiQ49NAkgd9ICSC\n",
+ "nOG96XtcVUK5qLGXI1ECEXtJcuaFVMtCmmOBBiFL8jC1MpHbxQ+4k2qRSUjP3JvFi0NfrsxeXbrH\n",
+ "Ebg5vBmNpJE6T+wdC73c70xC+Mtp+wYFzu5kfTKcL8d+Nzu4GlIr338e6SWwNSpXRGjfdLp9o3Ic\n",
+ "2PzMtQmrlpbEeUDp1vnkaZoqSF5M9xanIk/zohgoPX5++NN/ebYvr56WROjUeIUdsOf6nrJlmboT\n",
+ "DZEat6r4aY15lVCgiz4Mpb/mqSazxzrszmdRYRxGsW8DnzAAAAHfQZoCSahBaJlMCHf//qmWALFy\n",
+ "5oM61QiAB+cxK4+jNCOHXw6RALujtnWF0llKsvjvaSIz+44BdTBn8Dqmduydu0Ab2yYLL8rBa9BR\n",
+ "bM/WBrO6FCt4pfpaT57HiAbORTevnWHgnUCdwsiqbddvhjkiuJYbgCMD0kEP1SURu/b2Z5hWsq5s\n",
+ "eIdJwlVUmffx/GFsHH2OVg2kldaudIzyWEsMXsnZccvZ4+1TTMECSDKdUtlhUW9AAgPUraaePKP1\n",
+ "hatMAsKbsEP5g1nzjTlmyHjs7FjRbwjKng4/qsqVQ+s9Z8Le9mq44VPerxrlkKxdRgf8PQXTEpxP\n",
+ "gMR8UP9I/vRSJBbzTafYsMhPytfC8ESUe9ySga0pNZKSvC+bN1h7zO9OEjqF3rsnXJU2SZN7NAbS\n",
+ "01WCPkWQIdWN39TZ8BwhuM2E1/XfXA9OxCI/7PAG40Z8M1rKVJPTY+iwZnIQA6cEF3rnJVasn/JZ\n",
+ "rircnzzi1JQr5NiwthCEkD02k7GAoyHtF8lIKArvw+GqH7Ox1Tpd6DhPPJm2hmyijeFH6E+9UCJk\n",
+ "Iiolc9K3UW1rmUlHlF/p9jHAvsiiJUpuG/KCfna2LEYj9yn6P2oNlWfqq5P2HNtctaJeVRZv9Qb/\n",
+ "mNVjyjAAAAErQZ4gRREsO/8AZUEtk8LzOoS4AAhIFC88oI10PfUAs3UxxCOOtSzHREgn4/jgVfHt\n",
+ "0r483Tf2Y8D+zGlycQw2lUV6Nidlo0k0sASUCm4dEwF8Hb0+IzseFE0dYexJdLqvhcI7IIUIH6RG\n",
+ "uv8cjTXFD8CTksvYGpGc+uBYXhlwc3/jHhNGtm8G24uHniey+Zy/NtEpSl5dub3bE324kx+/N1gF\n",
+ "sU/CxkQF6UQWvd6Br4nL+i2L6udCLqM/JAVJhScc01UR/bE+NX2i3upx0qofgxfWL8unNZ/BP9Vc\n",
+ "CvVXAtxPw+0JopAnWMlwtBFG9wd+oP4zOIJ88u/VEvyZQd0JJP1Y3qhYk13Deyiv0C1r6ci1z7CQ\n",
+ "UwYqgUT64pT/hlIvHeCzEZxqH+WbUbEAAAGYAZ5fdEN/AIteE+hbrZmAAHNd3/IVGxTYP4E6C+Wr\n",
+ "63le3xAHjzqOqEil1tIAAUY3LvF62/277H30QskV8sEjceHvPe7bE0mfZ44avBY2gS0AAAMByRDk\n",
+ "EKOyh31Y2H0mdsy+zcGsPrGm3pHtO2riBcgILxHO0F5398HG90hK8UgtDUfp9CQyPOvDSyEU4WTb\n",
+ "6/WT9Z3aca6tb4C53W6p8Geyjq/mwbvNpnCVbbqIcx1ZT2+dencovmeYmPlI7jrhk6KwLYEd+5gO\n",
+ "J2YeKk4iWai6BsaO9+Tb5P52jBVHcSZ+Vws5QhTxkBSpdHlWJRcbh50V4ViVltwUN//XNx+jx2bk\n",
+ "KsfglI41FGmS2xAJtr8ZhKDk1VRRL2tGsNB5nztuRXCFd8q4MIuVVWGjim0ntcxZ/R18mzJZN+sI\n",
+ "qKUvfsxoaeZp+oIaU1hLeXzgcHEe+3/6emdZeJWoDNhUqhkfWzWzVZbEzUKpDBS9AbVIA5KR27LD\n",
+ "3HEfRMw9yt8eYILg7m/Rm2ubtU8u6V2QuxVXq1OHry5oY2TAAAABvQGeQWpDfwCPV5unds/RGF4o\n",
+ "aWlq+XwTSVpG+igacFOApaqyNJIXSXT4q7gA4DkP0YAYAumNCN0MwD7HSEeIsv3Q3L9kZ2RagxvU\n",
+ "jle4yQq6Zl5W7AgdlZnaBngH/w8xYsqWx5t90zzi7s9VyRY9jaNshfxuJAZcRgFILNTmQNCPoCtl\n",
+ "wyo5Ht91VCy2qSby6JDLeTD096PzM4KOK7/I+amuefuT0S/QnDNs952oi11JV2mbadqtKDqJE9x4\n",
+ "nX/OjU9PBP1uhsFLNkjsz6ZHlTOcsZvWUxabbw0HBNFuLXWIYqtAYdWN7c/QUoqY2IlVBR//v+NN\n",
+ "Bxf/rxPv+9QlTTeUOAVhzyU/kQACorW+VEL2KFNUPF85LUxlbSGEYQv/98/fAQAu6hKRw3yoJoPy\n",
+ "tyr7S7Za9gGurMYseuvuasNoB+fPCmp37VWgm4yNZQ0LM+8CPtaQgShVMs2/RIG2cXksHuYVqEB7\n",
+ "PJtzP2tl8EYDen8RohIb2UO5d/Xdc8aoi/Nu4IzGq8ApuZIxjC5J9bUYtMDEDA6eChGKPjb20vqg\n",
+ "2PRBI2fSXJrcSROGTC4m+VsF+VagO1LnjrakndEAAAHtQZpDSahBbJlMCG///qeEAVH55ayIAL6z\n",
+ "9D9Go2JR/VsPgULYIy+HM1JNQWUio64eqKV59gHDbxQ77xKGvVi/RlMeepNHF+Cplpp4rKqgivaK\n",
+ "14o0jVVjKwdzXmYfm8QJck76NrSj9rXzMi3Th9DbQ5HQHvlFr1+Ft6fGVXaubVoF+Bx3J4nvsWO+\n",
+ "FhXDphKaWh9geM/3PqX1TK4zqhRL2wKgDCWdLvIi2s2e48RSWR1zksj0SjkMINJfgjA7wVj0dW8Z\n",
+ "NZGlcRPjgkoSgpomI+x9/l7dJ5fHEj4WOkMQMTJnj+KOqaXfgtXbhBachZ0Av1Z6rh+qw/iObJOy\n",
+ "7q2gUdlftEWI7In7KZjqqg18Bg+z35wI2FmknOyXdEiDAPaFiRrhqkKOLfgLssw1BdohiuTGWlKn\n",
+ "NvPL4EzIbAUeS+0qv5cFdXvRjnn1zOMYTMpyN1CZYg4pqjj8mGtGdm1F7w0Xo4Mnm3hRmvZyyOaW\n",
+ "yf38s1SCwyOkhQcwJhrAAebvkxMWrAUWrTq9K9PdCUqFbMVB9+93aovoux8zBfM/WLangtLLXd/D\n",
+ "T9TcgY0eosWGZeAhQk2sxNC3bgvMT328AT2T2XCg2nG4jsOakPWfscwbc0zKfItj/1eXvyR2tk+K\n",
+ "fpgdg9dJ/OdcXINTUAAAB95tb292AAAAbG12aGQAAAAAAAAAAAAAAAAAAAPoAAAnEAABAAABAAAA\n",
+ "AAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAA\n",
+ "AAAAAAAAAAAAAAAAAAACAAAHCHRyYWsAAABcdGtoZAAAAAMAAAAAAAAAAAAAAAEAAAAAAAAnEAAA\n",
+ "AAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAEAAAAABsAAAASAA\n",
+ "AAAAACRlZHRzAAAAHGVsc3QAAAAAAAAAAQAAJxAAAAgAAAEAAAAABoBtZGlhAAAAIG1kaGQAAAAA\n",
+ "AAAAAAAAAAAAACgAAAGQAFXEAAAAAAAtaGRscgAAAAAAAAAAdmlkZQAAAAAAAAAAAAAAAFZpZGVv\n",
+ "SGFuZGxlcgAAAAYrbWluZgAAABR2bWhkAAAAAQAAAAAAAAAAAAAAJGRpbmYAAAAcZHJlZgAAAAAA\n",
+ "AAABAAAADHVybCAAAAABAAAF63N0YmwAAACzc3RzZAAAAAAAAAABAAAAo2F2YzEAAAAAAAAAAQAA\n",
+ "AAAAAAAAAAAAAAAAAAABsAEgAEgAAABIAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n",
+ "AAAAAAAAAAAY//8AAAAxYXZjQwFkABX/4QAYZ2QAFazZQbCWhAAAAwAEAAADAFA8WLZYAQAGaOvj\n",
+ "yyLAAAAAHHV1aWRraEDyXyRPxbo5pRvPAyPzAAAAAAAAABhzdHRzAAAAAAAAAAEAAABkAAAEAAAA\n",
+ "ABRzdHNzAAAAAAAAAAEAAAABAAADMGN0dHMAAAAAAAAAZAAAAAEAAAgAAAAAAQAAFAAAAAABAAAI\n",
+ "AAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQA\n",
+ "AAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAA\n",
+ "AAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAA\n",
+ "AAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAA\n",
+ "AQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAAB\n",
+ "AAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEA\n",
+ "AAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAA\n",
+ "CAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAM\n",
+ "AAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgA\n",
+ "AAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAA\n",
+ "AAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAA\n",
+ "AAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAA\n",
+ "AQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAAB\n",
+ "AAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAgAAAAAHHN0c2MAAAAAAAAAAQAAAAEA\n",
+ "AABkAAAAAQAAAaRzdHN6AAAAAAAAAAAAAABkAAAGhgAAAl8AAAFjAAAAvgAAAXYAAAHzAAABDgAA\n",
+ "ATYAAAFIAAAB9QAAAOIAAAD6AAABWgAAAbAAAADTAAAB8wAAAN4AAAH+AAABEAAAAOIAAAG2AAAC\n",
+ "DAAAAWUAAAGkAAABmgAAAckAAAEdAAABfQAAAPMAAAFxAAABIgAAAjYAAAEmAAAA5AAAAXoAAAH+\n",
+ "AAAA/wAAAT0AAAFnAAACAwAAARQAAAE3AAABTwAAAckAAADrAAACFwAAAP0AAAHzAAABIQAAAOAA\n",
+ "AAHKAAACOwAAAVQAAAHFAAABugAAAdQAAAD3AAABUgAAARIAAAFuAAABLwAAAhAAAAERAAAA9gAA\n",
+ "AZkAAAIqAAABIgAAAV0AAAGIAAACSgAAASgAAAFEAAABggAAAegAAAD+AAACCgAAASIAAAIdAAAB\n",
+ "KAAAAQcAAAHbAAACFgAAAT0AAAITAAAB2gAAAi8AAAEGAAABrQAAASoAAAF0AAABZgAAAl4AAAFU\n",
+ "AAAA+gAAAbYAAAHjAAABLwAAAZwAAAHBAAAB8QAAABRzdGNvAAAAAAAAAAEAAAAsAAAAYnVkdGEA\n",
+ "AABabWV0YQAAAAAAAAAhaGRscgAAAAAAAAAAbWRpcmFwcGwAAAAAAAAAAAAAAAAtaWxzdAAAACWp\n",
+ "dG9vAAAAHWRhdGEAAAABAAAAAExhdmY1Ny44My4xMDA=\n",
+ "\"\u003e\n",
+ " Your browser does not support the video tag.\n",
+ "\u003c/video\u003e"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.HTML at 0x7f84b2253b50\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": []
+ },
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "import time\n",
+ "import traceback\n",
+ "\n",
+ "from matplotlib import pyplot as plt\n",
+ "from matplotlib import animation as anim\n",
+ "import tensorflow as tf\n",
+ "from tensorflow.contrib import autograph as ag\n",
+ "from IPython import display\n",
+ "\n",
+ "\n",
+ "@ag.do_not_convert(ag.RunMode.PY_FUNC)\n",
+ "def render(boards):\n",
+ " fig = plt.figure()\n",
+ "\n",
+ " ims = []\n",
+ " for b in boards:\n",
+ " im = plt.imshow(b, interpolation='none')\n",
+ " im.axes.get_xaxis().set_visible(False)\n",
+ " im.axes.get_yaxis().set_visible(False)\n",
+ " ims.append([im])\n",
+ "\n",
+ " try:\n",
+ " ani = anim.ArtistAnimation(\n",
+ " fig, ims, interval=100, blit=True, repeat_delay=5000)\n",
+ " plt.close()\n",
+ "\n",
+ " display.display(display.HTML(ani.to_html5_video()))\n",
+ " except RuntimeError:\n",
+ " print('Coult not render animation:')\n",
+ " traceback.print_exc()\n",
+ "\n",
+ "\n",
+ "def gol_episode(board):\n",
+ " directions = tf.constant(\n",
+ " ((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)))\n",
+ "\n",
+ " new_board = []\n",
+ " ag.set_element_type(new_board, tf.int32)\n",
+ "\n",
+ " for i in range(len(board)):\n",
+ " for j in range(len(board[i])):\n",
+ " num_neighbors = 0\n",
+ " for d in directions:\n",
+ " ni = i + d[0]\n",
+ " nj = j + d[1]\n",
+ " if ni \u003e= 0 and nj \u003e= 0 and ni \u003c len(board) and nj \u003c len(board[i]):\n",
+ " num_neighbors += board[ni][nj]\n",
+ " \n",
+ " new_cell = 0\n",
+ " if num_neighbors == 2:\n",
+ " new_cell = board[i][j]\n",
+ " elif num_neighbors == 3:\n",
+ " new_cell = 1\n",
+ " \n",
+ " new_board.append(new_cell)\n",
+ " final_board = ag.stack(new_board)\n",
+ " final_board = tf.reshape(final_board, board.shape)\n",
+ " return final_board\n",
+ " \n",
+ "\n",
+ "def gol(initial_board):\n",
+ " board = initial_board\n",
+ " boards = []\n",
+ " ag.set_element_type(boards, tf.int32)\n",
+ " # We are being explicit about tensor constants to ensure the loop\n",
+ " # is not unrolled in the graph. This may change in the future.\n",
+ " for i in range(tf.constant(NUM_STEPS)):\n",
+ " board = gol_episode(board)\n",
+ " boards.append(board)\n",
+ " boards = ag.stack(boards)\n",
+ " render(boards)\n",
+ " return tf.no_op()\n",
+ " \n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " # Gosper glider gun\n",
+ " # Adapted from http://www.cplusplus.com/forum/lounge/75168/\n",
+ " _ = 0\n",
+ " initial_board = tf.constant((\n",
+ " ( _,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_ ),\n",
+ " ( _,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,1,_,_,_,_,_,_,_,_,_,_,_,_ ),\n",
+ " ( _,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,1,_,1,_,_,_,_,_,_,_,_,_,_,_,_ ),\n",
+ " ( _,_,_,_,_,_,_,_,_,_,_,_,_,1,1,_,_,_,_,_,_,1,1,_,_,_,_,_,_,_,_,_,_,_,_,1,1,_ ),\n",
+ " ( _,_,_,_,_,_,_,_,_,_,_,_,1,_,_,_,1,_,_,_,_,1,1,_,_,_,_,_,_,_,_,_,_,_,_,1,1,_ ),\n",
+ " ( _,1,1,_,_,_,_,_,_,_,_,1,_,_,_,_,_,1,_,_,_,1,1,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_ ),\n",
+ " ( _,1,1,_,_,_,_,_,_,_,_,1,_,_,_,1,_,1,1,_,_,_,_,1,_,1,_,_,_,_,_,_,_,_,_,_,_,_ ),\n",
+ " ( _,_,_,_,_,_,_,_,_,_,_,1,_,_,_,_,_,1,_,_,_,_,_,_,_,1,_,_,_,_,_,_,_,_,_,_,_,_ ),\n",
+ " ( _,_,_,_,_,_,_,_,_,_,_,_,1,_,_,_,1,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_ ),\n",
+ " ( _,_,_,_,_,_,_,_,_,_,_,_,_,1,1,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_ ),\n",
+ " ( _,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_ ),\n",
+ " ( _,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_ ),\n",
+ " ))\n",
+ " initial_board = tf.pad(initial_board, ((0, 20), (0, 10)))\n",
+ " \n",
+ " tf_gol = ag.to_graph(gol)\n",
+ " game_ops = tf_gol(initial_board)\n",
+ " with tf.Session() as sess:\n",
+ " sess.run(game_ops)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "7NgrSPCZxs3h"
+ },
+ "source": [
+ "#### Generated code"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ },
+ "height": 2323
+ },
+ "colab_type": "code",
+ "executionInfo": {
+ "elapsed": 753,
+ "status": "ok",
+ "timestamp": 1532101593840,
+ "user": {
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "hIGYeX0Cxs3i",
+ "outputId": "e0b62eb1-3e12-4e53-dc54-8a3fa56d823d"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "from __future__ import print_function\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "def tf__gol_episode(board):\n",
+ " try:\n",
+ " with tf.name_scope('gol_episode'):\n",
+ " directions = tf.constant(((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1),\n",
+ " (1, -1), (1, 0), (1, 1)))\n",
+ " new_board = ag__.new_list([])\n",
+ "\n",
+ " def extra_test_2(new_board_2):\n",
+ " with tf.name_scope('extra_test_2'):\n",
+ " return True\n",
+ "\n",
+ " def loop_body_2(i, new_board_2):\n",
+ " with tf.name_scope('loop_body_2'):\n",
+ "\n",
+ " def extra_test_1(new_board_1):\n",
+ " with tf.name_scope('extra_test_1'):\n",
+ " return True\n",
+ "\n",
+ " def loop_body_1(j, new_board_1):\n",
+ " with tf.name_scope('loop_body_1'):\n",
+ " num_neighbors = 0\n",
+ "\n",
+ " def extra_test(num_neighbors_2):\n",
+ " with tf.name_scope('extra_test'):\n",
+ " return True\n",
+ "\n",
+ " def loop_body(d, num_neighbors_2):\n",
+ " with tf.name_scope('loop_body'):\n",
+ " ni = i + ag__.get_item(d, (0), opts=ag__.GetItemOpts(\n",
+ " element_dtype=None))\n",
+ " nj = j + ag__.get_item(d, (1), opts=ag__.GetItemOpts(\n",
+ " element_dtype=None))\n",
+ "\n",
+ " def if_true():\n",
+ " with tf.name_scope('if_true'):\n",
+ " num_neighbors_1, = num_neighbors_2,\n",
+ " num_neighbors_1 += ag__.get_item(ag__.get_item(board,\n",
+ " (ni), opts=ag__.GetItemOpts(element_dtype=None)),\n",
+ " (nj), opts=ag__.GetItemOpts(element_dtype=None))\n",
+ " return num_neighbors_1,\n",
+ "\n",
+ " def if_false():\n",
+ " with tf.name_scope('if_false'):\n",
+ " return num_neighbors_2,\n",
+ " num_neighbors_2 = ag__.utils.run_cond(tf.logical_and(tf.\n",
+ " greater_equal(ni, 0), tf.logical_and(tf.greater_equal\n",
+ " (nj, 0), tf.logical_and(tf.less(ni, ag__.utils.\n",
+ " dynamic_builtin(len, board)), tf.less(nj, ag__.utils.\n",
+ " dynamic_builtin(len, ag__.get_item(board, (i), opts=\n",
+ " ag__.GetItemOpts(element_dtype=None))))))), if_true,\n",
+ " if_false)\n",
+ " return num_neighbors_2,\n",
+ " num_neighbors = ag__.for_stmt(directions, extra_test,\n",
+ " loop_body, (num_neighbors,))\n",
+ " new_cell = 0\n",
+ "\n",
+ " def if_true_2():\n",
+ " with tf.name_scope('if_true_2'):\n",
+ " new_cell_2, = new_cell,\n",
+ " new_cell_2 = ag__.get_item(ag__.get_item(board, (i), opts\n",
+ " =ag__.GetItemOpts(element_dtype=None)), (j), opts=\n",
+ " ag__.GetItemOpts(element_dtype=None))\n",
+ " return new_cell_2,\n",
+ "\n",
+ " def if_false_2():\n",
+ " with tf.name_scope('if_false_2'):\n",
+ " new_cell_3, = new_cell,\n",
+ "\n",
+ " def if_true_1():\n",
+ " with tf.name_scope('if_true_1'):\n",
+ " new_cell_1, = new_cell_3,\n",
+ " new_cell_1 = 1\n",
+ " return new_cell_1,\n",
+ "\n",
+ " def if_false_1():\n",
+ " with tf.name_scope('if_false_1'):\n",
+ " return new_cell_3,\n",
+ " new_cell_3 = ag__.utils.run_cond(tf.equal(num_neighbors, \n",
+ " 3), if_true_1, if_false_1)\n",
+ " return new_cell_3,\n",
+ " new_cell = ag__.utils.run_cond(tf.equal(num_neighbors, 2),\n",
+ " if_true_2, if_false_2)\n",
+ " new_board_1 = ag__.list_append(new_board_1, new_cell)\n",
+ " return new_board_1,\n",
+ " new_board_2 = ag__.for_stmt(ag__.utils.dynamic_builtin(range,\n",
+ " ag__.utils.dynamic_builtin(len, ag__.get_item(board, (i),\n",
+ " opts=ag__.GetItemOpts(element_dtype=None)))), extra_test_1,\n",
+ " loop_body_1, (new_board_2,))\n",
+ " return new_board_2,\n",
+ " new_board = ag__.for_stmt(ag__.utils.dynamic_builtin(range, ag__.\n",
+ " utils.dynamic_builtin(len, board)), extra_test_2, loop_body_2, (\n",
+ " new_board,))\n",
+ " final_board = ag__.list_stack(new_board, opts=ag__.ListStackOpts(\n",
+ " element_dtype=tf.int32, original_call=ag.stack))\n",
+ " final_board = tf.reshape(final_board, board.shape)\n",
+ " return final_board\n",
+ " except:\n",
+ " ag__.rewrite_graph_construction_error(ag_source_map__)\n",
+ "\n",
+ "def tf__gol(initial_board):\n",
+ " try:\n",
+ " with tf.name_scope('gol'):\n",
+ " board = initial_board\n",
+ " boards = ag__.new_list([])\n",
+ "\n",
+ " def extra_test(board_1, boards_1):\n",
+ " with tf.name_scope('extra_test'):\n",
+ " return True\n",
+ "\n",
+ " def loop_body(i, board_1, boards_1):\n",
+ " with tf.name_scope('loop_body'):\n",
+ " board_1 = tf__gol_episode(board_1)\n",
+ " boards_1 = ag__.list_append(boards_1, board_1)\n",
+ " return board_1, boards_1\n",
+ " board, boards = ag__.for_stmt(ag__.utils.dynamic_builtin(range, tf.\n",
+ " constant(NUM_STEPS)), extra_test, loop_body, (board, boards))\n",
+ " boards = ag__.list_stack(boards, opts=ag__.ListStackOpts(\n",
+ " element_dtype=tf.int32, original_call=ag.stack))\n",
+ " with ag__.utils.control_dependency_on_returns(render(boards)):\n",
+ " boards_2 = ag__.utils.alias_tensors(boards)\n",
+ " return tf.no_op()\n",
+ " except:\n",
+ " ag__.rewrite_graph_construction_error(ag_source_map__)\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(ag.to_code(gol))"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [
+ "p8zZyj-tq4K3",
+ "Lkq3DBGOv3fA",
+ "r8_0ioEuAI-a",
+ "7NgrSPCZxs3h"
+ ],
+ "default_view": {},
+ "last_runtime": {
+ "build_target": "",
+ "kind": "local"
+ },
+ "name": "Simple algorithms using AutoGraph",
+ "provenance": [
+ {
+ "file_id": "19q8KdVF8Cb_fDd13i-WDOG_6n_QGNW5-",
+ "timestamp": 1528465909719
+ }
+ ],
+ "version": "0.3.2",
+ "views": {}
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/tensorflow/contrib/autograph/examples/notebooks/autograph_vs_eager_mnist_benchmark.ipynb b/tensorflow/contrib/autograph/examples/notebooks/autograph_vs_eager_mnist_benchmark.ipynb
deleted file mode 100644
index fff673921a..0000000000
--- a/tensorflow/contrib/autograph/examples/notebooks/autograph_vs_eager_mnist_benchmark.ipynb
+++ /dev/null
@@ -1,666 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "Pa2qpEmoVOGe"
- },
- "outputs": [],
- "source": [
- "from __future__ import absolute_import\n",
- "from __future__ import division\n",
- "from __future__ import print_function\n",
- "\n",
- "import os\n",
- "import time\n",
- "\n",
- "import tensorflow as tf\n",
- "\n",
- "import matplotlib.pyplot as plt\n",
- "import numpy as np\n",
- "import six\n",
- "\n",
- "from tensorflow.contrib import autograph\n",
- "from tensorflow.contrib.eager.python import tfe\n",
- "from tensorflow.python.eager import context\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "YfnHJbBOBKae"
- },
- "outputs": [],
- "source": [
- "import gzip\n",
- "import shutil\n",
- "\n",
- "from six.moves import urllib\n",
- "\n",
- "\n",
- "def download(directory, filename):\n",
- " filepath = os.path.join(directory, filename)\n",
- " if tf.gfile.Exists(filepath):\n",
- " return filepath\n",
- " if not tf.gfile.Exists(directory):\n",
- " tf.gfile.MakeDirs(directory)\n",
- " url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'\n",
- " zipped_filepath = filepath + '.gz'\n",
- " print('Downloading %s to %s' % (url, zipped_filepath))\n",
- " urllib.request.urlretrieve(url, zipped_filepath)\n",
- " with gzip.open(zipped_filepath, 'rb') as f_in, open(filepath, 'wb') as f_out:\n",
- " shutil.copyfileobj(f_in, f_out)\n",
- " os.remove(zipped_filepath)\n",
- " return filepath\n",
- "\n",
- "\n",
- "def dataset(directory, images_file, labels_file):\n",
- " images_file = download(directory, images_file)\n",
- " labels_file = download(directory, labels_file)\n",
- "\n",
- " def decode_image(image):\n",
- " # Normalize from [0, 255] to [0.0, 1.0]\n",
- " image = tf.decode_raw(image, tf.uint8)\n",
- " image = tf.cast(image, tf.float32)\n",
- " image = tf.reshape(image, [784])\n",
- " return image / 255.0\n",
- "\n",
- " def decode_label(label):\n",
- " label = tf.decode_raw(label, tf.uint8)\n",
- " label = tf.reshape(label, [])\n",
- " return tf.to_int32(label)\n",
- "\n",
- " images = tf.data.FixedLengthRecordDataset(\n",
- " images_file, 28 * 28, header_bytes=16).map(decode_image)\n",
- " labels = tf.data.FixedLengthRecordDataset(\n",
- " labels_file, 1, header_bytes=8).map(decode_label)\n",
- " return tf.data.Dataset.zip((images, labels))\n",
- "\n",
- "\n",
- "def mnist_train(directory):\n",
- " return dataset(directory, 'train-images-idx3-ubyte',\n",
- " 'train-labels-idx1-ubyte')\n",
- "\n",
- "def mnist_test(directory):\n",
- " return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')\n",
- "\n",
- "def setup_mnist_data(is_training, hp, batch_size):\n",
- " if is_training:\n",
- " ds = mnist_train('/tmp/autograph_mnist_data')\n",
- " ds = ds.cache()\n",
- " ds = ds.shuffle(batch_size * 10)\n",
- " else:\n",
- " ds = mnist_test('/tmp/autograph_mnist_data')\n",
- " ds = ds.cache()\n",
- " ds = ds.repeat()\n",
- " ds = ds.batch(batch_size)\n",
- " return ds\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "x_MU13boiok2"
- },
- "outputs": [],
- "source": [
- "def mlp_model(input_shape):\n",
- " model = tf.keras.Sequential((\n",
- " tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape),\n",
- " tf.keras.layers.Dense(100, activation='relu'),\n",
- " tf.keras.layers.Dense(10, activation='softmax')))\n",
- " model.build()\n",
- " return model\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "kfZk9EFZ5TeQ"
- },
- "outputs": [],
- "source": [
- "# Test-only parameters. Test checks successful completion not correctness. \n",
- "burn_ins = 1\n",
- "trials = 1\n",
- "max_steps = 2"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "gWXV8WHn43iZ"
- },
- "outputs": [],
- "source": [
- "#@test {\"skip\": true} \n",
- "burn_ins = 3\n",
- "trials = 10\n",
- "max_steps = 500"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "DXt4GoTxtvn2"
- },
- "source": [
- "# Autograph"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "W51sfbONiz_5"
- },
- "outputs": [],
- "source": [
- "def predict(m, x, y):\n",
- " y_p = m(x)\n",
- " losses = tf.keras.losses.categorical_crossentropy(y, y_p)\n",
- " l = tf.reduce_mean(losses)\n",
- " accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
- " accuracy = tf.reduce_mean(accuracies)\n",
- " return l, accuracy\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "CsAD0ajbi9iZ"
- },
- "outputs": [],
- "source": [
- "def fit(m, x, y, opt):\n",
- " l, accuracy = predict(m, x, y)\n",
- " opt.minimize(l)\n",
- " return l, accuracy\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "RVw57HdTjPzi"
- },
- "outputs": [],
- "source": [
- "def get_next_batch(ds):\n",
- " itr = ds.make_one_shot_iterator()\n",
- " image, label = itr.get_next()\n",
- " x = tf.to_float(tf.reshape(image, (-1, 28 * 28)))\n",
- " y = tf.one_hot(tf.squeeze(label), 10)\n",
- " return x, y\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "UUI0566FjZPx"
- },
- "outputs": [],
- "source": [
- "def train(train_ds, test_ds, hp):\n",
- " m = mlp_model((28 * 28,))\n",
- " opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
- " train_losses = []\n",
- " train_losses = autograph.utils.set_element_type(train_losses, tf.float32)\n",
- " test_losses = []\n",
- " test_losses = autograph.utils.set_element_type(test_losses, tf.float32)\n",
- " train_accuracies = []\n",
- " train_accuracies = autograph.utils.set_element_type(train_accuracies,\n",
- " tf.float32)\n",
- " test_accuracies = []\n",
- " test_accuracies = autograph.utils.set_element_type(test_accuracies,\n",
- " tf.float32)\n",
- " i = tf.constant(0)\n",
- " while i \u003c hp.max_steps:\n",
- " train_x, train_y = get_next_batch(train_ds)\n",
- " test_x, test_y = get_next_batch(test_ds)\n",
- " step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n",
- " step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
- "\n",
- " train_losses.append(step_train_loss)\n",
- " test_losses.append(step_test_loss)\n",
- " train_accuracies.append(step_train_accuracy)\n",
- " test_accuracies.append(step_test_accuracy)\n",
- " i += 1\n",
- " return (autograph.stack(train_losses), autograph.stack(test_losses), autograph.stack(train_accuracies),\n",
- " autograph.stack(test_accuracies))\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 37,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- },
- "height": 789
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 11529,
- "status": "ok",
- "timestamp": 1531163743912,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 240
- },
- "id": "K1m8TwOKjdNd",
- "outputId": "59db8f19-23a5-413a-e9d0-fb756b0e4757"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Duration: 0.592790126801\n",
- "Duration: 0.594069957733\n",
- "Duration: 0.591835975647\n",
- "Duration: 0.592386007309\n",
- "Duration: 0.595040082932\n",
- "Duration: 0.594245910645\n",
- "Duration: 0.624264001846\n",
- "Duration: 0.6021900177\n",
- "Duration: 0.592960119247\n",
- "Duration: 0.599496841431\n",
- "Mean duration: 0.597927904129 +/- 0.0093268291102\n"
- ]
- },
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYkAAAEcCAYAAAAydkhNAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzs3Xd8FGX+wPHPbMum90IKvQSQ3jtSbYCAqHee9TxPT0VF\njztRT+9UzvMOsdzPUxTO3gURsYsgTRBFmvROQkJ63T7z+2OS3Wx2EwIkBC7f9+vFi+zO7Mwzz84+\n33nKPKNomqYhhBBCBGFo7gQIIYQ4d0mQEEIIUScJEkIIIeokQUIIIUSdJEgIIYSokwQJIYQQdZIg\nIYQQok4SJISow6ZNm7j44oubOxknlZWVRWZmJqqqNndSxP8gCRLilI0ZM4YePXpQXFzs9/6UKVPI\nzMwkOzsbgD//+c9kZmaybds27zpHjhwhMzPT+/raa6/lgw8+8L5+4YUXGDt2LH379mX06NHMmjUL\ngMsuu4y+ffvSt29funXrRs+ePenTpw99+/ZlwYIFAWn897//zezZs8/oOPv3789nn312Sp958cUX\nmT9/Phs3bmTUqFFntP9qtfMoGEVRGmVfQtRmau4EiPNTeno6y5cv55prrgFgz549OBwOv8JKURRi\nYmJ4+umnWbhwod/7wSxZsoRly5bx6quvkp6eTkFBAStWrADgk08+8a537bXXcvnllzN9+vQzOgZN\n0xq9cF21ahX33XcfLpdLCm7xP0FqEuK0TJkyhSVLlnhfL1myhKlTpwasN3XqVHbv3s2mTZtOus3t\n27czfPhw0tPTAYiPj2fGjBlB161vNpnVq1fzwgsv8Omnn9KnTx8uv/xyQA8u8+fP51e/+hW9e/fm\n2LFjLF68mEsuuYS+ffsyfvx43n33Xe92atcGxowZw6JFi5g8eTIDBgxg1qxZOJ1O7/LS0lIOHz5M\nt27duOWWWzhx4oS3tpOXl4emaSxYsIDx48czePBg7rnnHkpLSwFwOp388Y9/ZNCgQQwYMIAZM2ZQ\nWFjI/Pnz+fHHH3n00Ufp27cvjz322Enz8cSJE9x2220MGjSIiRMn8v7773uXbd26lenTp9OvXz+G\nDx/OP/7xj3r3D1BeXs4DDzzA8OHDGTVqFE8//bQ3/48cOcK1115L//79GTJkiLfmJ/53SE1CnJZe\nvXqxdOlSDhw4QNu2bfn888956623mD9/vt96VquVW2+9laeeeoq33nrrpNt8/PHHSUpKYtCgQXTr\n1g2D4dSvY0aMGMGtt97KkSNHePLJJ/2WLVu2jJdeeol27dqhqirx8fEsWLCA9PR0Nm3axM0330zP\nnj3p2rUrEFjr+fzzz1m0aBEWi4Wrr76aJUuWcNVVVwGwZs0aBg8ejNVq5aWXXmL27NmsXLnS+9lX\nXnmFFStW8OabbxIbG8tjjz3GX//6V+bNm8eSJUsoLy9n9erVmM1mdu7cSUhICPfccw8//fQTU6ZM\n4YorrmjQ8c+aNYsuXbrw7LPPsn//fm688UYyMjIYPHgwc+fO5frrr2fy5MnYbDb27t0LUOf+AWbP\nnk1SUhLffPMNFRUV3HrrraSmpnLllVfyzDPPMHz4cF5//XWcTifbt28/5e9LnNukJiFO25QpU/jo\no49Yu3Yt7du3JykpKeh6V155JcePH2f16tX1bm/y5Mk89NBDrF27lmuvvZahQ4cG7W84E1OnTqVD\nhw4YDAZMJhOjRo3y1lz69+/PsGHD6q31XHfddSQkJBAVFcWFF17Izp07vctWrlxZbz/Ee++9x913\n301SUhJms5nbb7+dL774AlVVMZlMFBcXc/DgQRRFoVu3boSHh5/y8R0/fpzNmzdz3333YTabyczM\nZMaMGSxduhQAk8nEkSNHKCoqIjQ0lJ49e3rfD7b/goICVq9ezZw5cwgJCSEuLo7rr7+e5cuXez+X\nlZVFbm4uFouFvn37nnKaxblNahLitE2ePJnf/OY3HDt2jClTptS5nsVi4Q9/+APPPPMM8+bNq3eb\nl112GZdddhkej4evv/6ae++9l+7duzNs2LBGSXNKSorf61WrVvH8889z6NAhVFXFbrfTpUuXOj8f\nHx/v/Ts0NJS8vDxAb/5at24d999/f52fzc7O5o477vDWjjRNw2QykZ+fz5QpU8jJyWHWrFmUlZUx\nadIkZs2ahdFoPKXjy8vLIzo6mtDQUO97qamp7NixA4C5c+fyzDPPcPHFF5ORkcHtt9/O6NGjA/Y/\nefJk7rnnHrKysnC73QwfPtybZk3TaNWqFaDXMp5++mmuuOIKYmJiuOGGG864r0icWyRIiNOWmppK\nWloa3333HXPnzq133WnTpvHyyy/z1VdfNWjbRqORiRMnsmDBAvbu3dtoQaJm85HT6eSuu+7in//8\nJ2PHjsVgMHD77bfX299Rl23btpGenk5sbGzAfqq1atWKuXPn0qdPn6DbuP3227n99tvJzs7md7/7\nHe3bt2f69Omn1AGelJRESUkJlZWVhIWFAXrtorqW17p1a2+g/uKLL5g5cyYbN27EarUG7L9du3aM\nHDmSkJAQNmzYEDQd8fHxPProowD8+OOP3HjjjQwcOJCMjIwGp1mc26S5SZyRuXPn8uqrr2K1Wutd\nz2g0cscdd/DSSy/Vuc6SJUtYtWoVFRUVaJrGqlWr2L9/v7dJ5FQkJCSQlZVVb4HvcrlwuVzExsZi\nMBhYtWoVa9euPeV9gd7UNHLkSO/r+Ph4iouLKS8v97531VVX8dRTT3mHCBcWFvLNN98AsGHDBvbs\n2YOqqoSFhWEymby1iISEBI4ePVrv/quPMyUlhT59+vDUU0/hdDrZtWsXH3zwAZMnTwbg448/9nZI\nR0ZGoigKBoOhzv0nJiYybNgw5s6dS3l5OZqmcfToUX744QdA76PJzc0FICoqCoPBcFr9SOLc1aQ1\niTlz5rBy5Uri4+NZtmyZ9/3XX3+dN998E7PZzKhRo7jvvvuaMhmikdW8oqx9xVjfVe9ll13GggUL\nKCsrC7p+REQEL7zwAgcOHMDj8ZCamsojjzwS0M7dkCvriy66iI8//phBgwaRnp7O4sWLAz4XHh7O\nAw88wF133YXL5eLCCy9k7NixdW6zvv2uWrWKv/3tb97X7du359JLL2Xs2LFomsby5cu5/vrrAbjp\nppvIy8sjPj6eiy++mLFjx5Kfn8/DDz9Mbm4u4eHhXHLJJd6C/brrruNPf/oT77zzDpMnT+aBBx6o\nN23z5s3j4YcfZsSIEURHR3PXXXcxZMgQQB/59cQTT2C320lLS2P+/PlYLJZ69/+Pf/yDf/3rX1x6\n6aVUVlaSkZHBzTffDOg1qOoAkpCQwAMPPEBaWlq93404vyhN+WS6TZs2ER4ezuzZs71BYsOGDbz4\n4ossWLAAk8lEYWEhcXFxTZUEIZpcQUEBl19++Uk75oU4HzVpvbB///5ERUX5vff222/zu9/9DpNJ\nr8RIgBDnu7Kysno7rIU4n531xsNDh
w6xadMmrrzySq699lq/KRuEOB+1bduWSy65pLmTIUSTOOuj\nmzweD6Wlpbz33nts3bqVu+++29t5J4QQ4txy1msSKSkpTJgwAYCePXtiMBgoKio66eeasOtECCFE\nHZq8JlG7cB83bhzr169nwIABHDx4ELfb7R1bXh9FUcjLKzvpei1BYmKk5EUVyQsfyQsfyQufxMTI\nM/p8kwaJe++9lw0bNlBcXMzo0aO58847mT59Ovfffz+TJk3CbDZ7JxgTQghx7mnSIbCNTa4MdHKV\n5CN54SN54SN54XOmNQm5NVIIIUSdJEgIIYSokwQJIYQQdZIgIYQQok4SJM4zTo+LVza/T7GjpFG3\nuyn3Z37I2dyo22xqdredV356jzJn+clXPgUbjv/ITye2Nuo2m1qlq5JXfnqPSldlo253bfYGtubt\naNRtNrUyZzmvbH4fu9veqNv97tg6dhTsbtRtng8kSJxndhXu4dM9K1ifffJnRp+K/+54i1d+ebtR\nt9nUtufv5NO937Ix56dG3e5rO99l4fY3GnWbTe3nvO18uvdbfjyxpdG2qWoqb+36kBe3vdpo2zwb\nfszdwqd7VrDlFIJbeXk5S5Z8UOdyl8fFu3s+4vktCwOWzZ59NxUVDb9QWbRoAe+8c/6cXxIkzjO2\nqqujEmdpo23To3q8f2eX57CnaD+bT2zD5razLf+XRttPY7N5qvLC0Xh5YXc7vH8fLctmT9F+tuRt\np9JVyY6CXY22n8bmPS8aMS8qatRKjpQeY0/Rfrbl/0K5s4KdhXsabT+N7XTyoqyslCVL3g+6TFVV\nSp2+4bQHS46wt2g/2/N3UuIo47d/voPw8IgzS/Q5TJ5Md55xePRCrDELgzKX7yro8Y1PBSy/reeN\nXJDQtdH211iqC/STBUxN0xr8dLfSGtt64oenA5bf0/c2Osa0O4VUnh32Jjgvam7rH5ueDVh+/4C7\nSY9MbbT9NRbvb+QULqReeOHfZGdncdNN19C//yCGDBnGf//7EvHxCezbt4e//vtJDr69FVeJg9+5\nryVhSAbx/fRj3/nUOhYufJ1Qzcp9982kR4/ebN++hcTEZJ54Yh4Wi6XO/e7du5t//esJHA4HaWlp\n3H//w0RERPD++++wdOliTCYTbdu245FHHmfz5h959tl5Veeywv/930t+j6ltKhIkzpCmaVS6bYSb\nw87K/uwN+AFUuioJNYX6P6rT40QDQoyBJ+zJCpYiR3GD0na28sLudLN8/WGUVjag/vS//90vfLbu\nOM/eNZKIUDMADo8TBbAEzYv6b8CqeUVZH1VTsbvthDVSXmiaRoXd7T2G2nwFY93pq3BVBnw3drcD\ng2LAYgzc7skK2XJXxcmSDVTnhYMwc8MKtPdW7OOHXScatG4wFW4LDvcoVmwzs/HrdQAMyEziyjEd\nfevUyovbbruTAwf38+LLr2I2mNi8+Ud27vyF119/j+jYBP67bgWtL++KMdSM6vKw98VNRHdLxBRq\nBgUq3TZCjVaOHTvKX//6d/70pwe4/e57eP29pfz2NzP88sKtur2vH3vsEWbN+hO9evVm4cIX+e9/\nF3DnnbN4881X+eCDZZhMJm9T1jvvvMG99/6ZCy7oid1urzf4NCZpbjpDa7K/Z/bqR065KeLjNQf5\neV/+Ke/P4XECUFpHYXao9Ah/XP0Inx9a4ff+X7//J/ev+Rvf/nSM1Vuy/ZadLEgYGnCafL8jh/9b\ns5TZqx9hX/HBk65/Jj5YuZ/l6w+zeV8OUHfBvadoHyvdr2BMPszOw75JJB9c+zgPrw8+HczJCsaa\nP/D6fHrwK/64+hGOlB1r0Pons2brcWY+s7rOc8ZRVasqDfJdbj9YwIsrVzB79SOsy97ot+yPqx9m\nbpDaI5w8YFYHppNZvO8T/rj6YXIqcimtcFJS3rDPna7qSSTUOiaT2JSzmdmrH+HH3J+976mayvGK\nHP616d/e97p1605KSgqb9+ax+VAWeeuPsvv5jex96UdcpQ6cBbaqHYLNpTdxpbRKJTw2FVXTyCqP\n5ONv/fuI3t71IV8c/pZKVyUVFeVUVJTTq1dvAC666FJ+/lkfPNKxYyceeeQBvvzyMwwG/TG2PXr0\n4tlnn+KDD96hrKz0rD0mVmoSp2HBxzvYe6yEf/5hqLcw/jF3C93jMxv0+bJKJx+t0QvSRX8ec0r7\ndtRoYlE1lez8SkLMRr79KYvIcDPuhJ0AfHLwCy5u53sUZ/VoqDdW/ozmDGNEr1Q278nj25+z6DOo\n/hExFQ0YMbNg2S9Y+3+PYoCteTvqbJL5YuMRsvMruOHizAY3AdWWX2KvSpcNrHUHuR9z9R+oOW0/\n+SU27/uV7uoaSBnRIf5TFgQrZGtqSF4AfHZIn/5+V+FeWkemN+gz9fl84xEA3vxyN+1aRREd7n8V\nWV3DLA4S5J56dwuWjpsxxsFnB74lM6IncVFW3KobVVPJsxVQ6bIFXOmXniRgNjQvvj26BoADJYd5\n+TW9M7m+8/7KMR39rvpP1YKtr7Ilfwfx1jj+NvTPActXZ30PwMpj6+iXrBfQDrd+8XWsPNt7IWa1\nWvn+lxxe/mQnDsMhyg8W0emW/hhMBvYt+gnV7evLq3BV6udihYc5C77n4RsGgKKA6vFr7lx3XH82\neHFVAK5rVqR//vMZfv75J9asWcUrr7zMG2+8z29+cwNDh45g/fo1/P73N/L008/TunWb086nhpKa\nxClwq26e2byAH078SEGpHYfT472aCjGGeNf75sh3vLj1Vb8TQFU13B6VnIpc/rX5GZRQ31Xatz8d\n4+/vr+TR7+dxsOQIa7cdx+HST8D/W7KNj1Yf8K5bXRiomkqFq5K/vPYdf1n9L778ZQuLt6zh88O+\nGsTS/Z+xL6uEQ8d9P3Zr7+8wZeyiuNzBc4u3sf1AIUeK6q/RbMr9mbkb5/sNNV26/zNe/+U9lu7/\njEXb3gJAMah6Xph8efHujs94euN/ASgstfPuin2s3nqcrPwKjpZl8/iGpzhRmee3v6Nl2Ty2YR7Z\n5Tne93KLKtm63z+dmuL25ond7aDAVsjcjfM5UnqM9cc3sSZ7g54uk4tNRWsB/5rAnLWPsmz/537b\nDFbI1rT++A/8bd1T/HI01/veB3s+5u3di/lw7zLe3Ok/QsZa47xYduALXv/lvaDbPVhymMc3PEWB\nzVfj+WTdIf65dAWPbZiH26TnfUGpg4cXbQz4fHXBVu6swKN6yK04weMbniK7PAdj4hGMcXp6C50F\nzPnoTUBvgqz2x9UP89lB/+e6nKyG+d2xdTy56Tm/zv63d33Ih3uX8fbuxby3Z6nf+jWbOt/asZS3\ndy8Out09RfuYu3F+wP73Fu3n8Q1PUWiv/9EC3tq2sxRVVckqP151nuWz4sh37CvRL9AOlBxi5VH9\nvDBZTagO/Td376qHvKPElq87DIDqKccYasZgMmDPq6DymH/aVmWt48Utr+Fw6efXL4cKMcYfxx
BV\nwKJt7/DRvk/91jcaDISHRxAVFcX8T57jw73L+OKLT+ndW3+ee25uDn369GPUlRPILTpBXkk+WVnH\naNeuPddccz2p7TKY99WzJ63tNQapSZyCnIoT7Cnah6U92PLTKat0en8gZoOvTXfxvk8Avc020hKB\nzeHmLws3UlLhoP3wneQ78jC3dePcOQiA17/cQ0i39RgqS3hr2yfsX5OJ060yuFsyP+7O48fdeVw6\npC1mk4Gf9h2HcH0/eeXFmNP2Ywgvw9JpM4rZ6ZfeLw9/y9KNIaCohA7wvW9udYgdBwu9r/fmnIB6\nmjezKo4DekHWM7G7d9t+DON8f6tGlq8/xMSBrfkuV1+v0mnn281ZvjxadYATSV9S5D7Bkn2f8vue\n13uXvbztNfLthXx5+Ftu6P4rAF77fDe7jhTx9J3Dvc0ImsFX4Jc6S1m6/3Oyyo/z6s53yanwFeIA\nOZaf0bRf+RWMAJ8fXsGkDhfhcquYjArlzvrb2bPK9bx4atkqXv7DlWiaxrfH1vitM639ZO/fRsXo\n21dV7eKarlfgdmuYTAYMVVeYL29/g2JHCV8c/oZfZ16h59F3B7D2/Rqlwo0lej/kdtKPtcL/GMBX\nw9TQKHOV89buD8muyOHdPUuwtPNv/jOm70ZVNW9hWu2Tg18wvvWFAJiMBspO0udwtFxvtnzmkzUc\nO2TmsVsGegNztRmdfHlRZvPtb22uXjhf3XlqQI3y+S2LcKluvj26hovbTCTErOfh05tfBGD98U1c\n2m48ALuPFHGi2MaInr4O9OoLKZfqZua/vyWmzyaKXAUs/Gkxx5z7/Pb1/t6lDE8dginUQljraHb/\n3wYiO8VT1qmQeHcYefl6HkR2TKR4j8bu5zcSkhBGeEaNxzIr+nmhOt1g0M/N3ccKMISXgtnDT/l6\nE1JbbaD3I9UjCufMeYTbH7oV1a1iNbbnuXlP4Ha7+dvfHqKiooKs8mzihqSyrWwnP727nq9WriUq\nwoojvpLWrbuy5siPXNppNJt2nWDBsh08evMgkmMbt09QgsQpcKo1flRmOw98thBTsn5SHMwug05Q\nXKO99VD+CY45NnAiK4SC0qorPbu+XDHbMbfZQZG9n/66qmbhcIIhooifStYRltMdU9pe3Fkd2X20\niHatoqh02akudnLKCqGqoNRUI8Eab4xJhzHGBnYCHsotxZSxG09eGnllpZjiT378Jc5SPKqHd/d8\nFLDM0vlH79/rtmdxfKf/8v15eazKWYkhIhq1PI6f9+UTYq3EEAZF9iLe3r2YSe0mEm4OI9+uB7Cs\nvEp2F+7jYMkR9pcWYmxVRG5RX28h6cHlS5ujzFerMwSPeB/t/oYDFYFDNw8cL+KJFW9zUYcRVIQ2\nrAlFMTsoLKvk2R9eC1h2z/JnMMbofwdrt88qLuDR5R8S7kjjst59GNsv3VvDyass4I1fPmRC+jhA\nQzFVfb9oGKLzUKwVaPZw5q/8kLEZo+nZQf/i7DX2U+Io9QaNEEMIwXy460t2lwQOb571/CqsbfZx\n54gplDkaNvZ/b24uqjOR//z8SsCy/+54y/v3m9/8ArT2W17hquTrI6vol9yLjMg0QC/cAbZnH+az\nH/7Db/tNZc+RUm+7h8vjYmveDoocJbz31TGchnKslssYkJkUkBc2tQJPeQWGEDiUXYkpITD9d727\nEEvCCdpc0d3v/ZFdb+azQ1/iyu6AMcRD+2t7BT3+rvcM1f8IM9P+sutRy9zsMn1B0jD/Y/3Pj29h\nSoKUC9sxrKsejDt27ESnW/rrad00jle2f8rvYy7i+edfBuD2FbMBOFh0lMjR7chQ+4ECof2/AuBY\nYREbsn7m5bVb0KLMvPnTl8wae3nQdJ4uCRKnoGYbrDltH6YkX6dkYUUFJ4pt/PmF9YRWXTA89+UK\nzK33YHEkAnowsLtcYAKD1YbBepR3dn0EhlQUo95UU+YuIaTbIQ6ocOLQLsxpZaglCRzMLtVHthh9\n7aB5FUUo1a89vivWmixtdwZ9/5eyzZhbHcQYdxzNHt6g4/9h3xGiLJGsrXW1CGCM8jUBFJTrV18H\nskuh6nlSH21ZiydxN0nx6VT8kkxZpQsUPcAeLc/maHk2HtXD5R18z4o+WnKCZ39eAICSEYrZamNP\n3lGKyvRCwK25vO2lr3+zBWua/v3UbO6q6evsL4O+P3/VYsytDvFVXgGt4qKCrlObYnHw+sZvyVMO\nBCwzxviaz9b+coztG7fSKsF3dffNgQ2YUw9gK6zgza/CGd6jlXfZnuL9wH427shHsfj6MjymckK6\n6PtS7WHsUyvZvszC87dfQojZSIXTd3dxsaOU/LJyUPxruDWtzAn+yGBH/C94Ig7z+MpFGMwulAZc\nlCoWB8bEoxy1B+ZFzZv7vOcqqve99cd/4KsjKylxljI67lKS43z9IsfdBzElw8vfL8dTmIK1p/7+\nCVs+Xx1ZqW+pVSgWq421u7oxIDOJVT9nkVdS5i3ZFIsDxVhV4/TUUdyl7CWwbgafHfsUU0o2isUB\nJleQNQIpZgemlEN+v4dqNcuL6lpczYsIU9IR8i17WHUskl9lTvf77M8F+gwAxqSuqGUx3vcLHYW8\ntns9lnagOqzsx065czwRlob9phvC+MgjjzzSaFtrYpWVwb7Ks+dAyWG25usdb4Zw/zZJm1bGVxty\nUEwuTAl6NVwJsaGYXHg0D1HGeLSoXNyR2WDw/UgqXXYcDg1jjN7e7tZcKFVVVqemH6/mslBuc7P8\nl42Ya5xoRwrz8eBGsThQ7WEYQho+DYGt3IwSVqIX1KoBxXLyvM0vUNlS9JNfM08wSkglmtuMS7Hh\njDwKQKm7BMXkIizESNu4ZE549BpO9bGCHkC//fkITqte81EMqrdgqb6iLiyAwjIbhpgTmBJ9zVfl\n7grsaiWqwYmzPAynoeFj5F12M4awcjTViN3lRDOePC80l4Vi6x4weOpdr9RVQk6ek30ncjDF630s\n+bZCPIoTNAXNGcIvBbsoVI56gyaAR1XB6PYWNqqmevNAqSqwNEcoTs3OcechNuf7RuqU2MvJrSxA\nMahY3LGUeAoanBeax4jBagO3BUxO7z7rz4sQTMlH/L7LYBSLHTx6Xlf3kezNO45mcIJqYOmqY3y+\ncxPG6Nrp1VDMToyR+lBsg6J4h996a1r2cBIS4J0f1uKO9J0XitGNIVyvpau2CO/fDaPvV3OEYbBW\nevvc6mM1hKElHORkYzKK7MWEmUM5VHrUOzKyurywGq2YFCM7Cnazq2hvwGcNFjuGCH0gSnGpSw9i\nVOWFAqnhKWSX5fHV7p8IDzWSEZ9yCsccSB46dAq+PrKKJfuWn/X9qg4ritlR74/Q6orHbm54YeAp\niccYXYBqCwODekoBpjmp9lCUEDuKUndeeMpivAVKQ1Sv7ymLwWCtQDE37KrxbNI0Agoe1R6GElJZ\nb4HkKY0NelVbF9UWjiG0Ak9REoaofG8N95yiKX4BF
cDgCkc119+Hcqp5obnMKGYX7rw0jAlZJy34\nm4OmKgHlQrgSQ4Wmn/9RxlhevmLuGe1DRjedgoYO+atLvJKBY08f7FtH0J0JeAqT/ZarDmvQzxlC\n7CgGDdVWdxXSHO7f9q1WBD6NynW8LVHGOAAUa9WxeMz1Bgj7z6Ow/zwKrUZzln3bUFxHO3tfR+YN\nDPbRevVI6EYXzzguib0ex56+eIoS/ZZrzuBNRgarDUXRSAlLqnPbhhD/7ynCEBOwjiu7Pao9tGqb\nVeurxnoDhG3zaD0vavwm7VuH48rq4H2tHAvebl0fd0GKngc7RhJybBCeEv8OIs0ZErSAMlj1AFHf\neWGw+heccdbA58m7jnX05rdiqToXFK3eAGH/eRT2LSP939s6HFe2b+iz89Cp36Xvzk/Fsacv9q3D\n6eIZj6fUP72aMyQgQADeAFH9nQYTHu3/G9GcgX1XriNd0NxmvfCtOhcUs7PeAGH/eSSGPaP93vvL\n4D8yMm2I97Vy/DTy4kS6Ny8ce/qilvs3haoOa9ALx+oA4cpqT6ZnwinvtzYJEqegooF3mNZU86Tt\nndaRSHcGwzp15IL4bqiV/gW5pyBwigPV7msUbu0aVOd+as+EqlZEB6wzoFM6mQn6j9gQot8rUF1t\nrYvmDNWX+CEEAAAgAElEQVT/ufUflKcwGc0WhafYV6ifONywdvwQzXe87aPbMHP8BC7t050h6T1R\nbf5z37jz0wI+H2uJ8/49o/OUOvdTu+ksxRqYr5rL4s2j6lFhgc0ctbisaM5QcJur0tgKzR7BbWP1\nTkjNY6TyRFx9W/AyeXzHO65rT4a36Y2zIozi7FjC8Q9qwfIiMdQXSK7ObHheVHcO16S5Qogx6kG3\nunmvZr9KMJozFM0Rhqbqpaf7RDqaPQK1TC/UFbcVQ3lyfZvwqvkbUUvjUIuT0OwRzBw/HrPH/zcS\nLC9q/kZcR+oujO2a/8WDWhl43mquENTKSL/C9+R5EUZFse+i5sKM4SSHJdI+uq2+H1s4tvyGnRf+\neRFPa2sHnrxxAjFqBqrD/2LAk19/eeHOaUduzplXf5o0SMyZM4ehQ4cyadKkgGULFy4kMzOT4uKG\nNws0txK7HiSc+3riPHBBwHJ3rm80g0kxMrHNGK7v6RtpkBgew5O3DuWGSzIZkJmEBf9eQa0yArVC\nP3E1txlrcRfu7nsLIYVdcB7O5OpBQ7ilx3WMib7K70q+pgszhhObeyFakE66CIsVax2dusE4dumj\nLtISw/l1l2l0CumD65g+DFOzReI82B37lhGgBu7LneO7ycegmXBldaBnuO/KKtri+4EO79kKzeWf\nLrU8mhBVL8Q1p4XBCcO4p9/vmdhmDFd1nkpmXCda20Zh3zGE0OIuQdPvym6HfcdgIq1Bri5VI3ER\nDe/cc+z0jSF2HuhBO1MvrulxGXfP6EnvVp0w5/TAsW243pZfy7jWo3wvPGZcWR3I0Hp73+qYlEy/\nLr6g2ybBv1allsZ5a5kxIdFc1HYsd/a+hQltLuSazCsY1aE3N3b7FTd1vhV3bkbQ9F/Udix/GjAz\n6LQsqEZaxTb8OciOX3wXK7+94BouTB+BO1uvTd13yQRMORdwRer1PH3b2IDPTmzju4lOc1kYlz6W\nXtG+mmiUJZKkmFCuv0j/Tnu39Q8KanECmks/BtVhxZXVAeeuAbiy2+Hc34P/u2kGkzMux75tGO78\nVgRzabvx3D/gbvp2DGyr1zzGOgeBBGPfMbjqLwXnvl70iBjo/b77JffCmNMdx85BaM7AVoKaNVDV\nYcV1tBOeE74y5OaL+vDAdf2Ij7bStXVsQO26Z2J3NLf+21PtYSQ5ejHAPEnPi329MCsh5BbZOFNN\nGiSmTZvGwoWBU+vm5OSwbt06UlPPvcnB6lNaNSTQU5jCr/sF/gA8RUmYNP1kGNyqP5M7XESneF9h\nGW2Jwlw1Nj40xMTYnv53lY7r0554RV/ffbwdtw28gi4pqTw44RpmjppCu1bR9Eq8gFsvGs2Q1L4B\n+7cYLUzpcAndEzuCGniiR4eFBi8k6qCWJtA5PZpHfzuIEe17cfewX9Exwfej9eRloDmCF7Ttw3wF\n99jWI7ip7xTGd/cNMYwO8QWJLq1juWxArbvV3RZaW/Uf0YiUkVzbcwrxobFM7nARI9P1YHPjsDG0\nj23NNf0D7941ahbcxzqhVcQQExaYxoGdU+nRNjHg/eAU1DLflbtakkTfiFGM7NqJnh0SUBQFa2lH\nvZYRZCBy/2RfQBiRPJJ0T18m9KiZF5H0aB/PA9f2487pPRjQwX/opOa2oBbrV/qXthvPpPYTiQ+N\nZUqHixmaqhew/VP6MDyzG+68wLu7oy1RXNZuAq0j0/1u7vPyGIkNb9jYejNW1HJfE1C/lJ5c0XlS\n1bFDZps4nvn1dYy+oAOh5sB9DWrVz/t3iqMPUztPJMnqazq8uF9nnrh1CKN66+dZp2T/gtyshGK2\n6e+5j3bGndUJzRmK+1gXopztCbEYubDtIDRbJJ4TgQEzMTSei9uOIz0ylVCTr+D22N3kbzwGqpGo\nBk6al2hNJC1Mz++iA2tw5SUwIX0CMSH6xY1BMeDJbcvR1Yvo0yrwvHDXSJ/7aGfcxzv41ajbJSRi\nrJp646qxnRjS2f/u6mtG98BToo/p7RcxkocvvoabxvfjotYTGNiqNw9e15/fXdatQcdSnyYNEv37\n9ycqKrBKN3fuXGbPnt2Uu24Sdre9KnIb6N0xAeeBC3Adb+tdrrlCvEMOnarenhlr9TUd1CwYAdKi\n/augQ7q05t5xk8iwdqBP0gW0T9XXj4uy0qO9fzt1sKvjtlGtMRtMXD6iHWN6tw1YnhQdGXQiN4BB\nyb4r5R4J3fDs0a8WQ0P8awmpCXqB2yY5kumj2gNgtRgZETfB7yqxf4cMbz+GBzeDu6eQFOYbpB5l\n8b9ybZ/oX2DfNLEXv+47ngviM5nQZQDBJMWE8vQ9o2mbFNjOnmpNp/r0DgtSWA3skkqYOXgfUBK+\nK7wL4rsywBTYnFM7Xyod+iibET1bcUWnyd6bvQCiatSaYqPM/OWGAXRKTA1Y3iEtmj6dEompdZ7E\nhUXgzm1N97jMeqd+iQg1c8flvQPe7xjTznvDWkiQIHHLZb2wBAsewAU19ucpTmRm35v58zV9uWRw\nG349rpN32dSR7bk6yFQal3e4xG9Yc3SN731YD72wbxXh++7jQ/2bSaNqTZvisBm5od/FhDvTCPf4\n1xTiIvVjsFTdfKcFuVDqUDMvatSqPTYXBRuzQDXSvU3w/q4orSpgadA9PpPf9fwNf/vtQB67eRCu\nnO/p3ymW1sn+zaa3XX4BUWEWRvdJ49J245na4TLvsh6tfenXtKo01xiOXvO8iQg1k5nu33wXbg7D\nfbwdnqIkojTftqaN7MDvJnUnIymCzDaBv41Tddbvk1ixYgWtWrWiS5fgTQTnMpvHjuYxcdnQNsRG\nhnD7qEvI
Lark43J9UrBhXdpy1LoHm60Mp0cPEgbFF4drF4yt4xOgxs2wVpOVWGsMfx76+5OmRfUE\nxvekqnbq0BATaXHRUKsp1WwwB5351J2bQaeu7diQq88rc2vPG3js500cKC7lRLF/dTU1Xj+JI8LM\nXDSoNf0zk/zu8PyialqQoZltWZZnxoXHe5ezuUaAql0QJkf6t8P3aJNCpCWC23rdVHcmVAl2TB0T\n0zhgVOjRPh6rMbBJM8RkCRowx2SMQHWGcCJ3PwC39bqRNVuP8x07SY4LI7ewsupY/PPfVhUkwqwm\nLswYDsDyg/oNT5E1xqxXXzyEmnxBPrrWeRFaK3g98KuhoBmJiTh5U2HbpFjwv6mYpDBfIRwsSMRH\nhpNlD8yLi9qOxaN62F6wC0014NzTj/bTWkMMdM7w/74mDW0bND3j24wG4KP9+rQU1hpX7waT3u6f\nFOEryOLC/M8Lk1KroHeb6ZPRgT4Zd1FYaueTdYcoLHOwdX8B7VNr9cMFCRLJfnnhO2+Of70fZ5GN\nrG9e5aeSNjAklBNrjlC8IxfNozFw6BC6TEjj+2NZZL2/i0r1GBvUL7n++pspLMynoqyIH5bPZ+/a\nGJ555j/e7V7QLp7UhHAsJiOXtBvPV199zu4X9PuMLhiXAfGgqRrH132GpaKcUKuJskojiUMyWLZk\nid904Rf/fprfsViMFq4Y2Jf3V0YzaNiZzw9Wl7MaJOx2Oy+88AKLFi3yvncejcDFoerjvKunCejd\nSb8yzv2lH9sLdnLjhT35pdDK81sW+rVDD08dxPaCXQFBIikyBqshDLtaidUYQmxIYGdzXRKjwqFW\n2RcT4vvhBmtW0DQ1aHNT/05paJr/SJZRvVI5kF1KXJR/gZWaqBd4kaFmjAZDwBQAvRN7cLj0KKHm\nEG7ocSUvbXuNETVGeQxI7sPBksN+hSRArDWWMFMolW4bEebwU5pu3BLkhrGksASeu2sgRqPiDX41\nGRVj0OASbg7HYFGgxqweQy9IoazSyYCuScz+z3oAbE7/+wc6pkWz+2gxrZN833G3uC4U2IswKAau\n73Y1r/7yDgNTfM2EvRIvIK8y3y94AqSEJWMxWnB6nMSERBMdFtrgyRCDHVPNGlywPqkQoyXoeRFu\nDvNODKmgMLBr3SPKTqZDdDtcVQHy6i7TeGf3Ynom6E1ukeEWPMUJ+n00VjOL933C5hPbAPzOS01V\nCOm1iofW1Zi7KgrUCI3YeA87Qow8tE7Pp5ThGk63G1utAVo1A2bN30ir8R2wn6jgv68uZEPuJj78\n5kMchZV0/v0ANE3jxNLjxOxPpPRoAdaYMP47T7+TvLKygrCwcN59922ee+7FoC0n1fLz83nhhX8z\n8s5LiYyIZNfrW+macgE/VmynbXQoz73yKgZF4R9rn8YaFsabz/pPF+4xaygoaGjeAQgXDWrNhX3T\nsFqarig/q0HiyJEjZGVlMWXKFDRNIzc3l+nTp/P+++8TH3/yeSESExveudbYVE3FqTrQPGHExYb5\npeXeUTd7/05K6s/ozP5+n52ZeEOd231txrzTSs8V4zP5oNaDtNITEr3piqoMbI4Kj7R4O4Nr6tI6\nkXCr7weTmBjJ1LGdiYsNo0fHBOKjfdsaGGml87pDDOuTHvT7mDPmD96/xycOYXy3IX7L/zj6ljqP\n6ZXpwaesPplWyYFV6tZJyaSn6UEzrDwwiERFhxBvCPxBJ8fGUOH01Z6qj/G6SfpAhTk3DODtL3cz\nYUg7IsJ8BetDNw/mx125XNgvw1ugPzL+bu/ySxNHcWmPGh3YwANjbq/jiCJ544pn6lhWv7TkwFE0\nHVulk5igH4fxROBFWWxsGDFB+pZaxcXjKKq6i91i4qFfDQlYp6H+fpGveXla4nim9fY1x4VHWnHu\n0X8z6VfHEFZhwWioDopGEsPi0DQoKncQGm6qsaxqDYOC2WQIeM9kMmOrNWo9M60NidF6XriPBd40\nmZYcR3hZCGX7CynfX8ie/2xE0yDaGEFFUQmhyRGc+Oogr722gFGjRtG/v55ugwHi48OJiQn8TZjN\nRmJjw8jOPsDQoUN4YsajAHxQ8QH79+/nrdue54rPruClBc8yatQo/jnlQRRF4Xfv7+Lvf3+YcePG\nMW7cOMLCwnj3qucbkNuNq8mDRM2aQufOnVm7dq339ZgxY1iyZAnR0Q27gm7Om+mqH4mIx4Tb4W7W\ntCQmRlJQEDivTqIh2Zsui0sv2N0FKWSkWjjuOEKoO5KcysB5nBw2D1FVtZBu8V282+jeOgbVGXis\nf/61fjXc3Dc3gp4X+fmBeRGlxnrTF1o1jHJoqwHsKThKvjMHg91KaVngyA9bhYd4q3612S+pV8Ax\ndkyJ5KHr+mOrcGCr8B9336NNbNC0nC2JiZEUFQQek8UZ7j2OKEUPqGMyRrA9fycnbPm4yg0Ulgam\n21WpkWLR+076J/Vusu+7ZhlRUWbjorQJXJR2ZuP7ExMjOZ5bxF0r5/i9b7BZyat6/kiCUf+eL2k7\njq926M2kFSUuCstKQdNIG92RqD567emuPr+nwFbIG7ve56a/3Ul8bgT/+Mc/GThwMDfccDOqqlFQ\nUI7LFdjE5XJ5KCqqpKSkEpvN6c3HsjI7lZVOHA6FhQvfZMOG9fz3v6+yZMnH3H//X3j88Xne6cKf\ne+7fvPHG+6f1DIkzvbhu0iBx7733smHDBoqLixk9ejR33nkn06f75iRRFOW8aW6yVwUJzW0ixNLw\nIXJnw+UdLqFnQjeSw33NAe2i22DfPhStMoJx3buSlqGQHpnK4bKjAZ83Goy0i27DnwfcRXI9N6md\nD67oNJnu8V38bhrLjOvEnwbMJD0iFafHRYG9kMSweOwnAm8iNBmMdI3rzJ/6z6RVxJlNZ9AcajZL\n/brLdDrHdiTC7Ksl9Erozp8GzCQjIo1L2o2jyF5CdEik9/w2G8zeZiGTwUSPuM7M7n8naRHBh5M2\ndppNxsYbS2My+Iq367peRYeYdn79UANS+tAqIpmMiDT6RffkD69sw2qyYnc7iOwYT/7KI4R3j8do\nMWIrrqBLdHt+1/4auqR0JrRXKKGhVj77TJ+BISwsnIqKCqKi6r7g7dbtAp599ilKS0sID4/g66+/\n4IorrqakpBiz2cyoUReSmprG3//+V8A3XXiPHr34+usvsNkqm+VZ2k0aJObNq78p5Ztvgk8ydi6q\nflANHrN39MS5IsoS6RcgqoW4Y7HjIToslIxIvRmiui8gMTQeDci3FXgL1GA3Wp1vokOi/Nqdq1U/\n+MdqCvEWeNWFZ5vIDPJtBVS4K70d6q2jmq4j8GyJsUaTGObfjKsoijcvQk2hhEbo50N1f1mH6Lbs\nLtqHhkakJRJFUWgTFfzei8aUnhiuT/rYROKsMSSE+jfFGRSDNy9S4lPo3asv119/NfFdWhE5OI5E\nezRbXtoEwHPxOfztkb/jPGHj1odvwlDVnHXfffcDMHny5dx330wSE
hL9Oq7BFwTj4xP4/e9v5847\n9YEpQ4YMZ/jwkezbt5e5c/+KpqkoisKtt97pN104aFx11TXNEiBAZoFtsOrmJs1j8nZcnyvqmvX0\n4RsHsHV/gd8wuD6JPbi6y1R6JlyAqnnYUbCLXgndg37+fBT0PoA6DEkdgEtz0z+pNw6Pg91F++kc\ne/pPRDvXBBvJVJcxGSMwGUwMbtWPMmc5B0uOkBF59u5j+ttv655NoDHU9Rup6S9/0fsKXKqb1Vnr\nGTZqEEW/LeJYWTb9U/oAkJqaxsCBgwM+O336VUyfflXQ7T777Avev8eNm8i4cRP9lnfs2IlFi94I\n+Fz1dOHNTYJEA9mqaxJuE9ZzrLnJbAj+NSbHhjG+v/8oIUVR/EYbDU8LPOHPZ6dyR7lBMTA6fRgA\nEYQzNLRhUyecL04lYBoNRu/Q3VBTaNDa2PnsVPLCbDAxJmMEACnhyaSEN2x6kf9VMndTA9WsSZxr\nzU1K0McNtUwmRa57qp1KTeJ/neTF6ZMg0UB27+gmMyHmcyvbGjqGviWQvPAxGyVgVpMgcfrOrdLu\nHFZUdVOR5gpp0htXTsWENvrso9Wdby3ZqKpmo8TQIM+nbGEGt9LH7tcc1dRS9U3qiclgqnM6GnFy\n8tChBlqw9VW25O/A9tOFvHj3BMym5mtySkyM9OaFqql+U3+0NJIXPpIXPpIXPmd6n0TLzblTdLQ0\nB8VjRvFYGnUs95lqySd/bZIXPpIXPpIXZ0ZyrwFKKuwU2AtxV4bTNiVa2r2FEC2GBIkGyC4tQDFo\naI5QxvQ9/284E0KIhpIg0QA2pz4RWKg5hCEXnH9TNQghxOmSINEAdrc+XUByTDgGaWoSQrQgEiQa\nwO7yTXgmhBAtiQSJBnC4q56sFuThNkII8b9MgkQDVNck5A5WIURLI0GiAZxVfRIWCRJCiBZGgkQD\nODzVQUKam4QQLYsEiQZweqQmIYRomSRINIDT7QYgxGQ5yZpCCPG/RYJEA1TXJELM0twkhGhZmjRI\nzJkzh6FDhzJp0iTve08++SQXX3wxU6ZM4c4776S8vLwpk3DaDpcepdJVCYDLo9ckrNInIYRoYZo0\nSEybNo2FCxf6vTd8+HCWL1/O0qVLadOmDS+++GJTJuG0fPLjLzy56Tn+svopnl+yDZeqBwmL1CSE\nEC1MkwaJ/v37ExUV5ffe0KFDMRj03fbu3ZucnJymTMJpWbJ2NwA2Stm0O88bJKzSJyGEaGGatU/i\ngw8+YOTIkc2ZhAZxVt1xHWaRmoQQomVptjGd//nPfzCbzX79FSdzpk9Yaij/ViUNZ1WfREpi7FlL\nw8mcK+k4F0he+Ehe+EheNI5mCRJLlixh1apVvPbaa6f0ubP1+FLFqPpemFzeaTls5U7ylOZ7hGq1\nmo9mbOkkL3wkL3wkL3zONFg2eZCo/Qjt7777jpdffpk33ngDi+XcbOM3GHxBQrHYcHpcmACzzAIr\nhGhhmrTUu/fee9mwYQPFxcWMHj2aO++8kxdffBGXy8VNN90EQK9evXjkkUeaMhmnTFVUb2eNEmKH\nqqAhU4ULIVqaJi315s2bF/De9OnTm3KXjULV3N4gYQipBKU6SEjHtRCiZZE7rmtRNQ0Vj/e1ElKJ\nUlWTMBuMzZUsIYRoFtJ+UovHo3qblwAUayWKUR/dJM1NQoiWRkq9Wlxuzdu8BGCMLvD+LUFCCNHS\nSHNTLW6P6m1eqs2gSHYJIVoWKfVqcXtUMOh9EjVH75qVc3O4rhBCNCUJErW4PKqvucntG800NfU3\nzZQiIYRoPhIkanG7fc1NmttXe+iakdBcSRJCiGYjQaIWt8fXca3VqElYTSHNlSQhhGg2EiRqcdUc\nAlsjSFgM0ichhGh5JEjU4qnZcV2juckiT6UTQrRAEiRqcXlUlCDNTTL8VQjREknJV4vbrXmbm6YP\n69rMqRFCiOYlQaIWd40+iQhzWDOnRgghmpcEiVqq75NQMMiIJiFEiydBohb9PgkPRsVIiFGChBCi\nZZMgUYvbo4LRjVmxYJbnRwghWjgJErW4PBqK0U2IIQRFae7UCCFE85IgUYvL7QGjG4tBmpqEEEKC\nRC02pxPFoGE1WkkJTwagT2KPZk6VEEI0jyZ9is6cOXNYuXIl8fHxLFu2DICSkhLuuecesrKySE9P\n5+mnnyYyMrIpk3FKyl2VYIBQs5UoSyT/HPFXGeUkhGixmrQmMW3aNBYuXOj33oIFCxgyZAhffPEF\ngwYN4sUXX2zKJJyyCqcNgDBzqPd/udtaCNFSNWnp179/f6Kiovze++abb5g6dSoAU6dO5euvv27K\nJJyySpcdgAhLaDOnRAghmt9Zv0QuLCwkIUF/NkNiYiJFRUVnOwn1srklSAghRLUm7ZNobImJTd93\n4dIcACTHxZ6V/Z2uczltZ5vkhY/khY/kReM460EiPj6e/Px8EhISyMvLIy4ursGfzcsra8KU6aqb\nm9x25azs73QkJkaes2k72yQvfCQvfCQvfM40WDZ5c5OmaX6vx4wZw+LFiwFYsmQJY8eObeoknBKn\nqtckQk3WZk6JEEI0vyYNEvfeey9XX301Bw8eZPTo0Xz44YfccsstrFu3jokTJ7J+/XpuueWWpkzC\nKatubgo1SpAQQogmbW6aN29e0PdfeeWVptztaXO5VTSDC4AwmSZcCCHkjuuabA43mKqChElGNwkh\nhASJGmxON0p1kDBLkBBCCAkSNdgcbjC6QFOwyrMkhBBCgkRNNrtekzArISgyT7gQQkiQqKnS4UEx\nurEoMrJJCCFAgoSfSrsLTC55bKkQQlSRIFFDudOOYlAJNUqntRBCgAQJP2WOCgDCTHKPhBBCgAQJ\nP2VOPUhEWCRICCEESJDwU+4sByA6RGaPFEIIkCDhp8Kj1yRiQ6ObOSVCCHFukCBRg60qSMSFRp1k\nTSGEaBkkSNTg0CoBiJUgIYQQgAQJPy7FBkifhBBCVJMgUYPHoD+VLtIc0cwpEUKIc0ODgsSnn35K\nebk+8ueZZ57ht7/9Ldu3b2/ShDUH1WgDjxmz0dzcSRFCiHNCg4LEf/7zHyIiIti6dStr1qzh8ssv\n57HHHmvqtJ1VTo8LzVKOySn9EUIIUa1BQcJk0h9gt3btWmbMmMGkSZNwOBxNmrCzLacyFxQwu2Oa\nOylCCHHOaFCQUBSFjz/+mOXLlzNkyBAAXC5XkybsbMsqzwEgxCNBQgghqjUoSDz44IN8/vnnzJgx\ng4yMDA4dOsSgQYPOaMevvPIKl112GZMmTeLee+/F6XSe0fbOVKGtCIAQTUY2CSFEtQYFib59+/L8\n889z/fXXA9C2bVseeuih095pbm4ur7/+OosXL2bZsmV4PB4+/fTT095eY3B53ACYDKZmTYcQQpxL\nGhQknnjiCcrKynC73fz617+md+/eLF269Ix2rKoqNpsNt9uN3W4nKSnpjLZ3plweDwBmo7FZ0yGE\nEOeSBgWJdevWERkZyZo1
a0hOTuaLL75g0aJFp73T5ORkbrzxRkaPHs3IkSOJjIxk6NChp729xiBB\nQgghAp1S28oPP/zA+PHjSU5OPqNnQJeWlvLNN9/w7bffEhkZycyZM1m2bBmTJk2q93OJiU3XX2C0\n6McTHmpt0v00lvMhjWeL5IWP5IWP5EXjaFCQiI+P58EHH2Tt2rXccsstuN1uPFVX3qdj3bp1ZGRk\nEBOjjyQaP348mzdvPmmQyMsrO+19nkx5hT6kV3VrTbqfxpCYGHnOp/FskbzwkbzwkbzwOdNg2aDm\npnnz5tGxY0fmz59PdHQ0OTk53Hjjjae909TUVLZs2YLD4UDTNL7//ns6dOhw2ttrDG5V77i2mKTj\nWgghqjWoRIyLi+M3v/kNBw8eZN++fbRt25Zp06ad9k579uzJxIkTufzyyzGZTHTr1o0rr7zytLfX\nGNyq9EkIIURtDQoS27ZtY+bMmVgsFjRNw+1289xzz9G9e/fT3vEdd9zBHXfccdqfb2zVQcJilJqE\nEEJUa1CJ+PjjjzN37lzv3dbff/89jz76KO+8806TJu5s8kiQEEKIAA3qk7DZbN4AATB48GBsNluT\nJao5eFQVkD4JIYSoqUFBIjQ0lO+//977euPGjYSGhjZZopqDt7lJgoQQQng1qEScM2cOd911FxaL\nBdAn93v22WebNGFnm0eT5iYhhKitQSViz549+fLLLzl48CCaptGuXTsmTJjAypUrmzh5Z4+3ucks\nQUIIIao1uEQ0m8107tzZ+1rTtCZJUHOprkmESHOTEEJ4nfYzrs9kWo5zkaqpaBqESE1CCCG86i0R\n9+3bV+cyt9vd6IlpTh7NA5qC2XTacVMIIf7n1BskbrnlljqXhYSENHpimpOqqaAZJEgIIUQN9QaJ\nFStWnK10NDsVVa9JGCVICCFENSkRq+g1CWluEkKImqRErKJR3dwkE/wJIUQ1CRJV9NFNUpMQQoia\npESsokmfhBBCBJASsUp1kDCZ/rfu/xBCiDMhQaKKhgoYMBokS4QQopqUiFU0RUOR7BBCCD9SKlbR\nUFE0aWoSQoiaJEhUU1SpSQghRC3NViqWlZUxc+ZMLr74Yi699FK2bNnSXEmpomFQJEgIIURNzTbl\n6eOPP86oUaN49tlncbvd2O325kqKfre1gtQkhBCilmYpFcvLy9m0aRPTp08HwGQyERER0RxJAcCj\n6eCf+HkAABL8SURBVA8cMkiQEEIIP81SKh47dozY2Fjuv/9+pk6dykMPPdSsNQmn2wUgzU1CCFGL\nojXDI+a2b9/OVVddxTvvvEOPHj14/PHHiYyMZObMmWc7KQDc/I9PKW27jAhXOot+80CzpEEIIc5F\nzdInkZKSQkpKCj169ABg4sSJvPzyyyf9XF5eWZOkJ7ewgtC24HY13T4aU2Ji5HmRzrNB8sJH8sJH\n8sInMTHyjD7fLO0rCQkJtGrVioMHDwLw/fff06FDh+ZIik7R+yQczv+t53YLIcSZarbRTQ8++CD3\n3XcfbrebjIwM/v73vzdXUlAUPTi43BIkhBCipmYLEpmZmXz44YfNtXt/VUEiKvR/65GsQghxpmQ4\nD2C16v/3bJ/YvAkRQohzjAQJwGPQh9/GhUU3c0qEEOLc0uKDhKZp3iARZWm+G/qEEOJc1OKDhKpp\nYHICEGk5s6FiQgjxv6bFBwm3R0MxOwCIkiAhhBB+WnyQ8HjUGkFCmpuEEKKmFh8kXB4NxSzNTUII\nEUyLDxJ6TcKJQTMRYrQ0d3KEEOKc0uKDhNujgtGNEXNzJ0UIIc45LT5IuDwaisGDQYKEEEIEaPFB\nwuNRweDB2HwzlAghxDmrxQcJt0cDgweTIjUJIYSorcUHCYfbhWLQMCpSkxBCiNpafJCwu/R7JEzS\nJyGEEAEkSLj1eyRMBqlJCCFEbS0+SDiqgoRZkXskhBCithYfJOwevbnJbJDmJiGEqK3FBwmnxwVI\nkBBCiGBafJDwNjdJkBBCiADNGiRUVWXq1KnceuutzZYGp6oHCZm3SQghAjVrkHjttdfo0KFDcybB\n29xkMUpNQgghamu2IJGTk8OqVauYMWNGcyUB8NUkLMaQZk2HEEKci5otSMydO5fZs2ejKEpzJQEA\npyo1CSGEqEuz3EG2cuVKEhIS6Nq1Kxs2bGjw5xITG/+hQIrRA25IiI5qku03lfMprU1N8sJH8sJH\n8qJxNEuQ+Omnn1ixYgWrVq3C4XBQUVHB7NmzefLJJ+v9XF5eWaOnpcJhB8DtUJtk+00hMTHyvElr\nU5O88JG88JG88DnTYNksQWLWrFnMmjULgI0bN7Jo0aKTBoim4lLdYACrWUY3CSFEbS3+PgmP6gbA\napIgIYQQtTX7rHYDBw5k4MCBzbZ/t+YBIFRqEkIIEaDF1yTc1TUJCRJCCBGgxQcJD3qQCLVIkBBC\niNpafJBQpblJCCHqJEECPUiEyM10QggRoMUHCY+mNzfJk+mEECJQiw8SGh5QDc0+PYgQQpyLWnyQ\nUBUPaC0+G4QQIqgWXzpqeFA0Y3MnQwghzkkSJBQVBQkSQggRTIsPEigqBgkSQggRVIsOEm6PCgZp\nbhJCiLq06CDhcqtgUDEoEiSEECKYFh0knC4PikHFKM1NQggRVIsOEjaX/nxroyI30gkhRDAtOkjY\nXfrzrY3S3CSEEEG16CBRWqk/utRskHmbhBAimPMmSLg8LrLLcwBQNRW724GqqWe0zRKbDYAQkwQJ\nIYQI5rxpjL9l8UNUqCVYK9NwhRTgMdoxYuKC+K5M7XQJiWHxp7zNUptek5AgIYQQwTVLTSInJ4fr\nrruOSy65hEmTJvHaa6+d9DMVagkA9rAsPEY7amUELruZLQXbeHLj8xTaik45HcW2cgDCLaGn/Fkh\nhGgJmqUmYTQauf/+++natSsVFRVMmzaNYcOG0aFDhzo/MzTqUoa37cH3ew6SEBnJ/7d390FR1f8e\nwN+7KynyoCIrGJKDOPhTygdMsOCiFwkMQXYn0IlxakbNMgt5SMKdUeeq6Uw4zNRtHDMrs7g5eUt/\nU/izudH4dMW1SLQGLdExWIpdEZAnZV32c//gsoayiLl4kH2//trztPs9n+Hw3u+ec76nuXEIrDc7\n8H3NEbQF/YatJ7cjL+qVe+pRNLY3AQD8ho28730iIhqMFAkJrVYLrVYLAPDy8kJoaCgsFkuvIZH1\nbDKuXGnGeH+/bvP/vSEIm//nv9Dm/xt2nP4Mhqdeg0bdebWSpa0OgGDMcK1jfXOrBaOGjcIjGg80\nW5uAR4DRwxkSREQ9UfzEtclkwvnz5zF16tS/tb121HBk/dsi2K+NRm17DTaWFuJi42WUmc9gk3Eb\n/uNkAX6trwQAXL5WhY3GbfjvC/8EALTYOn9uCvBmSBAR9UTRE9etra3IzMyEwWCAl5fX336fkLG+\nmD92IQ5W/wt1o2tR+NP2bsvfLd+JkCHTUWW+BowG/vePU8j4RxpuSCsAQOvFkCAi6
oliIWGz2ZCZ\nmYnU1FTEx8f3aRut1sfpsuUpkZhW8RgKSoogI6vQcU0LsQ4D7GoMefQSLtnPAD4e6Hr+nHXITdyw\nd4ZE6LggDBsy9H536YHqrRbuhrW4hbW4hbVwDZWIiBIfnJeXh1GjRmHt2rV93ubKlea7rtN24yaq\nLS34s74NlaZrmBg0Akfr/wWz6rfuK3YMATQ2qMUD/znvrXttvqK0Wp8+1cIdsBa3sBa3sBa33G9Y\nKtKTKCsrw9dff42wsDDodDqoVCpkZ2cjNjb2vt97+DAPTHpsFCY9NgpzpwcBAOzVk/Dlhc6QiBoT\nBaPFCGhsAABP9d//mYuIaLBTJCRmzpyJc+fOPbDPG+sV4HidEBLTGRL/z8+T5yOIiJx5aO64vh9h\nI0Mxd1w0IgMjoPX077Ys0MfPyVZEROQWIaFRa5AeluqYHj1sFK7e6LxDe8RQntwiInJG8fsklGCI\nzHG8fkTziIItISIa2NwyJP56uatCF3cRET0U3DIkACBxfBwAIHz0JIVbQkQ0cLnFOYmeJE9IwJxx\n0TwnQUTUC7ftSahVagYEEdFduG1IEBHR3TEkiIjIKYYEERE5xZAgIiKnGBJEROQUQ4KIiJxiSBAR\nkVMMCSIicoohQURETjEkiIjIKYYEERE5xZAgIiKnFAuJo0ePYv78+UhMTMTOnTuVagYREfVCkZCw\n2+3YtGkTPvzwQ3zzzTcoLi7GxYsXlWgKERH1QpGQOHv2LMaPH4+goCB4eHhgwYIFKCkpUaIpRETU\nC0VCwmw2Y+zYsY7pgIAAWCwWJZpCRES9UCQk+FxpIqKHgyKPLw0MDMQff/zhmDabzRgzZsxdt9Nq\n+SS5LqzFLazFLazFLayFayjSk3jiiSdQVVWFmpoaWK1WFBcXY968eUo0hYiIeqFIT0Kj0WDdunVY\nunQpRARpaWkIDQ1VoilERNQLlfAEAREROcE7romIyCmGBBEROcWQICIipwZ8SLjjGE8GgwFPP/00\nUlJSHPOuXbuGpUuXIjExEcuWLUNzc7Nj2ebNm5GQkIDU1FScO3dOiSb3i9raWrzwwgtISkpCSkoK\n9uzZA8A9a2G1WpGeng6dToeUlBS89957AACTyYRFixYhMTEROTk5sNlsjvWzs7ORkJCAxYsXd7vk\nfLCw2+3Q6/V45ZVXALhvLeLi4rBw4ULodDqkpaUBcPExIgNYR0eHxMfHi8lkEqvVKgsXLpTKykql\nm9XvfvjhB6moqJDk5GTHvLffflt27twpIiLvv/++FBQUiIjI4cOH5aWXXhIRkfLycklPT3/wDe4n\nFotFKioqRESkpaVFEhISpLKy0i1rISLS1tYmIiI2m03S09OlvLxcVq9eLQcPHhQRkfXr18vnn38u\nIiJFRUWyYcMGEREpLi6WrKwsRdrcnz7++GPJzc2Vl19+WUTEbWsRFxcnjY2N3ea58hgZ0D0Jdx3j\n6cknn4Svr2+3eSUlJdDr9QAAvV7vqENJSQl0Oh0AYNq0aWhubkZdXd2DbXA/0Wq1mDx5MgDAy8sL\noaGhMJvNblkLAPD09ATQ+c3YZrNBpVLBaDQiMTERQGctvvvuOwDd/14SExNRWlqqTKP7SW1tLY4c\nOYL09HTHvJMnT7plLUQEdru92zxXHiMDOiQ4xtMt9fX18Pf3B9D5z7O+vh4AYLFYEBgY6FgvICAA\nZrNZkTb2J5PJhPPnz2PatGm4evWqW9bCbrdDp9MhOjoa0dHRCA4Ohq+vL9TqzsM4MDDQsb9/rYVG\no4Gvry8aGxsVa7urbdmyBXl5eVCpVACAhoYGjBgxwi1roVKpsGzZMjz33HPYt28fALj0GFHkZrq+\nEt7CcVc91ajrwBksWltbkZmZCYPBAC8vL6f7N9hroVarceDAAbS0tGDVqlU9Dq/ftb+310JEBk0t\nDh8+DH9/f0yePBlGoxFA5/7dvs/uUAsA2Lt3ryMIli5dipCQEJceIwM6JP7uGE+D0ejRo1FXVwd/\nf39cuXIFfn5+ADq/CdTW1jrWq62tHVQ1stlsyMzMRGpqKuLj4wG4by26eHt7Y9asWThz5gyamppg\nt9uhVqu77W9XLQICAtDR0YGWlhaMGDFC4Za7xk8//YTvv/8eR44cQXt7O1pbW7FlyxY0Nze7XS2A\nzp4CAPj5+SE+Ph5nz5516TEyoH9ucucxnm5P/Li4OHz11VcAgP379zvqMG/ePBw4cAAAUF5eDl9f\nX0c3czAwGAyYOHEiXnzxRcc8d6xFfX294wqVGzduoLS0FBMnTkRUVBQOHToEoHst4uLisH//fgDA\noUOHMHv2bGUa3g9ycnJw+PBhlJSUoLCwEFFRUdi2bZtb1uL69etobW0FALS1teH48eMICwtz6TEy\n4IflOHr0KN566y3HGE8rVqxQukn9Ljc3F0ajEY2NjfD398frr7+O+Ph4rF69Gn/++SceffRRvPPO\nO46T2xs3bsSxY8fg6emJrVu3Ijw8XOE9cI2ysjIsWbIEYWFhUKlUUKlUyM7OxtSpU5GVleVWtfj1\n11+Rn58Pu90Ou92OpKQkrFy5EtXV1cjJyUFTUxMmT56MgoICeHh4wGq1Ys2aNTh37hxGjhyJwsJC\njBs3TundcLlTp07ho48+wo4dO9yyFtXV1XjttdegUqnQ0dGBlJQUrFixAo2NjS47RgZ8SBARkXIG\n9M9NRESkLIYEERE5xZAgIiKnGBJEROQUQ4KIiJxiSBARkVMMCXroLFq0CHq9HgsWLEB4eDj0ej30\nej0MBsM9v9fy5cv7NHT02rVrUV5e/neae08qKirw7bff9vvnEPUV75Ogh1ZNTQ3S0tJ6HdWza5iG\nh8W+fftQWlqKwsJCpZtCBGCAj91EdK9KS0tRUFCA6dOno6KiAqtWrUJ9fT2KioocD6HJz89HZGQk\nAGDOnDnYvXs3QkJCkJGRgRkzZuD06dOwWCxITk5GVlYWACAjIwOvvvoqYmJisGbNGnh7e+PixYsw\nm82IiIjA1q1bAXSOhZOXl4eGhgYEBwejo6MDcXFxWLx4cbd21tXVITc3Fw0NDQCAmJgYLF++HNu3\nb0dbWxv0ej2ioqKQn5+P06dPo7CwENevXwcAZGZmIjY2FlVVVcjIyEBycjLKyspgtVqxYcMGRERE\nPJBak5u4n4ddECnJZDLJ7Nmzu807ceKETJkyRX7++WfHvL8+kKWyslLmzp3rmI6NjZVLly6JiMjz\nzz8vubm5IiLS1NQkkZGRYjKZHMuOHTsmIiJvvPGGLFmyRG7evCnt7e0yf/58MRqNIiKycuVK+eCD\nD0REpLq6WmbMmCF79+69o+27du2S9evXO6abmppEROSLL76QnJycbm3X6XRy9epVERGpra2V2NhY\naWlpkd9//10mTZokxcXFjn2fO3eu2Gy2vheR6C7Yk6BBZ8KECXj88ccd05cvX8a7774Li8UCjUYD\ni8WCxsZGjBw58o5tn332WQCAj48PQkJC
UFVVhaCgoDvWe+aZZzBkSOfhM2XKFFRVVSEyMhJGoxGb\nN28GAIwbN87RY7nd9OnT8dlnn2Hbtm2YNWsWYmJielyvrKwMJpMJy5Ytcwz6qNFoUF1djeHDh8PT\n0xNJSUkAgKeeegoajQaXL19GaGhoX8tF1CuGBA06Xl5e3aazs7OxYcMGzJkzB3a7HVOnTkV7e3uP\n2w4dOtTxWq1Wo6Oj457W6+tzCmbOnIn9+/fjxIkT+PLLL7Fr1y58+umnd6wnIggPD8fu3bvvWFZV\nVXXHPLvdPqielUDKe3jO6BH1QPpw3UVLS4tj1M+9e/c6/cfvCpGRkY4hmmtqanDq1Kke1zOZTPD2\n9kZSUhLy8/Pxyy+/AOh8VsRfH1ofERGByspK/Pjjj455Z8+edby+fv06Dh48CKDz8Z0AMH78eNfu\nFLk19iToodaXb80GgwErVqzA2LFjERUVBR8fnx63v/29nC3rbb1169bhzTffRHFxMSZMmICIiIhu\nn9eltLQUe/bsgUajgYhg06ZNAIDo6Gh88skn0Ol0mD17NvLz87F9+3YUFBSgubkZN2/eRHBwMHbs\n2AEA8Pf3x4ULF5Ceng6r1YrCwkJoNJq71oSor3gJLJELtbe3w8PDA2q1GmazGenp6SgqKkJwcLDL\nP6vr6qbjx4+7/L2JurAnQeRCly5dwtq1ayEisNvtyM7O7peAIHpQ2JMgIiKneOKaiIicYkgQEZFT\nDAkiInKKIUFERE4xJIiIyCmGBBEROfV/smX5vm0Z6kkAAAAASUVORK5CYII=\n",
- "text/plain": [
- "\u003cmatplotlib.figure.Figure at 0x7f970d490590\u003e"
- ]
- },
- "metadata": {
- "tags": []
- },
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "test_accuracy 0.1\n"
- ]
- },
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAEcCAYAAADUX4MJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsnXl4FFW6/79V1Vv2BEhIAG/AuCAIsgjoCEFgFDSsio7I\n6Dg4l/GODgpu4wxcnWHEHYXBDQVllJ/LRUAhDKCgYd+XsCVhS0IWOnvSSXqtqt8f1V1d1V2d7iwN\nIbyf5+Ghu6q66tRJ1fmedznnMKIoiiAIgiCIILCXuwAEQRDElQEJBkEQBBESJBgEQRBESJBgEARB\nECFBgkEQBEGEBAkGQRAEERIkGARBEERIkGAQVx0HDhzAPffcc7mL0eEZOHAgioqKLncxiDaEBIOQ\nGT16NPr164eamhrV9kmTJqF3794oKSkBAPzlL39B7969cezYMfmYwsJC9O7dW/7+yCOPYNWqVfL3\njz76CGPGjMGgQYNw5513Ys6cOQCA8ePHY9CgQRg0aBD69OmD/v37Y+DAgRg0aBCWLl3qV8YlS5bg\nhRdeaNV93nrrrfjPf/7TrN98/PHHePfdd7Fv3z6MHDmyVdf34FtHHY3Dhw+jR48el7sYRBuiu9wF\nINoXPXr0QGZmJqZPnw4AyMvLg91uB8Mw8jEMwyA+Ph7vvfceli1bptquxZo1a7Bu3TqsWLECPXr0\nQGVlJbZu3QoAWL9+vXzcI488gsmTJ+P+++9v1T2IohiwLC0lKysLzz33HJxOZ5ufu73C8zw4jrvc\nxSDaEWRhEComTZqENWvWyN/XrFmDKVOm+B03ZcoU5Obm4sCBA0HPefz4cQwfPlzubXbu3BkPPPCA\n5rFNzVSzfft2fPTRR9iwYQMGDhyIyZMnA5CE5t1338W0adMwYMAAFBUVYfXq1bj33nsxaNAg3HXX\nXfjmm2/k8/haCaNHj8by5csxceJEDBkyBHPmzIHD4ZD319XVoaCgAH369MHMmTNRVlYmW0Hl5eUQ\nRRFLly7FXXfdhdtuuw2zZ89GXV0dAMDhcOD555/HsGHDMGTIEDzwwAOoqqrCu+++i4MHD2L+/PkY\nNGgQ/vnPf2re89NPP43hw4djyJAheOSRR3DmzBl5n91ux+uvv47Ro0djyJAhmD59ulzuAwcO4KGH\nHsKQIUMwatQorF27Vq4rpVWzZs0aPPzww/L33r17Y+XKlRg7dizGjh0LAHj11Vdx5513YvDgwbj/\n/vtVf3NBEPDRRx/hrrvukvebzWb5XBcuXJDr4Y033sCoUaMwfPhwvPLKK3JZq6ur8cQTT2DIkCEY\nNmwYfvvb3wZ8BojLCwkGoeKWW25BQ0MDzp07B0EQsHHjRkycONGvITeZTHjiiSewcOHCkM65du1a\nLFu2DMePH4cgCC0q24gRI/DEE0/g3nvvxeHDh+VGEADWrVuHf/7znzh06BBSUlLQuXNnLF26FIcO\nHcJrr72G1157DadOnZKP97USNm7ciOXLl2PLli3IyclRieaOHTtw2223wWQy4ZNPPkFSUhIOHz6M\nQ4cOITExEStWrMDWrVuxcuVKbN++HbGxsfj73/8OQGqQ6+vrsX37duzbtw9///vfYTQaMXv2bAwe\nPBjz5s3DoUOHMHfuXM17HjlyJH788Ufs2rULffr0wXPPPSfve/3113Hy5El888032LdvH55//nkw\nDIPS0lLMnDkTjz76KPbs2YO1a9eq3IW++NbF1q1bsWrVKmzYsAEA0L9/f/zwww/Yv38/JkyYgGee\neUZu7JcvX44NGzbg008/xcGDB7FgwQKYTCa/87711lsoKCjADz/8gM2bN8NsNuP9998HAHz22WdI\nTk7G3r17sWvXLsyePTtgWYnLCwkG4cekSZOwdu1a7Ny5E9deey2SkpI0j3vwwQdRWlqK7du3N3m+\niRMnYt68edi5cyceeeQR/OpXv9KMT7SGKVOmIC0tDSzLQqfTYeTIkbJFc+utt+KOO+5o0hp69NFH\n0aVLF8TGxmLUqFEqcfnll1+ajFt8++23eOaZZ5CUlAS9Xo8nn3wSmzZtgiAI0Ol0qKmpwfnz58Ew\nDPr06YOoqKiQ7+u+++5DRESEfN6cnBzU19dDFEWsXr0ac+fORWJiIhiGwYABA6DX67Fu3Trccccd\nuPfee8FxHOLi4poUDF/++Mc/IiYmBgaDAQAwYcIExMbGgmVZPPbYY3A4HDh//jwAYNWqVZg9ezZS\nU1MBADfeeCPi4uIAqK3FVatW4aWXXkJMTAwiIyMxc+ZM2R2p0+lQXl6OoqIicByHwYMHh1xW4tJC\nMQzCj4kTJ+K3v/0tioqKMGnSpIDHGQwG/OlPf8KiRYvwzjvvNHnO8ePHY/z48eB5Hj/99BOeffZZ\n9O3bF3fccUeblDk5OVn1PSsrCx988AHy8/MhCAJsNhtuvPHGgL/v3Lmz/DkiIgLl5eUApEZv165d\neOmllwL+tqSkBE899RRYlpV/o9PpUFFRgUmTJuHixYuYM2cOLBYLJkyYgDlz5oQUGxAEAQsXLsSm\nTZtQXV0NhmHAMAyqq6vhcDjgcDhwzTXX+P2utLRUc3uo+Nbl8uXLsWrVKrlOGhoaUF1dDQC4ePFi\n0GtVVVXBarWqYlOCIMiC8vjjj2PJkiWYMWMGGIbBAw88gJkzZ7a4/ET4IAuD8KNbt27o3r07tm3b\nhrvvvrvJY++77z5YLBb8+OOPIZ2b4ziMHTsWN954I06fPt0WxQWgdn84HA48/fTT+MMf/oDdu3dj\n//79SE9PbzI+Eohjx46hR48eSEhI8LuOh5SUFHzyySfYt28f9u3bh/379+PIkSNISkqCTqfDk08+\niczMTHz99df45ZdfZFdasOD5unXr8PPPP2PFihU4cOAAtm7dKt9DQkICjEYjCgsLNcujtR0AIiMj\nYbPZ5O8eEVCiLNeBAwfw6aefYvHixdi/fz/279+P6OhouRzJyckBr+UhISEBERERWL9+vVxHBw4c\nwMGDBwEAUVFRePHFF/HTTz/ho48+wueff449e/Y0eU7i8kCCQWiyYMECrFixQvZHB4LjODz11FP4\n5JNPAh6zZs0aZGVloaGhAaIoIisrC2fPnkX//v2bXa4uXbqguLi4ycbf6XTC6XQiISEBLMsiKysL\nO3fubPa1AMkdlZ6eLn/v3LkzampqUF9fL2/7zW9+g4ULF8ppx1VVVdiyZQsAYO/evcjLy4MgCIiM\njIROp5Otiy5dushBYS0aGhpgMBgQGxuLxsZGvPPOO3JjzjAM7rvvPrz++usoKyuDIAg4cuQInE4n\nJkyYgN27d2Pjxo3geR41NTXIyckBIAWiN2/eDJvNhoKCAnz33XdN3n9DQwN0Oh3i4+PhcDiwZMkS\nNDQ0yPsfeOABLFq0CAUFBQCA3Nxc1NbWqs7hsRoWLFiAqqoqAIDZbMaOHTvkOvaITmRkJDiOo+ys\ndkrYBWPbtm0YN24cxo4dG9BvvWHDB
mRkZGDChAmqoB5xaVH2LK+55hr07dtXc58v48ePR1JSkl/q\nrYfo6Gh89NFHcjbPO++8g1deeQWDBg0KeP1AjBs3DqIoYtiwYbjvvvs0fxcVFYW//e1vePrppzF0\n6FBs2LABY8aMCXjOpq6blZWlil9ce+21yMjIwJgxYzB06FCUl5fjd7/7HcaMGYMZM2Zg8ODBeOih\nh5CdnQ0AqKiowKxZszB48GCMHz8ew4YNw8SJEwFIcZONGzdi2LBhePXVV/2uPXnyZKSkpCA9PR3j\nx4/HwIEDVftffPFF3HDDDZg6dSqGDRuGd955B6IoIiUlBUuXLsXy5csxdOhQTJkyRRaMxx57DHq9\nHnfccQdeeuklTJgwocm6GDFiBEaMGIGxY8dizJgxiIiIULmsfv/73+Oee+6R733u3LmyBaM813PP\nPYfU1FQ8+OCDuPXWWzFjxgzk5+cDAPLz8/HYY49h4MCBmDZtGqZPn44hQ4YE/JsQlw8mnCvuCYKA\nsWPH4vPPP0dSUhKmTp2KhQsXIi0tTT6moKAAs2fPxr///W9ER0ejqqoKnTp1CleRCCJkKisrMXny\n5KBBfYK4WgirhZGdnY3U1FR0794der0eGRkZsqnu4dtvv8XDDz+M6OhoACCxINoNFoulyWA3QVxt\nhDVLymw2IyUlRf7etWtX1XQSAGSzdNq0aRBFEU8++SRGjBgRzmIRREj07NkTPXv2vNzFIIh2Q1gF\nIxRvF8/zKCwsxMqVK1FSUoLp06cjMzNTtjgIgiCI9kFYXVLJycly5gggWRy+g8C6du2KMWPGgGVZ\n9OjRA7169ZKtjkCEMexCEARBBCCsFka/fv1QWFiI4uJiJCYmIjMz028qiV//+tfIzMzE5MmTUVVV\nhYKCgqADgRiGQXm5JZxFv2JITIyhunBDdeGF6sIL1YWXxMSYVv0+rILBcRzmzZuHGTNmQBRFTJ06\nFWlpaVi8eDH69euHUaNGYcSIEdi5cycyMjLAcRxeeOEFeWoBgiAIov0Q1rTacEI9BgnqPXmhuvBC\ndeGF6sJLay0MGulNEARBhAQJBkEQBBESJBgEQRBESJBgEARBECFBgkEQBEGEBAkGQRAEERIkGARB\nEERIkGAQBEEQIUGCQRAEQYQECQZBEAQREiQYBEEQREiQYBAEQRAhQYJBEARBhAQJBoA6hwX/PvkN\nNpz/8XIXhSAIot0S1vUwrhROVuZi78WDAIDR16TDpDNe5hIRBEG0P8jCAMCLvPxZEIXLWBKCIIj2\nCwkGAEGxhhQJBkEQhDYkGFCLhAASDIIgCC1IMOAjGGRhEARBaEKCAUBUiAQvkGAQBEFocVUKRo29\nFg7eKX/nFYIhkkuKIAhCk6tOMGwuO/6281W8tv9deZuoCHrz5JIiCILQ5OoTDN4GAChrrJC3KQPd\nIgkGQRCEJledYDAat6y0KsjCIAiC0KZDC0aj04ptRbtU8QqtGIWoypIS/fYTBEEQHXxqkK9yv8Oh\nsmzUOiyYcO1YANpZUOqBe7zffoIgCKKDWxhFlhIAQLkiXqFlYQhkYRAEQQSlQwuGRwhYxnubWjEK\nGrhHEAQRnLALxrZt2zBu3DiMHTsWS5cu9du/Zs0a3H777ZgyZQqmTJmCVatWtdm1BUjWglIwtARB\nmSVFgkEQBKFNWGMYgiBg/vz5+Pzzz5GUlISpU6dizJgxSEtLUx2XkZGBuXPntvn1PeMrGDDeMmla\nGDT5IEEQRDDCamFkZ2cjNTUV3bt3h16vR0ZGBrZs2eJ3nBimuIHXJeUVDK2pzMklRRAEEZywCobZ\nbEZKSor8vWvXrigrK/M7bvPmzZg0aRKefvppXLx4sc2u73E1MQFcUk7B5beNpywpgiAITcIqGKFY\nDqNHj8bWrVvx/fff4/bbb8eLL77Y5tcPFMNw8A73cd5tDXbvmA2CIAjCS1hjGMnJySgpKZG/m81m\nJCUlqY6Ji4uTPz/44IN4++23Qzp3YmJM8IPcnqjICIN8fAVM8u7YeCO6RMVAf46Tt8XEGEM7dzvi\nSitvOKG68EJ14YXqom0Iq2D069cPhYWFKC4uRmJiIjIzM7Fw4ULVMeXl5UhMTAQAbNmyBdddd11I\n5y4vtwQ9hhck95Ld5pKPr6qul/dfLK+G2KhHo9Uhb6uubQjp3O2FxMSYK6q84YTqwgvVhReqCy+t\nFc6wCgbHcZg3bx5mzJgBURQxdepUpKWlYfHixejXrx9GjRqFL774Alu3boVOp0NcXBxee+21Nrt+\nsHEYDncMQzmYT6D1MAiCIDQJ+9Qg6enpSE9PV22bNWuW/HnOnDmYM2dOWK7tSZdlGO20WqcgWRZK\nEXEJFPQmCILQokOP9BY9A/cQKOgtBbhpxT2CIIjgdFjBqKi1yu4lRjUOQ2lhSIIhqBZQIguDIAhC\niw4rGC98uFsWBzbASG+tcRgusjAIgiA06bCCAQAKw0JGaxyGQC4pgiCIoHRowfDgmYQQ8LUmtEZ6\nk2AQBEFocXUIRgBB8GREKbdRWi1BEIQ2V51gKFfUcwpO/FSYhdM1Z+VtFPQmCILQpkMu0eobhxAD\nTF/uElxYf35zk78lCIIgJDqkhWF3qK0E5QJJvEYMQwnFMAiCILTpkIJh8xGMBpt3riilteHUEAyK\nYRAEQWhzVQjGmeIa+TOvimFoWRgUwyAIgtDiqhCMQEuwarukwrP6H0EQxJVOBxUMtRAEWoJV2yVF\nFgZBEIQWHVQw1I0+H3Achv/qehT0JgiC0KaDCoYLUIzuFgNYGC5Rw8IgwSAIgtCkQwqG3cEDjFIk\ntGMYTp7SagmCIEKlQwqGwyUAjLZIBLMwRBIMgiAITTrkSG8XLwCst+G3O1348PtjiE7LwxnLGXm7\ndlotCQZBEIQWHVIweF5UuaTAiDhwthCmmL2q47TSaimGQRAEoU2HdEm5BAEMq8iU0lgXQzqOBIMg\nCCJUOqZg8KLKJRVl4jSP0xyHQQP3CIIgNOmYguESAIWFkdQpAso0WwBgwcpreitRTlRIEARBeOmY\ngiGIYBQWBsOIqqwpAICokxdQUkIuKYIgCG06pmDwagtDSzAYgdMc6U2CQRAEoU0HFQwX2Ngq+ftF\n60UweofqGJHnNGMYNA6DIAhCmw4pGMW6Q9CnnJe/23g7jDftUx0j8tqBcAEU9CYIgtCiQwqGhSsO\negzv1B6CQhYGQRCENh1SMBhRH/QY0aUtGJQlRRAEoU2HFAwI2u4mJSIfQDDIwiAIgtAk7IKxbds2\njBs3DmPHjsXSpUsDHrdx40b07t0bJ06caPU1GSGEGU98BCMlojtEgYFIMQyCIAhNwioYgiBg/vz5\nWLZsGdavX4/MzEycPXvW77iGhgZ8+eWXGDBgQBtdOLhgKC2MCFs3PNzzdwAYimEQBEEEIKyCkZ2d\njdTUVHTv3h16vR4ZGRnYsmWL33GLFi3Cf//3f0OvDx57CIkA7iYPDBjVMRwM0LEcIDKUJUUQBB
[... base64-encoded PNG data omitted (matplotlib figure output) ...]\n",
- "text/plain": [
- "\u003cmatplotlib.figure.Figure at 0x7f971b401110\u003e"
- ]
- },
- "metadata": {
- "tags": []
- },
- "output_type": "display_data"
- }
- ],
- "source": [
- "with tf.Graph().as_default():\n",
- " hp = tf.contrib.training.HParams(\n",
- " learning_rate=0.05,\n",
- " max_steps=max_steps,\n",
- " )\n",
- " train_ds = setup_mnist_data(True, hp, 500)\n",
- " test_ds = setup_mnist_data(False, hp, 100)\n",
- " tf_train = autograph.to_graph(train)\n",
- " (train_losses_, test_losses_, train_accuracies_,\n",
- " test_accuracies_) = tf_train(train_ds, test_ds, hp)\n",
- "\n",
- " with tf.Session() as sess:\n",
- " durations = []\n",
- " for t in range(burn_ins + trials):\n",
- " sess.run(tf.global_variables_initializer())\n",
- " start = time.time()\n",
- " (train_losses, test_losses, train_accuracies,\n",
- " test_accuracies) = sess.run([train_losses_, \n",
- " test_losses_, \n",
- " train_accuracies_,\n",
- " test_accuracies_])\n",
- " if t \u003c burn_ins:\n",
- " continue\n",
- " duration = time.time() - start\n",
- " durations.append(duration)\n",
- " print('Duration:', duration)\n",
- "\n",
- " print('Mean duration:', np.mean(durations), '+/-', np.std(durations))\n",
- " plt.title('MNIST train/test losses')\n",
- " plt.plot(train_losses, label='train loss')\n",
- " plt.plot(test_losses, label='test loss')\n",
- " plt.legend()\n",
- " plt.xlabel('Training step')\n",
- " plt.ylabel('Loss')\n",
- " plt.show()\n",
- " plt.title('MNIST train/test accuracies')\n",
- " plt.plot(train_accuracies, label='train accuracy')\n",
- " plt.plot(test_accuracies, label='test accuracy')\n",
- " print('test_accuracy', test_accuracies[-1])\n",
- " plt.legend(loc='lower right')\n",
- " plt.xlabel('Training step')\n",
- " plt.ylabel('Accuracy')\n",
- " plt.show()\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "A06kdgtZtlce"
- },
- "source": [
- "# Eager"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "hBKOKGrWty4e"
- },
- "outputs": [],
- "source": [
- "def predict(m, x, y):\n",
- " y_p = m(x)\n",
- " losses = tf.keras.losses.categorical_crossentropy(tf.cast(y, tf.float32), y_p)\n",
- " l = tf.reduce_mean(losses)\n",
- " accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
- " accuracy = tf.reduce_mean(accuracies)\n",
- " return l, accuracy\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 0,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- }
- },
- "colab_type": "code",
- "id": "HCgTZ0MTt6vt"
- },
- "outputs": [],
- "source": [
- "def train(ds, hp):\n",
- " m = mlp_model((28 * 28,))\n",
- " opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
- " train_losses = []\n",
- " test_losses = []\n",
- " train_accuracies = []\n",
- " test_accuracies = []\n",
- " i = 0\n",
- " train_test_itr = tfe.Iterator(ds)\n",
- " for (train_x, train_y), (test_x, test_y) in train_test_itr:\n",
- " train_x = tf.to_float(tf.reshape(train_x, (-1, 28 * 28)))\n",
- " train_y = tf.one_hot(tf.squeeze(train_y), 10)\n",
- " test_x = tf.to_float(tf.reshape(test_x, (-1, 28 * 28)))\n",
- " test_y = tf.one_hot(tf.squeeze(test_y), 10)\n",
- " if i \u003e hp.max_steps:\n",
- " break\n",
- " with tf.GradientTape() as tape:\n",
- " step_train_loss, step_train_accuracy = predict(m, train_x, train_y)\n",
- " grad = tape.gradient(step_train_loss, m.variables)\n",
- " opt.apply_gradients(zip(grad, m.variables))\n",
- " step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
- "\n",
- " train_losses.append(step_train_loss)\n",
- " test_losses.append(step_test_loss)\n",
- " train_accuracies.append(step_train_accuracy)\n",
- " test_accuracies.append(step_test_accuracy)\n",
- " i += 1\n",
- " return train_losses, test_losses, train_accuracies, test_accuracies\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 40,
- "metadata": {
- "colab": {
- "autoexec": {
- "startup": false,
- "wait_interval": 0
- },
- "height": 789
- },
- "colab_type": "code",
- "executionInfo": {
- "elapsed": 56025,
- "status": "ok",
- "timestamp": 1531163800231,
- "user": {
- "displayName": "",
- "photoUrl": "",
- "userId": ""
- },
- "user_tz": 240
- },
- "id": "plv_yrn_t8Dy",
- "outputId": "68be955d-61dd-43e4-b540-3794e3c8f990"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Duration: 4.2232978344\n",
- "Duration: 4.2386469841\n",
- "Duration: 4.24286484718\n",
- "Duration: 4.24036884308\n",
- "Duration: 4.25758385658\n",
- "Duration: 4.23242998123\n",
- "Duration: 4.4213449955\n",
- "Duration: 4.29613113403\n",
- "Duration: 4.28209114075\n",
- "Duration: 4.24192905426\n",
- "Mean duration: 4.26766886711 +/- 0.055508619589\n"
- ]
- },
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAEcCAYAAADUX4MJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsnXdgFGX6x78zW7KbTSE9JIA0pQkIooCgqBx2qiK/O0XU\n8zyFAw/w8MSCFcuJCHqKoFiwIHIgIgooaGjSCU1aaCEJ6W1btszM74/ZmZ2tWchuQjbP55/s7szO\nvDPZeb/vU97nZQRBEEAQBEEQ9cA2dQMIgiCI5gEJBkEQBBESJBgEQRBESJBgEARBECFBgkEQBEGE\nBAkGQRAEERIkGARBEERIkGAQRIjs3r0bt99+e1M3o14KCwvRtWtX8Dzf1E0hogwSDKLB3HzzzejZ\nsyeqq6s9Ph85ciS6du2KoqIiAMC///1vdO3aFQcPHpT3yc/PR9euXeX348ePx/Lly+X3CxYswNCh\nQ9G3b1/ceOONmDZtGgDgrrvuQt++fdG3b190794dvXr1Qp8+fdC3b18sXLjQp43vvfceZsyY0aDr\n7NevH3766acL+s6HH36IuXPnYufOnRgyZEiDzi/hfY/8wTBMWM5FEErUTd0AIjpo06YN1qxZg/vu\nuw8AcPz4cdhsNo+Oi2EYtGrVCu+88w4+/vhjj8/9sXLlSqxevRqfffYZ2rRpg4qKCmzcuBEA8MMP\nP8j7jR8/HqNGjcLdd9/doGsQBCHsHW1OTg6efPJJOBwO6sSJZg9ZGERYGDlyJFauXCm/X7lyJUaP\nHu2z3+jRo3Hs2DHs3r273mMeOnQIgwcPRps2bQAAKSkpGDt2rN99g1W42bx5MxYsWIAff/wRffr0\nwahRowCIQjN37lz8+c9/xlVXXYWCggKsWLECd9xxB/r27Ythw4bhm2++kY/jbSXcfPPNWLx4MUaM\nGIFrrrkG06ZNg91ul7fX1tbi7Nmz6N69Ox599FGUlpbKVlBZWRkEQcDChQsxbNgwDBgwAFOnTkVt\nbS0AwG6341//+hf69++Pa665BmPHjkVlZSXmzp2LPXv24OWXX0bfvn3xyiuv1HsfS0tL8fjjj6N/\n//649dZb8e2338rbDhw4gLvvvhtXX301Bg8ejDfeeCPo+QHAZDLhmWeeweDBgzFkyBC888478v3P\nz8/H+PHj0a9fPwwcOFC2CInogCwMIiz07t0bq1atwqlTp9C+fXusXbsWX331FebOneuxn06nw2OP\nPYa3334bX331Vb3HfPXVV5Geno7+/fuje/fuYNkLH+Ncf/31eOyxx5Cfn48333zTY9vq1auxaNEi\ndOjQATzPIyUlBQsXLkSbNm2we/duPPLII+jVqxe6desGwNcaWrt2LRYvXgytVov/+7//w8qVKzFu\n3DgAwJYtWzBgwADodDosWrQIM2bMwG+//SZ/99NPP8XGjRvx5ZdfIikpCa+88gpefPFFzJkzBytX\nroTJZMLmzZuh0Whw5MgRxMTEYOrUqdi7dy9GjhyJe+65J6TrnzZtGrp06YL58+fj5MmTeOihh9C2\nbVsMGDAAs2fPxoQJEzBixAhYrVacOHECAAKeHwBmzJiB9PR0bNiwAWazGY899hiysrJw7733Yt68\neRg8eDCWLFkCu92OQ4cOXfD/i7h0IQuDCBsjR47Ed999h61bt6Jjx45IT0/3u9+9996L8+fPY/Pm\nzUGPN2LECDz33HPYunUrxo8fj+uuu85vfKIhjB49Gp06dQLLslCr1RgyZIhs0fTr1w+DBg0Kag09\n8MADSE1NRUJCAm666SYcOXJE3vbbb78FjVssW7YM//znP5Geng6NRoNJkyZh3bp14HkearUa1dXV\nOH36NBiGQffu3WEwGC74+s6fP499+/bhySefhEajQdeuXTF27FisWrUKAKBWq5Gfn4+qqiro9Xr0\n6tVL/tzf+SsqKrB582bMnDkTMTExSE5OxoQJE7BmzRr5e4WFhSgpKYFWq0Xfvn0vuM3EpQtZGETY\nGDFiBO6//34UFBRg5MiRAffTarWYOHEi5s2bhzlz5gQ95l133YW77roLHMfhl19+wfTp09GjRw8M\nGjQoLG3OzMz0eJ+Tk4P3338fZ86cAc/zqKurQ5cuXQJ+PyUlRX6t1+tRVlYGQHSRbdu2DU8//XTA\n7xYVFeEf//iHbDUJggC1Wo3y8nKMHDkSxcXFmDZtGoxGI4YPH45p06ZBpVJd0PWVlZUhMTERer1e\n/iwrKwuHDx8GAMyePRvz5s3D7bffjrZt22LSpEm48cYbfc4/YsQITJ06FYWFhXA6nRg8eLDcZkEQ\n0Lp1awCi9fHOO+/gnnvuQatWrfDggw82OLZEXDqQYBBhIysrC9nZ2di0aRNmz54ddN8xY8bgo48+\nws8//xzSsVUqFW699VYsXLgQJ06cCJtgKF1MdrsdTzzxBP7zn/9g6NChYFkWkyZNChofCcTBgwfR\npk0bJCUl+ZxHonXr1pg9ezb69Onj9xiTJk3CpEmTUFRUhL/97W/o2LEj7r777gsKnqenp6OmpgYW\niwWxsbEARKtDsv7atWsni/a6deswZcoU7Ny5Ezqdzuf8HTp0wA033ICYmBjs2LHDbztSUlLw8ssv\nAwD27NmDhx56CNdeey3atm0bcpuJSxdySRFhZfbs2fjss8+g0+mC7qdSqfCPf/wDixYtCrjPypUr\nkZOTA7PZDEEQkJOTg5MnT8pukwshNTUVhYWFQTt/h8MBh8OBpKQksCyLnJwcbN269YLPBYjuqBtu\nuEF+n5KSgurqaphMJvmzcePG4e2335bTjisrK7FhwwYAwI4dO3D8+HHwPI/Y2Fio1WrZukhNTcW5\nc+eCnl+6zszMTPTp0wdvv/027HY7jh49iuXLl2PEiBEAgO+//14OZsfHx4NhGLAsG/D8aWlpGDRo\nEGbPng2TyQRBEHDu3Dns2rULgBjTKSkpAQAkJCSAZdmLijsRlyYRtTCKi4sxY8YMlJeXQ6VSYezY\nsXjggQc89tm5cycmTpwoj0CGDRuGiRMnRrJZRJhRjjS9R5LBRsN33XUXFi5cCKPR6Hf/uLg4LFiw\nAKdOnQLHccjKysILL7zg4xcPZcR922234fvvv0f//v3Rpk0brFixwud7BoMBzzzzDJ544gk4HA7c\ndNNNGDp0aMBjBjtvTk4OXnrpJfl9x44dceedd2Lo0KEQBAFr1qzBhAkTAAAPP/wwysrKkJKSgttv\nvx1Dhw5FeXk5Zs2ahZKSEhgMBtxxxx1yJ//AAw/gqaeewtKlSzFixAg888wzQds2Z84czJo1C9df\nfz0SExPxxBNPYODAgQDEDLLXX38ddXV1yM7Oxty5c6HVaoOe/4033sBbb72FO++8ExaLBW3btsUj\njzwCQLSsJDFJTU3FM888g+zs7KD/G6L5wERyxb2ysjKUl5ejW7duMJvNGDNmDN5//3106tRJ3mfn\nzp1YvHgxFixYEKlmEESjUlFRgVGjR
tUb1CeI5kZEbcW0tDQ5HdFgMKBTp04oLS2N5CkJoskxGo1B\ng90E0VxptKB3QUEBjh496tf/nJubi1GjRiE9PR0zZsxA586dG6tZBBF22rdvj/bt2zd1Mwgi7ETU\nJSVhNpsxfvx4TJw4EX/60598trEsC71ej5ycHMyePRvr1q2LdJMIgiCICyTi6QtOpxNTpkzByJEj\nfcQCEF1VUo74kCFD4HA4fIrYedMIGkcQBEF4EXGX1MyZM9G5c2c5I8Sb8vJypKamAhDr2gBAq1at\ngh6TYRiUlRmD7tNSSEuLp3vhgu6FG7oXbuheuElLi2/Q9yMqGHv27MHq1atxxRVXYNSoUWAYBlOn\nTkVRUREYhsG4ceOwbt06fP3111Cr1dDpdD61hwiCIIhLg0aJYUQCGjGI0OjJDd0LN3Qv3NC9cNNQ\nC4OmYBIEQRAhQYJBEARBhAQJBkEQBBESJBgEQRBESJBgEARBECFBgkEQBKHAZDJh5crlF/XdGTP+\nCbPZVP+OLhYvXoilS7+4qHM1BSQYBEEQCozGWqxc+a3fbTzPB/3um2++A4MhLhLNuiSgFfcIgiAU\nLFjwHoqKCvHww/ehX7/+GDhwED75ZBFSUlKRl3ccS5Ysw9NPP4myslLY7TaMHftnDB8+CgAwduwI\nfPzxElgsFjz55BT07HkVDh3aj7S0DLz++hxotdqA5z1x4hjeeut12Gw2ZGdn4+mnZyEuLg7ffrsU\nq1atgFqtRvv2HfDCC69i3749mD9/jmvdEwb//e8ij2V4IwUJBkEQlyzLNuZh19GGLYmgUjHgOPf8\n5Gu6puPemwNXxH788ck4c+YUFi/+EgCwb98eHDnyB5YsWSavAT9z5izEx8fDZrPhb397AEOG3IyE\nhAQA7oWrCgrO4cUXX8NTTz2D559/Gr/9thG33HJbwPO+8soLmDbtKfTufRU+/vhDfPLJQkyePA1f\nfvkZli9fDbVaLbu7li79AtOn/xtXXtkLdXV1QYUonDRLl9T3m042dRMIgmhBdO/eQxYLAFi27Cs8\n+OBf8Pe/P4TS0lIUFOS7triFqXXrLHTqJApTly5dUVxcFPD4ZrMJZrMJvXtfBQC47bY7kZu7DwDQ\nufPleOGFZ7B+/U9gWXGZ3p49e2P+/LexfPlSGI21jbYMbrO0MBatOoTOrQcirVXkTTCCIJqOe2/u\nHNQaCIVwlAZRrlG/b98e7N27GwsXfgqtVovJk/8Ou93u8x3lqJ9lVX73URKoStN//jMPubl7sWVL\nDj799CN88cW3uP/+B3Hdddfj99+34O9/fwjvvPM+2rW77CKvLnSapYUBAAVloWciEARBhEpsbCws\nFkvA7WazCfHx8dBqtTh79gwOHz7kd78LKdNnMMQhISEBBw7kAgDWrfsRV10lrl1fUlKMPn2uxuOP\nT4HZbILVakFhYQE6duyE++6bgC5duiE//0zoF9gAmqWFAQAFZWb0uTytqZtBEESUkZCQiJ49e2PC\nhP9D//7XYeDAQR7b+/e/Dt999z88+OBf0K7dZbjyyp6Kre4YhhiQDp2ZM1/AW2+9BpvNhqysbMyc\nOQtOpxMvvfQczGYzAAHjxt0HgyEOixZ9gL17d0OlUqF9+44YMGBQvccPB82yWu3w6atwbbd0PDby\nyqZuSpNDlTjd0L1wQ/fCDd0LNy2yWq1Wo0JJpbWpm0EQBNGiaJYuqYTsMpTXNk4aGUEQBCHSLC0M\nc/pO8J23wGbnmropBEEQLYZmKRgSFbV1Td0EgiCIFgMJBkEQBBESzVIwkrViOm1JTW0Tt4QgCKLl\n0CwFIzsuGwBQYa5p4pYQBBFtNKS8OQAsW/Y1bDab322TJ/8dx44dvehjNzXNUjCS9YkAgCorWRgE\nQYSXYOXNQ+Hbb7+GzRad7vJmmVabFp8EAKi1U3kQgiDCi3d584kTp+Crr5bg119/hsPhxA033IiH\nH34UdXV1eP75f6OsrBQ8z2PChEdQWVmO8vIyTJ78GFq1aoV58z4IeJ6ff16LL774FAAwYMAgPP74\nZPA8j9dffxnHjh0BwODOO0fg3nv/7LfEeVPQLAWjdWIyAMDoJJcUQUQzK/J+wL7Sgw06hoplwPHu\nghZ90ntiTOe7Au7vXd58167tKCjIx6JFn0MQBDz11DTs35+L6upKpKam4c033wEAWCxmxMYa8M03\nX+Pddz90lTv3T3l5ORYseA+ffPIl4uLiMXXqJGzZkoO0tAyUlZXis8+WAoBcztxfifOmoFm6pDol\ntwUAmJmKJm4JQRDRzs6dO7Br1048/PB9ePjh+5CffxYFBfno2LEzdu/eiQUL3sP+/bmIjTW4viFA\nWebcH0ePHkbfvv2QkJAIlmUxbNhtyM3dh6ysbJw/X4R33nkLO3b8Lh/TX4nzpqBZWhiZ8ekAp4Zd\nU9nUTSEIIoKM6XxXUGsgFBpaS0oQBIwf/yBGjBjts+3jj7/A779vxYcfvodrrx2ABx98JORj+ivj\nFx8fj08//Ro7dvyOFSuWYePGn/H008/7LXHeWGtgKGmWFgbLsNA6kiHEmGB1RGdwiSCIpsG7vHn/\n/gOwZs33sFrF+nXl5WWoqqpCeXk5YmJicMstt+HPf74fx48fc33f4KouG5ju3a/E/v37UFtbA47j\n8Msv63DVVX1RU1MNnucwZMhNeOSRx3HihHhMfyXOm4JmaWEAgA7xsKMUJaZKtE/KaurmEAQRJXiX\nN584cQrOnDmDxx57CIAoKM899zIKCs7hv/+dB5ZloFZr8OSTTwMARowYhSefnILU1DSfoLdU8jwl\nJRV///skTJ78dwDAwIGDMXjwDcjLO4HZs1+EIPBgGAaPPTY5YInzpqBZljcHgH9+tQBFqv24r8ME\nXNehR1M3p8mg0s1u6F64oXvhhu6FmxZZ3hwAWunECy+urWrilhAEQbQMmq1gpBrEyXtlJkqtJQiC\naAyarWBkJoiT96rqyNQkCIJoDJqtYGS3EgXDaCfBIAiCaAyarWBkuMqDWLmmSS8jCIJoaTRbwTBo\nYwEBcIDW9iYIgmgMmq1gsAwLhteCY2x+Z0wSBEEQ4SWiglFcXIwHHngAd9xxB4YPH47PP//c736v\nvPIKbrnlFowcORJHjhwJ+fhq6AC1A1Ybre1NEAQRaSI601ulUuHpp59Gt27dYDabMWbMGAwaNAid\nOnWS98nJyUF+fj7Wr1+P/fv3Y9asWVi2bFlIx9cyMbCrjKgx2xCra7aT1gmCIJoFEbUw0tLS0K1b\nNwCAwWBAp06dUFpa6rHPhg0bMGrUKABA7969YTQaUV5eHtLxdaweDCug3EiZUgRBEJGm0WIYBQUF\nOHr0KHr16uXxeWlpKTIzM+X3GRkZKCkpCemYsepYAEC5iVbeIwiCiDSN4scxm82YMmUKZs6cCYPB\n4LHNX8BaKtAVjLS0eCTHJeBcNWCFrcE1UpozLfnavaF74YbuhRu6F+Eh4oLhdDoxZcoUjBw5En
/6\n0598tmdkZKC4uFh+X1xcjPT09HqPW1ZmRCyrAwAUlFe02OJiVFjNDd0LN3Qv3NC9cHPJFx+cOXMm\nOnfujAkTJvjdPnToUHz33XcAgNzcXCQkJCA1NTWkYycbxCUQq620tjdBEESkiaiFsWfPHqxevRpX\nXHEFRo0aBYZhMHXqVBQVFYFhGIwbNw5DhgxBTk4Ohg0bBr1ej9deey3k46fFiYJRS+VBCIIgIk5E\nBePqq68OaV7F888/f1HHz4gTLRETRxVrCYIgIk2znekNAGl6UTDsrJFmexMEQUSYZi0YerUOKl4H\nQWuGxeZs6uYQBEFENc1aMABAJySAibHCaLU1dVMIgiCimuYvGEwcGEZAhYUm7xEEQUSSZi8YWlYL\nALDY6pq4JQRBENFN8xcMlQYAYCLBIAiCiCjNXjBiVC4Lw06CQRAEEUmav2CoYwAAFoe9iVtCEAQR\n3TR7wdCpRQvD6iALgyAIIpI0e8HQa0QLo85JabUEQRCRpNkLRqwsGOSSIgiCiCTNXjAkC8NGgkEQ\nBBFRmr1gGGLENTFsPAkGQRBEJIkawbBzJBgEQRCRpNkLRrxWFAwH72jilhAEQUQ3zV4w9FoxhkEW\nBkEQRGRp9oKhkyfu2cDTmhgEQRARo9kLhlR8UGCcqKihyXsEQRCRotkLhlRLCiyP8xXmpm0MQRBE\nFNPsBUPFqsBCBUblREmltambQxAEEbU0e8EAgBiVDlA7YKVlWgmCICJGVAiGXqUDo3LQut4EQRAR\nJCoEI1YTC6idsNhoLgZBEESkiArBMGhiwTACzDaKYRAEQUSKqBCMeK0eAGB2UlotQRBEpIgKwTBo\nDQAAq9PSxC0hCIKIXqJCMGLVooVhJQuDIAgiYkSFYBg0sQAAG08xDIIgiEgRFYIhWRh2gZZpJQiC\niBRRIRgJMfEAAE5lAcfzTdwagiCI6CQqBCPLkAkAYPUmWG1cE7eGIAgiOokKwUjQxkMlxICJNVJ5\nEIIgiAgRFYLBMAwMSAITY0GNlVJrCYIgIkFUCAYAxLFJYBig1FTR1E0hCIKISqJGMOLVCQCACmt1\nE7eEIAgiOomoYMycORPXXXcdhg8f7nf7zp070a9fP4wePRqjR4/G+++/f9HnStSKglFlI8EgCIKI\nBOpIHnzMmDEYP348ZsyYEXCffv36YcGCBQ0+V6uYVoAZqLHXNvhYBEEQhC8RtTD69euHhISESJ5C\nJlnfCgBgcpJgEARBRIImj2Hk5uZi1KhRePTRR5GXl3fRx0mLTQIAmDljuJpGEARBKIioS6o+evTo\ngV9//RV6vR45OTmYNGkS1q1bd1HHitfpIDjVsKmonhRBEEQkaFLBMBgM8ushQ4bgxRdfRHV1NVq1\nalXvd9PS4j3eMxo1hF0aOFQ2n23RTku73mDQvXBD98IN3YvwEHHBEAQh4Lby8nKkpqYCAA4cOAAA\nIYkFAJSVebqerDYnwGnghNlnWzSTlhbfoq43GHQv3NC9cEP3wk1DhTOigjF9+nTs2LED1dXVuPHG\nGzF58mQ4HA4wDINx48Zh3bp1+Prrr6FWq6HT6TB37tyLPpdOq4Lg1EBgONg5B7QqTRivhCAIgoio\nYMyZMyfo9vvuuw/33XdfWM7FMAzUgg4CAIvTAq0qMSzHJQiCIESaPEsqnGjZGACA2UH1pAiCIMJN\nVAmGjhVX3qutMzVxSwiCIKKPqBIMaeW99w4sgpOnMucEQRDhJLoEQxsjv66xUVYEQRBEOIkqwcjW\nXSa/5gRaeY8gCCKcRJVgZMalwlnSFgAJBkEQRLiJKsGI02sBgQEAcDwJBkEQRDiJKsHQaVUQBPGS\nyMIgCIIIL1ElGDEaldvCIMEgCIIIK1ElGFoNC0gWBrmkCIIgwkpIgvHjjz/CZBInw82bNw9//etf\ncejQoYg27GIQLQzxkpxkYRAEQYSVkATjgw8+QFxcHA4cOIAtW7Zg1KhReOWVVyLdtgtGq1EBPAW9\nCYIgIkFIgqFWizUKt27dirFjx2L48OGw2WwRbdjFEKNhKehNEAQRIUISDIZh8P3332PNmjUYOHAg\nAMDhcES0YReD1iPozTdxawiCIKKLkATj2Wefxdq1azF27Fi0bdsWZ86cQf/+/SPdtgtGrWLBgoLe\nBEEQkSCk9TD69u2L999/X37fvn17PPfccxFrVENQs+IlUdCbIAgivIRkYbz++uswGo1wOp34y1/+\ngquuugqrVq2KdNsuCjWrAgDwZGEQBEGElZAEY9u2bYiPj8eWLVuQkZGBdevWYfHixZFu20WhUYmC\nQRYGQRBEeLmgiXu7du3CsGHDkJGRAYZhItWmBqFWiS4pypIiCIIILyEJRkpKCp599ln8+OOPGDRo\nEJxOJzju0uyQta4YBgW9CYIgwktIgjFnzhx07twZc+fORWJiIoqLi/HQQw9Fum0XhUYtuqTsHK24\nRxAEEU5CEozk5GTcf//9MBgMyMvLQ2ZmJsaMGRPptl0UGpdLyu4kwSAIgggnIaXVHjx4EFOmTIFW\nq4UgCHA6nXj33XfRo0ePSLfvgolRawAANuelN7GQIAiiOROSYLz66quYPXu2PMt7+/btePnll7F0\n6dKINu5i0GlEwSALgyAIIryE5JKyWq2yWADAgAEDYLVaI9aohhCjcbmkKIZBEAQRVkISDL1ej+3b\nt8vvd+7cCb1eH7FGNQS9RgsAsJGFQRAEEVZCcknNnDkTTzzxBLRasTN2OByYP39+RBt2scRqtYAd\ncJBgEARBhJWQBKNXr15Yv349Tp8+DUEQ0KFDB9xyyy347bffIty8C0cfowZMgIMnwSAIgggnIQkG\nAGg0GlxxxRXye0EQItKghhKrjQEAOCiGQRAEEVYuek3vS7U0iCHG5Ta7RGeiEwRBNFeCWhh5eXkB\ntzkv0RhBrEswnFQahCAIIqwEFYxHH3004LaYmJiwNyYcGHTiPAwnxTAIgiDCSlDB2LhxY2O1I2wY\nXEJG1WoJgiDCy0XHMC5VpBgGVaslCIIIL1EnGFqNCgLPws6aYeeonhRBEES4iDrBAABVdTvwagt2\nFu9p6qYQBEFEDREVjJkzZ+K6667D8OHDA+7zyiuv4JZbbsHIkSNx5MiRsJzXYGsDAKi1G8NyPIIg\nCCLCgjFmzBh8/PHHAbfn5OQgPz8f69evx0svvYRZs2aF5bx6jQ4AYHPaw3I8giAIIsKC0a9fPyQk\nJATcvmHDBowaNQoA0Lt3bxiNRpSXlzf4vAaXYJjtdQ0+FkEQBCHSpDGM0tJSZGZmyu8zMjJQUlLS\n4OPG6UgwCIIgwk2TCoa/elThKDkSFyOWXrc6bA0+FkEQBCEScvHBSJCRkYHi4mL5fXFxMdLT00P6\nblpafMBtrVNbAeWAE46g+wXCzjnwxf4VGNbperRNzLrg7zc2F3ON0QrdCzd0L9zQvQgPEReMYFVt\nhw4dii+//BJ33HEHcnNzkZCQgNTU1JCOW1YWOANKz
bMQBMBkswbdLxCbCrZh7YnfsOXMLrxxfXgC\n8ZEiLS3+oq4xGqF74YbuhRu6F24aKpwRFYzp06djx44dqK6uxo033ojJkyfD4XCAYRiMGzcOQ4YM\nQU5ODoYNGwa9Xo/XXnstLOeN02sAXgU7d3FZUmaHuPysyWEOS3sIgiCigYgKxpw5c+rd5/nnnw/7\neRMMWoBTw666OMHgqQ4VQRCED1E50zsxLgYCr4JDcJcGKbdWYMf50GZ+8wIPAGCZqLw9BEEQF0WT\nBr0jRaJBC3AqcIJV/mzW728AANrGZyMrLjPQVwEAnCQYuDQXiSIIgmgKonIIHaNRgRXUEBgnzhkL\nPSrXStZDMHiQhUEQBOFNVFoYAKBmtHAywOu75uGWy26SPw9lnQy3S0oVsfYRBEE0N6J2CK1hNfLr\nXcX75NehLN0qCYaKLAyCIAiZqO0RdSq9/Fq5XKuDr3+NDCmGEY5Z5wRBENFC1ApGK1WK/FoZtwhl\nrW+BLAyCIAgforZHTNG6S4ywrPsyQxEMjmIYBEEQPkStYGTo3amzVoc7vdYRimDwlCVFEAThTdT2\niK1iDXCWtwYAOBWZUaEIhtMV5yCXFEEQhJuo7RENOg0c57r4fB6KS0oKjDMkGARBEDJR2yMaXAUI\nvXGGkCXCp1jIAAAgAElEQVRld4kKS1lSBEEQMtErGDo1IPheXiguKYerym3gwuwEQRAtj+gVDL0G\n4C9OMOwuK4T3M8nvaOUJvLN3AaxOq882giCIaCZ6BUOnBsAAgqdb6UJiGP7KiLybuwgnqk9hx/m9\nYWknQRBEcyFqBUPFstDHqHzcUiEJBud07Ru4jIhADiuCIFoYUSsYgJgp5R34DqU0iCQqoRQqJAiC\naClEvWAIvLdLKvTig1yQUuhkYRAE0dKIbsHQqyF4Bb5DKz7Iefz1i0CCQRBEyyK6BUOnkWMYrOtS\nL6SWlL8sKQmSC4IgWhrRLRiK1NoYVgcgxFpSLsvCKXAQAlgS5JIiCKKlEd2CoZy8x4kLKtVnYQiC\n4FEOPZQlXRsbk92M8+aSpm4GQRAtjKgWDH2MGoJLMBw2FipGVa+F4S0Qmwp/xz9/m4kamzFi7bxQ\nntn2Kl7ZMScka4kgCCJcRLVg1Nk5MBobAMBm0ULNqOsNentnRi0/8T0cvBMHyw97fB7IVdUYSFaS\ng6s/gE8QBBEuolowEmI1YLRiCQ/epocKGtQ564J+hw+QGcXg0itESPNECIJoTKJaMG7qmw2GFS0B\nwa6H4FTDytXBZuewcPVhFJabfb4TbO6Fkksh6B1KxhdBEES4iGrBUCmWZtVy8bDZWFiddVi78yy2\nHy7Bf77yrQcVapC7KV1SEqFMQiQIgggXUS0YADD96okY1u5GXNHqcjhsLHiBh91Vvtxo8YwBcDyH\n1afWhXTcxnIH8QKPUkuZX4FyCmRhEATReES9YHRMbI9Rne9AWis9BE4NAMgXDoBtVeLjVPr9/C5s\nLdoR0nG5Rhrdrz/7K17c/h/8fn63zzZySQG1diNKzKVN3QyCaBFEvWBIxMaoAac4F+MUvxsxV+zz\n2cdo941pSHjHLEKNdTSUPSX7AQCHKo74bCPBAJ7e8jJe2vFWUzeDIFoELUYwDDqNbGHIsKFbCd7x\ngkshQ4kEw82lEFMiiGinxQhGrE4NeAkGE+NtUQTudLznbzS6YPiLYVDQW+ZSEHCCiHZalGAIrvIg\nEqw+sAvKG+/OubE6a4YJPP+Dgt5uSDwJIvK0HMGIUfsYEIzOgnOlJuUn8ist6ykuzqa2MPzQWKVB\nmoO7J9CES4IgwkeLEQxlqXMZlQOzFu9UfODuGPVqnceuDq/RPMc3blFCf112fTGMrYU78N/cj33m\nlhypOI5lx1eFJARFpmL849ensOP8ngtpbqPTWEkIBNGSaTGCEatTgyvPgiP/Ctj+6A8AYNSi1eCv\n89ep9R7vvTtnrpHdQf5mltfnhsktP4Q/Ko/B7LB4fP7e/o+QU7AVxZb601G3nRcF9ZvjKy+gtY0P\nJQAQROSJuGBs2rQJt912G2699VYsXLjQZ/vKlSsxcOBAjB49GqNHj8by5csj0o5YnRoAC2dxR/B1\nBgAAoxI7mYpam+/+3hYG5y0YTT+ira+TtDnFCYoNcZ9J1gnLqOrZs2m5FP4fBBHtqOvf5eLheR4v\nv/wyPv30U6Snp+Oee+7B0KFD0alTJ4/97rzzTjz77LORbApiNIoOz5UtpTaYwPTKwYdbq9Cv9ZUo\nV1nlXXReguEdYG6siXvBqC/obeNEIQxkiYRyDbzLbaViLm1j9FKIKRFEtBNRwThw4AAuu+wyZGdn\nAxCFYcOGDT6C0RhBVYZhMLhnazh5HtsPl0DgVECMGSyAYuzAD9U74CjqCE2WuL9PDMPHJdVIWVJy\nIF68R8p4RL0WhkswuAD72UNY31wKJrOXumBcAgJOENFORHuBkpIStG7dWn6fkZGB0lJfv/n69esx\ncuRIPPHEEyguLo5Yex6+sxsevqOb+MZ7Eh8ANq5Kfu0tGD4xjAvooOqcdTAFmEXu5J04VXMmYNFD\n76Ra7gIEo04SjADHDmU9DU52SV3igkEWBkFEnIhaGKFYDjfffDPuuusuaDQaLF26FE899RQ+++yz\ner+XlhbfoLapoAUPz9iFKsEtGMnxCR7bzJwJKSkG+b2d57D5UDEcTh7jhnUJeq57v5kBAFg27gOf\nbR/vWYp1eTl4/JrxuKnjdT7b1WrRlabRqpGWFo86h3s9D61O3BboXkgWRHxiDNKSxX2U/xN9vLre\n+6g96Tq/StXgex5JEhJFgb+U29jY0L1wQ/ciPERUMDIzM1FUVCS/LykpQXp6usc+iYmJ8ut7770X\nb70VWl2gsrKLXzJ1wfQhmL//CE7XBj6GYPcM8p6qysdHO5a53xdV4cgffwAAbr4qK6Tz+mvzznNi\nrah9547gyviePtudnDjCt9ucKCszwuJwx1lWHlmLUd1uhana11LgBR42pyiI5ZW1iOfEc9tclXoB\noLyqFmWa4Pex1iJaRoLANOieR5qyylp0TmnY76K5wws8BEGAihXFvSXfCyV0L9w0VDgj6mfo2bMn\n8vPzUVhYCLvdjjVr1mDo0KEe+5SVlcmvN2zYgM6dO0eySQAArUaFWI0+6D7eLikA+CU/x/2GcY/U\nrTYnVm05jYIyk8936kNy9YSa5ePtevnm0Gq/+9kVwqAMeludVr/7BMLqWqHwUgx6K914NHEPeGPX\nfEzNiWzyCNGyiaiFoVKp8Nxzz+Hhhx+GIAi455570KlTJ8yfPx89e/bETTfdhCVLlmDjxo1Qq9VI\nTEzEa6+9FskmyfgTBCU6dUzwAzDuzmrN72fx4/az+P1wMV7/+8CAXymrsiAtKdbjM6kjrr/DEwXK\nWzACxSGk+IX3d6yKJWrrW99cuf+lOM9BKRj1TaS0OutgcViRok+KdLOajAJTUf07EUQDiKhgAMAN\nN9yAG264
weOzKVOmyK+nTZuGadOmRboZPsRpDEG3/7b3PBDMCFFYGL/uKwQAlFZZfXZTjuif+nAb\npo/rix4dkuXPWFkwgge9pbN575cS678DVLqeLE4rquqqkaRr5SEY9hCC3lL7bSFYI42NRwJAPSnG\nL/7+JowOE+bdOBtq1v2zLzGXotRajp6p3SPWzsamOZRyIZonl56foZFI0rUKuj2voB6fp8LCsNrc\nnVVRdSU2F/4OO+dAta0GT26apfiOgEOnKzwOU59geOM9klZ2fkqk+AUAfHzoCzy7bTZMDvNFWxj+\nBEMQBNQpjtfYKDPV6nPpGR2iu9A7PfqlHW9hwYFPfWbD+8PqtOLzP75BhbXyIlrbeDjJPUdEiBYr\nGCm65OA7CAycFZngqlMx54aX8UC3cR6bGdZ/B/W/wxux9NhKzN37Ac6bSzw3MoJP9VlVkBhGjckG\nh9Pzc2/XVSBXkY3znb2+pXAHamw18vtQ0mqlOIeDd/iI2rLjqzB90/Mot1b4+2rEUbraQk1zDnS/\nQonnfH9yHXYU78HHh74MrYFNxKXoPiSig4i7pC5Vkr0sDN6mAxujHC0zcJy8CgCgZbXINHhmd0Hl\n+VBe2y0duXnlyCs7DyQA+cYC6FRecRKWh3e1cqnkhj8LY+p7WxHT3Qw2zl1LyltYvEuWSNT5EYzV\np9Z6vA9l4p5ytGrn7B4z4DcVbgMAnKw+g1R9Sr3HCjceMYwQR9UNEQyTy0qxOOu3RpoSEgwiUrRY\nCyNZ5+n7F2zeAQu3H7isxoo4lWc6GqPiPPbpnJ2I7NQ42AWl6HjXU/f1LdfvkmI8DiUJRvuEdgAC\nlzgPJeZQn2BwPOfRrkDHbKpJc6FaGMoONFCZFBtf//2S7r3qEq+rVZ9gVFirkF9b0EitIZqaalsN\nqhWehYbQYgXDO+gt2Dyzl27sm4Ur2ohzRJ7+cDvmfn3U9yAKK6NVXAwMejWgltJQVT6dE8PwqLN7\nfsa6TI5QO13JJRWj0gIAnAHcSsoYRiCCuaQsDisWH/Z0vfhzcwFNt3iRMp4T7P4p4xPK4LiHGDpD\nEAzXdarYS08wlIHu+mJTXx/7H+bt+zDSTWpSjlXm4VD5kaZuxiXBM1tfxTNbXw3LsVqsYDAMg8d6\nPSi/97YwDDo1+nV1u6GKyn3dEIzKgZf+ei1u6puN3p1TEafXgNG4C/6dr/QKnDMCrHWeoz+3heFp\nfQRKE5U6Rq1LMCQLw8bZsf7Mr6hzCUWgzl1JMAtjw7lNyC075PFZiaUMB8oOB2xTY6M8b7BAr1Iw\njledxObC3wF4phjbQ7IwXIJxCVoYSvGrb2GtKlsN6jibj1X2R8UxbMjfFJH2NTbzcxfigwOfNHUz\noo4WKxgAPFIp777OM62ydWoskhM8YxBclVccQ+1Em7Q4jL+lCzRqFgadCtCIHQ/DAHvzvIPePCw2\nz4dZFSCGYbJ4duZyDMMlJLKF4Xrolx9fhVWnfsL3p34C4D+GIZERmwYAcChcTA7OgV/yc2C0i356\nf4Kz4MCn+PDgZzhZfcbj86bymXtM3Ati5Zgd7jpey45/h6XHVsLssHgISSguPOmeMD4Vvpoeh4fb\nLfj/w+K6bm9h+e/+j7Ei7wd50EEQ3rRowVCSluBZO0rNAu3S46BWsejdKQWPDu8OR35X8DYdeKvo\nzrqqS6LHdzQ6p0dQ21jnNS+DEXDwVAXyCmtQWi1uqzWLwuAtGEYvwTBZXYs9ebmkpIf+ZM0ZAJBT\nPoN1gFL8xq7oMH49twUr89bg08Nfi00N0imWeC28FErAOBJ4xDCCpNWa/KTMztj8AnaV7JPfhyIY\nUgFJK+c736bYXIKcgm1NNgdC6WoLZmEIgiALRqC5K6GkGEcLds7hMVcq2gj377HFZkn5Y1BWf2wt\n2gEAiFHHILWVHv+degM0alFX+1xxO5Zu6AxD+3P4teRnDL4q1eP7lapTgKKfL60xAcpYOcNDEIDZ\nS/YgwaDFv/7cB+fKjFAleqbLWm1O5Jd6urOksiOSsGjlGIYTJocZJRaxxIqaVcPisOCcsTDgdcZp\n4qBlNXLHAQCVtmoAwDmT7/c0rMbDL27xesCCWTNKzhmLkBGbKre9oXi4pIJYGJYAHeCPp3+WX4ci\netJcDmU9L4k3d78LG2dHRmwauiZfXu+xwo0zRAvDwTtk951yP2XHYnKYomZGfH3p1s9ufRVmpwX/\nvfnNRmpR46J8RsIhHi3ewojXxgEAErTx+EvXu/HMtdNwW/uh6JHSFQBksQDERZgm3NYV2UliSm4d\n79lRnnEcgsCpwBld2707IUWWlIk9j+NFJfJndo7DkvXHsOdYGWYu3I6PfvAM2Dk5Hk6Od1sYrGRh\nOPDt8VXyflW2Gry+az6OVeUFvOZYjR7JuiRU1rmr80rBd3/ZWglazwyxyjpRXCQrxBrC5L2ztefw\n+q53sPDg5363VxltKKu+sJGeMs7zw+l1KLf4n1Bn5epvX7CYjyAIKLGUyddpddb5PHyShVJsrn/Z\n20igFMxgguGRAKDYz6xIFfZnkYWTs7XnsOb0zxGzxpTHrS8T0NyAFOkySwUWHVyCKtfzcCmiHOiF\nI9bY4i2MGf0m40TVKXRKbA8AyIrLRFZcZtDv6F2FCyVTluM5LD22AkauGoI1EXCIdagY1hWgFmJh\nZyxol2HA2VPA4GvisYdZi2+LDgCMmJ1VXmvGmX2F+HVvYMvgx9/PIqOD+ANgoQEgPvSVio6y0lol\nj4SVZBkyUWQW1xqJVeuRrE9CsaUU5dZKrMxbA6tD7PAkwVC6pBK0caioc5+jwjVRT8Wq4OSdIc32\nliygI5XH/W6f/t+tAIDF/7653mNJeE9iXH74R9zdfqTPfqH45INZGHtKcvHJH1/L7zmBg513yG5B\nQExe4AUelbYqf4eIOMpFsoK5pJTW4cZzm+HknfhL13tQY6uVP1fGfCLBm7vfBQB0T+6CDontwn58\np8e9qH+uESA+wxea/fbl0W9xovoUVAyLh6+874K+21jYudB+F6HS4i2MZF0S+re+2mcGdjCk9b6l\nEefRqjxsO78LACDYdRAE1211CUasVtz/nps64L1/3oD4BHEExGhtYFwlRjiNCWCD/UMFfLflNL76\n5RgAYOfhcgDixD2T3YxkXRK6Jl3uVywA4OqM3vLrU/kWJKpFK2jx4S+RW3YQx6rF43I8L8dLJOK9\nLAyp85dmqQeyMEwOM/aUiOXbA5Uw8b7GMkvgWeMlljIsObJMdjF5xy10AVxddSFZGIEF41DFMZ/P\njHZPl2FyjHg/KwOMNo9UHsdHB5eIAl9XhUkbZ2BL4fZ62xUqTiE0C0Ppnssp2IatRTvh4J0egmEK\nQTA2FfyOY5WBrdhQECDAyTuxMX8TamzhKz+uFIlgqeOemWWe+31z7Du8vef9oOepC1I2JxR4gceB\nssMRTRpRXlc4ztPiBeNi0KtFC0N6+DSKkYmejQMEl/i4BEOvliwOA
bE6NZLjFSm8CjeVpp17rsdl\nGfEY3Ku1z34Wm/jjLCqzQsWo4OCdMNqNiNfGITkmcLmTGJW7+u7BE0YcPyUe52ztOY/9HByHqe9u\n8VjqL8HltpMos1bAZDfL0xKVMYw6uxN1dvGHOW/vh1h8+EscrTwBdZBUVMmFoG57DC9sfyOgFfLR\nwSXYfn431p39FYCviR2r9V8tMpQ5KcqHnuM5HCz/Qy7O6G8sccbrvsVqREuxIkCZlPdyP8K+soPI\nNxZgvys1+etjK+ptV6g4L8LCkKhz1qFaaWEEWB0SEDvg38/vxjfHV2J+7sKQyssEghd4bC3aif/l\n/YBFAVyVF4PSDRXMwlC6IVfk/YBFB5fI7w+UH8bJmjNBxUD6/VucVvzvxOoLDp7nFGzDhwc/w/IT\n/pcoCAeOEO9FqJBgXARS3ENKQVVWfb217+W4tovo0hJngwN6jdhZSx1cWqJSMNyjHFXKeQzskYGJ\no67ErIeuwd1DFGufS8Ii/RVYsGBRW2eEU+CQoI2Dw+w5+VBJnMbd6QucBtZa/6NxhhXAXnbAY/Tl\nbWEAwKmaM3JnoXRJTXx7E6bM2wxe4GUXmNlhCVpcUSreqE4XO+HD5X4mScI98pUsGu+AZqBg+rmK\nwD7m7DhRlJWdx8Zzm7HgwKdYmbcGgKd7Ttr/lCsrTULqpPy5v5QWmCAAWpUmYHsuFs+02sAdg9lP\nwP7fW17C/rKD8vtgFsaPZ37BF0fcC4ntU3zvQrFxdnkGcr4xfDPPleVygsUwlMkLW4t2IrfsIIrN\npR7t8rYk/XGq5gw2ntss/15C5axrtv3hCv+/93CgFHRySTUR0ixx95wF9yhEzaoQqxM7LrVa7CSl\ntTUkF4pe5+6ADLHukTdvTMLfhveQJwwmGrRITxbdWR2yXJ22LBgMnE5GDvSWlfPYsivwjztGUMxs\nd2qgsicG3FedVugRRFXOio9jxJpRO4r3yHNDvF1STk7A3L0L5PecwHn8WJXBdgAwSi4wTrwXoUyi\nA3wD9NJs7RqTDUvWHYOlzoFqkw1nSgPHFeQUY8WDddbVeW0q3IbcskMe7spOiR2gYlTIry2A1WmV\nvyfNafE3Ij1ZfVp+beftcsJCOAk1SypQHaxDik4rmGB4lxTJU1zbhWLjbBdcrTkUPF1SF2Ztvbzj\nLfx6bov8vjaIYAhepX9MfiyzIlMx9pX6F1XJMyH9vywOK7459l2DqiGXmEux/syv8v20k0uq6VGz\nasSq9XK8QBkwbROXJfv2e14udsrSyFcaESs7z1idCq1ixP3aZvq6VGK00r+IR0KsBnBZLe0zWoHn\nGFmEzhU5INTF+XxfQnC4JyEKPAsVFxd0EakCxahco1LLbazKF91eylngUozA5nCLjHIE/sXGQ/hh\n+0n5/XPbPBfJkiYpCrxLMAK4Obw9QzUWT6GycXYsP/49nt3+MjYVbcXKLSfF+SyqwA9Ksi4JDBiY\nHWbwAo8jFZ7usEUHPwerOHOsRo9WMYk4XZuPJzfNwqeuYLjUZn9ip3Rf2TmHd4WxsBCyS8qPhSEh\n/U4tF1Cy/nTNWZ/PVp9ah8m//tuvi0b5v7VxdlkwvDvfhhCqGybQvZAqAQBA7QXEVjR+LMdXd76N\njw75z6RSueJ60v9u7dkN2FS4DYsPfxXyOb15ffd8rDr1E/5wxd3IJXWJEK+N97EwhrW7EV2TL5dn\nb0sTo6RsGs5P/jsncFCzamhYNTR+PBXSSIETeKQk6sGoxXON6H8FILj/fYJDi9v6XBGwvTaz4uBO\nDVgwaBvfJuD+J8vd2Vqck8Hw9L+g7vBAOM939D02Zwcv8O7Z6V7B+zrOiqLKwMvXykF2WTA8O90q\now3Lfs0DL/UprpjH4TOe8QK7045DFUfAs3ZoLzuKU/wu1JrtYLwE4/FeD8mv4zVxSNYlodRajg35\nm/De/o+wr/SAx/5KC0PLajwqHR921SuSihfaOLtPuqgyTnS2tAq/7PXtZC+UIlMxlhxZJrvAQg16\nB0sjzTZkQqvSXpAv/ry5xCdLbu2ZDeAFHqf8iIkyA8vO2SNSZsXOhSgYAa5TmQAQzMLwRs0ETuyo\nqPO1ciWxlP5fVod4H6v87Bsq0rMjXZuHS6oB8SYJEoyLJF5rgNlhAcdz8j/p8iQx5iA9BNLnsoXh\n6vyVI0CO56BiVNCyWr8/bqnGlCAI6N05Ra5VlRrXCjq1wrXhiEHrFIOniHDuhzG/xIwXBjyFbNMQ\nCPZYgAF6p/YIeH2M3v1gf/5THhb+7zQEcyJ8x/kitVYL3l1xANDYoO/3i+ex1A45xVjim40n8NbS\nfdifV+4jGJVmz07tvysPYu2OfNSaxftZXiM+WBVG8aEQHKIYHj5bCqvdAYFTga+LRbHqEEqM1T4W\nhkHjjvXEavRIj02F0W7yqZ2luAL5lValRasYt2BwAg+O5+SHkRd4n7pWysmQq7efxKlid4fgLS5O\njkdFTf0j/PdyP8L287uxqUAsMa8UiR/P/AKjzb9AW4NYGMm6JMSq9bA4rDhaeQKTNs6o1+UkQEB5\nABeKt+sRgFc5FltY1op38k6PtWeUz1GwVSUDueeU1s6FCEawisf+3ExS3Ez6vbBs4LVxLhZPC4Nc\nUk1GvDYeAgSYHBb5Hy9ZEtJD4BYMsUOT6h15+JsFJ1QMC41K43cugKCwMG7vfxlSUljX+eOg07qt\nBsGuQ2ZyLFSu+RlcTQrq9ruXxt19tBSp+mTE2kSrwmx14I+9Bqh5/5lFUsBePLjnz8Rxzncm88b9\nZ5FfYoIqsVz+7NrMvuILlcMjuA8A63adxR9nqjBv+QG5DIrkkjpTKprvuSfK8d6KgzhVVOvxXXOd\nuH+VSez8JKsnv7QKRrsJgtUAriwbYAScNp72sTCUgqFX6eTaWkWm837vhTK4rlVpEKd1f1/sMCs8\nOhnl/5EXeE/fNst5LL7lHfP46ufj+NcH23D6vOc1eyN1ZFKGmrdVsTV/N+ycHW/smo/NivRdY5AM\nqDitAVomBhanFd+d/BEAsO7sxoD7S6nSyjk6Skot5T6f1Sg6YBtnv6B09kB8/sc3eGXHHLnGWUNd\nUkqCxjC8xD5QRQEAKLX63gvJOpS8CNJAM5yCYQ/RVRkqJBgXSbxGypQyyg+9JBisK5gljTiklFZ/\nJRmszjqoWRW0XuU3JKQfDy9w0KhZpCQzYBkWsWo9DFq3hSHYY5CRrIeaEQVDcMQAzhjYT/cA8gai\ntNqKFZtO4dAp8eGuNtmx+0gljLuvB28LHMsQD8bg0eHd8ehwsUCj83xHOMtbe+xSXF3jOq9bxDJi\nxeA9o3YA3isUKiyOo/mukSgv/hwZlsO+E2WY/78D2Hu8zKc5VpuYumu0ivdXcLrOqXaAUfEQnFpw\ntWLZlmO2XWDUTgic+6fOcu77tvC7E+CtogAEyqhR/l9ES9DzwSu2eLZRKRhWZx0ECLK7gmF5D/H0\nniT3W26R2O780GYPS0Ll
LRh6tQ7Hq04i31iApcdWYHdJLowWO44WBp6JrmV0OF/mgNXhDuaXmMvw\n780v+V0/o7Xr/+s9ek5yWWD+Zr1vK9opv7b5qZh7MewpFef6nDWKrj9HiC6pGntgUZYyIWvtRtTY\narH65Np6Kxr4EwzJ7VTmRzyVrjzR0yAlAISv+rPTI+hNLqkmQ6q1U2otlwVDK1sYomBIIyzvGIZ3\nh6NiVNCoNKixG7Hx3GaPbbwsGOJfo92EOI0BLMNCrxCMB4f1RnysFipB7DylUTVX1hYPDL4O+hgV\n1vx+1qeMOsACXPBJdXcO6IABPTKRKqcDM4hTeWZZ5Ve4On2lMNh0EHgWsQbBQyDE/dzvD5wtAiC4\nM8BYDut2es5zUFJSZcX+vAq54xVc7We04ohRx+qRrE4DBAZmeI1+BeDpD/a43zq1yDsWPGvpWKH7\nYeccLK5OFydBSpaJdzFGKY7x+dqjWL5dLHAYKy3AxXIe114TYATr5HhY6hx44ZOd+GW3773wHpl7\n19Kqc9o8XEKfHP4Kp4trRGsvAJxdDTjVAAOYXPG5irpKGB0mfHlEnDOitKQyDRkAgJ/zc7DkyDL5\nNyp1kiavSaS8wONg+R/y82F12MI7ac3121aOqpceW4kKi/+YQHVd4EWFLotvCzWrRq3NhC+OfIu1\nZzdi+Ynvsfrk2oBVAcyKmAgvCHj3fweghvjb8mepKEvWmJ2WiFgYlFZ7idAmLgsAUGAsCuiSktCy\n7iypM7X5+PbEKo/tKpeFAQD/O7Haw+8qPaBSR2+0m+XRj+QSiFFpcX3PtgDcJUOULqWMZD06tPas\nxqtEUAhG2/g2MDCey9cmxYsWkkHv3q+Dqg+cFa3hrBDnnJQbxc5BGatYsek04NRAq3P6rIF+x3Vt\nkJKgAxNjgb7vr8joc0zuSBmVE8fPBR9hf/j9YberidNA4FkwWvEBTI1LRFK8HoLdbTk5Czu7rlUD\nTtkUpxrxbBKuSOoc8FzKTBmjmccVSZ3wxuBZGNnxDgDAqpM/eexv42z442wVck4cwU6H+L82Vrvi\nSSznIarKAKdUYBIA6uwczhYbkV9iwle/nMD5Cv+uJOlzKcGiZ2o3AKJlU+iaByNxrqpCtPYCYLOq\nIHDi78c7OJ5fVo3P1h7FiQL3/0XKyqu1G7H9/G6UWsphcVhlq8nG2cHxPH7dWwCrzYlauxGcwKF9\ngp5oOOIAACAASURBVPhb3ZtXHHQdk/rILzHi87W+cxi8rYrdhQd89gEQdBW6VrpEJGjjUWs3oszl\nTtp+fjfWnt0ou+u8UVoYNSY79p0oh80h/l/8xVKUc3bqnHXyIMDJcXByDRMNwY94kmA0IZJgnDMV\nyqmUsmB41aSJUQS939nru9KZaGG4R7nVNvdDyStcUg7OgTquTnaHSSM8pR/8xvbXAABu63a1/Jle\n67kYlA+u2IGW1eDf10xB70zPGEVGsmhZGHRud9O9N3ZFRu110NSJrh9G5cSgnplIT3Ffh7MqHQKn\nhsBy0Lg+7p7cBQDQ/8pUvPH4QDx0t3gfazVnkJHsmhGvtfmMhNtlxEGjEn+ukoBJGWOJOgPAq8Cw\n4kPSp0MWurZLguBw3ffaJDgrxPPI7isXAqfG+UoLLk8ILBjSGicAUFhqxQ/bzmDOV39g8Xdn/O5+\ntrQam/cXQRXvtm4cVnd9MaV4StkzgiDg+Y/d7poft5/F5+vcJUmKK7zcHa6B/t7jZeAFQR5JsjZR\n7MtrjSg0esZkfij6Fow6SMqthREtDH+onMjJLYLD6bYwtu2rggbuCgKv75qHf22eJcdV6pw2/LQ9\nH0vWH8dHPx3Egv3igkaJajE12+qou6BUT++YwSuf75FdeEq8j2nQxsLisOD1XfOw8ODn2FYklvGp\nCiIYBnUsErTxqLJV+0zGlFxw3u2p42zILTuE/NoCVyKHIKfB+0u3Vrqk6jibey4KI3gMHi4Gd4IN\nzcO4JIjTGtAqJhGFxiJ5wphkSXinCmoVLil/D4hoYbgfVGU9ImVareRzTYgR3RvXZw8AALR2CQcA\n3NbxBjzVbwqGX+Eu4hejVWFI7yz8c6y7nhQA9OjgKiXisjCkH1m7BK90W9dzEatztzG9lR4v/bU/\n+nfJdl2EExNu64qh/cTYRrZpiChEvAoO3o6+XcRzSQFnB28HyzAex9QoPEOsoQa9O6Wg7xVpmP/E\n9XjhoWsRFyvu0L2Dq/S2a7TMcFqPjLAEXRwG9cyUG945qxX6XS7eIz2rR5u0OGRX34K72t6FyzPT\nUVJpwffrA1s0jEIwNueWYMWmUzhbbISxxn/n+vnPf+Do2SpZsADI1k5KksYjhiH9rw+f9g0cl1S5\nXRw1Fs8OR9lXlVVbsf246LbatV+0hn7cfgJnazw7U9bg6xaRKisDgMkICLz/a2Jj6qBuc8xzXXpe\nBdbhntTp/du2cXbkl4jnPG07iHMmsT0qh8s9p+LgVEysyyuowRfrj+G3fYU+9czeWroP73zraSl4\nj8Klxcm800ctDisOVRzFOWMh9pcdwpdHv8X6M7/6WBgjO90uv47V6OUqzd712Q5VHMU3x1bKMTQA\ncvHSRQc/xxu756OwugxgBDByNWpfwVC6pGzO8LrnpGOFGs8JFRKMBtAmLgs1diPK6yqhYdWyZeHj\nkvLKkvJGxag8ROb387vk0YskGIIgyJN/pKBia0MGXrp5Oib2flj+LsMwaJfQBizDYtq9vXHnwMuQ\naNCCYRj06pSCuf8YhFZxYkd2W3+xUqjUmUkxlnZe8zPaxouioFb5/lx6tBU74lv6t4ZaxcqB41v6\ndUC7jDhkJsXDzjnkY0uCIVlFyrIbyh83a6jBwCsz8Y8xPRGn97IKwGPGn/ugXZY4ur3vph5gBPf9\ni9fGISMpFhkpomWk02rwyJ09wTIsumW3xkt/vRYzx/wJt19+Azq71m231/qWP5HvqZc7rX/3DDw3\noR9iNQGSBVgOtRbPQL+0BHDHbIOHu7DCWonCcjPeXrY/4PkBYEXOKZRWiVZGrcXunpMCYMfhEhTX\n1Hich9GbwTMBgvjn28uv7Sf6yq/PFtqgjg2c6aPJOu3hchR4Vs7K80ed04bjLheWco7Clj01oguR\n5WB1uNs4+8td2Li3EJ+vO4YPvjsEQRDwyY9HsGTdMfxxpgoHT1WgpDJw+77fKqYAe5ezt9itOFzu\nWUBylWtlSo+qzGp3XM6gjgXnDNw9bir83cPF0ye9F2IYd8ZhbuU+j1iVOFdJQG5eOTieh51zeIhI\nHWfzOJ7VduGuui0H3BalFOBWpjF7u04vBhKMBtA2XnRzVNZVedQx8rEwWLdLyh8qRgWHYvWz3SW5\n2OvK/JBiF5zAyyZ0kmLiWNe0znJ5C2+u7JiCu4d08giQJsbFYOb4qzH13t7o0T4ZN/fNxg0dPS2P\nrDh3BtT8G19DnNY9ipw0+kpMH3eV/D5BL25jY0SzXfLVJsXG4oWHrkVKnAECBNn8l
gRD2i/QDGVG\nZ8aVHVL8XpeTd6LrZUnQ6DioGRWu6pSJtqnuhz1VJ1ozIy+/BQBwU9vroVVpMfmqRzCm83CPY/W5\nXAxcQ1BhSpfpSFSL91LgGXBVab4n59T489DL0aF1AjKT3fdFa0uFI190t0mxlaREdycpxYlsvN1j\nguahgkJ89pPSDy8AfmY9m6wOvP7lXgDAb/sK3fswYhVjyT0nCQYbJ7q6HIWd4Ch01yRL4LPAlSkG\nBAoXXU0N0DnG/b/1hxQnAgDwKhhNQWZoMwJqXbPxVayiq3HEiGVgWA4HTisyzBQd7JGzVfjw+8PY\nfOA8ck7lQttlF8A68dOOfJQHWTfF5uDkOSh3d74LALD7RAF2FfiuRQ8AYy6/S3596A/Fb9GmwoFz\ngRMvAICHYrKknYWzzu2eO2M+6XE9ds6OX3YXYP7yA/jqtwN4dturHseqc9o8LACr7cKsDUEQsPhH\n9xo6UhJErSN8VYABEowGIcUxAPeoH3BniUhIMYxApcfVrMrHjD5SeQIAwMMdw3BbGIHrQIVCaqIe\nPTuKnfH9t3TB2GuuFdvhEjoNq8bjvR7C9Ksn+cRjru6S7nZlQbRGtCotDpQdhiAIsq9WsqqkYL4U\nRI2VBUOaGe32Dzt5p3xt6rQiLM37xm/7pYfB7LDAoIkFwzAea1Ok6sX29UnviblDXkGPFLEjvyKp\ns89Kch2zEtCvSxr+b+jl6JKdgSSdmBwg2HXgje7rFATAfrInBJsBCQat6z66LYwJfYaDd3XWOh3Q\nKk6L63q5V2S8vpM4SdLBOaBz1RLjbTowGjvyCsWBABNjgf7adVBnncTrfx/gc93VJjtKq61Yt/Oc\nPDKWrB9GbYeKUePqTqIYMCrX78acgPuvHiofQy3o5fRlEQa8yfV74tS4odOVuFn1NzhL2vqcH3DF\nlyR4Vly22BK4JI00adIuKOam1BmgZrSAygmrXXE8ZSYd60Su8ycwhhrEdNkDVWIFVClF2LS/CDMW\nuEt3eMByWPDdIew4kQ8A6JjYAQBwpCwPjNbXJZSh6oCuSe543bY97s71fKkDzoLgKydyikHemUIr\nHA7FhD+hwmP+j5134Gi+6HbcVrJNHvnzZvH3dryoHFa7u42WOv/WYbG5FL8VbAXnyqKTz+e1pPNJ\n1+RQk90ENatGliH4Gj+hQoLRAC5LcD9UbeLd4uHdyUrvt5/f7fc48Zo4H/+lVE5CDnpDkJdRVVoY\n4UCr0uLpa/6J5wf8S/7sytRu6Jh4WQjf1aBnSjeU11Uip3CbbDlIQiFZXmaHBQwYxLpKw0uCoSyN\n7uAdiNfGyYK7p3S/h59Z6iTdxdosMLgKI2YrrCKdokZWfcvBsgyDiaN74pZrxP9lgk48HqOxe8RF\nBEsCuIps/N/N7uB4crwOzhLRrdcpuS2GDxBH8tdfnYSX/tpf7gCnXz0RE/7UGxpWDTvngJQNLdh1\ngNoOyVro3Uf8q2mTh/SkWEwfdxWm3NPLo70vf7oLVpsTLCvei/atXZMI1Q7EaWLx+IjeHgMWwRaL\nHm2y5ffdsjIxqKf7t9o+Mx4JRTfCunsYAAadshPx/+2deVwV57nHfzNzVg5nAQ77JqsiKosKLkQR\nCbihUEEbkza9as1iNKJZDPfT2BtTc29MbZO0ualNW5PWW1vbmn760U+allSjDcFoJGpQEzSKGAHZ\nZD/bvPePOTPMcEBRIQq833/kzHZmXs+8z/u8z/P+nonR/nBcSgSxCyNm4lAjtKNnEah0bcKB2LyQ\nZ30Yhro02M5NBukVNBen37odwv9znCMLcGqg5TTCPlk8R+69qAIvgfO5Bm1Cec9+WYbX1cYOT80B\nlsdn5xtBVN0gThWOfCp00Jyx7/gU59LDSyW0n2A0e674/se14Nv84LjiKYUjIvcwyk8rZWoYlgdr\nUK7zOPWVkH7tsvd4daLBOHSyGqdk3laHree9IITg9FeNuN5uw39/8ir2fvFX7PzgQzzx08NobhOO\n612t8kRVvbBWyd4OL9YAa5fyd3S7UINxB8g77iCvniyk3lNS/ckf6DihY5sTniHN/U+0jke0eQyu\ndtTBxbsU6zDEvHq5NzNYhBlD4Kfvv57GjciLnge9So/3L37gIYciehodjg6oWZW0XXxeeYaXg3dC\nzaolowIA+y/8Q1rcJV+kxhMeXc5ueLmrH4qyLHdKsv9EAO6OThYAJg4tCu6LQk5aT4U4J8/DcSkB\ntmM5MGq8kZuYBL1Kj+MNn0KrgSx7Tuh4NawGNt4OTkVAeAZwaIRaGyoHOJZBcoxyCiw0hMMvq18G\nF9ijydTR7cS89AhpiifIXweDTgVG7YC3xuD2tnqmRhKCQ+Bn6mlPs16PlfenwnI9Gd2fT0eo1YA1\niydImXI+Ri1iQk1ISwiAUSecp2I0eHC2ctoSALzcWQrTxgfC4owCf90ffKcyfXvt0gSEWg1wQmiL\nU+e63OfqBM0xWZxHrhIgBtcZlhfaCnAnORCwxkb856/+7TFxJ8ZXGLUdxKHBB5/UKfbbKtNhvzRO\n+nypxoGNPz2G7opZsJ1JUxwrZdO5biBFL4/nuFRwNgiG2KoR+gLWolyf4xINjCxxQBrocE5FfZc/\nHDqHr6624vi5a1i/cz/+96v/wVN7/iBNW50i74PRt+HVsv/D2x8cx4ef9coWY3gcOXkVzV1taGkB\nPjndf0bYrUANxh2SEiBYbrm30dtA9Bdj2Dj5MZSkFcNP7yv9EERxOwKC6/ZWRfC7svEcfLQWqZO8\nV/D38kOUKQLX7W3SQjRRuVM0EDaXHQQ9noetjykpAgI1q1ZU5/vo6lH84tTbwn638XQSJzocnSAg\nkocxzicWLMNibrjnSPhWmBqUgimBybBfHA9Xqy/CtNGwXxoH+4UJCPBR1htJTwgEwGBFttAJ6VQ6\nTAlMRrujA1931ErZc+J0mU6lQ7ezG3odAxWrgr9RmAoK8ldhTLARKq5noOHiXSit/lBow8gz7pgE\nD4Bgycwo6Tgn78SP1qSB4ZySDL3ObTC81QY8tUxIsxYHGeK6hycz85EcEoOiObEIdD9XsJ/wL8ey\neHTJBPga3OnbZnOfv+HiolQ8/70p8DXpoFa5f/O8crBkNnGIDjH1dK7u/RaDl5AGLXbyYBA/vu9p\nGDUjPA+r6wQXcBnahE+gDvvS80CGF9pIZQfH6wGeAyE9XgPf5Q1X3RjpsxhXInYvmLz0eHZFirTP\n18uIZx5I8UjD7o8N30rF+sw8lKQVY1PaI9CwGqj8lOtgGLcop+gpWfV+SAoRPFaGcyoMEKPtxNa3\nj+Hn+07BbrootIOswBqjckI38d+4pjqDcvu7ioC38F08/lZeBbA8DGoDHsicOKDnuBnUYNwhDycs\nx6bJjyNeNsKVT0k9NXktNJwGP83c5nFugN4qjTASfYVOJ94nBmatMEprsV33kH0eihrIg0GgQRgd\n17QJQntioF/DypMBWMmA/O3Ce/jHpYNS
pyiiYlWSYRCpbDwHF++SYhdO3ikV3BFXW3upvbC78DUU\nxC68o+dgGRb/kbgCrvoIwKHDipgHhU7GqUVyrFVxbEyoGT8vnoU5KT1TPqKnea2rUbagU+u+Rz06\nHZ1w8k7o1RpMiRXiDQ8tiMJTy1MUWTPvnj+gWPWvHV8Ofdr7CJh6AloNJ02e2HkHCCecJxoMsY1N\nssJXa5NXIdFvHLLC7wMABPh4Yd3SSTAZNPDWq7FtzTT853d61u4I1xE6S51KA2+1AepeZXYtBh3G\nBAm/VdFgsHaD4hiby4as1DBpPQJxqcCxjJRhpvfiwTEcfHUWNNtasLZgAnpj1rsNmU89NGMqhe/x\nEbyHmRN65ubjI41YlhMBhgGCzT4AGCllnCVqyVtI9he+g3Tr8eO1M1GYGYONy5IwNsJHkrbZtCwV\n8eEWxaJWAODbTXDWek7VhvlZMDFaeJ9NGiMWx8zzOCY8SI+XH50uxXXWJa9GpNVtiDnl4lbOKFud\n7u4COK5v3S1GY4M1uhaKZAmGl1brR/pZMXPczaeXBwI1GHeImlMj2p2DLSKfkopyxwF6v2yA0rAs\niV2ADSmPYEZImlR7QszRH+sTi0luZdlYS/9zqncTUTdKlFUWn1deXW7FuEJp9AugzxWzalYlZZPN\nDElHSsAkt8hjh7Sa2cm7cNadFDBWtkKbY7lBEbMDgJhQoSO0WnSYkxqKB7LjoNV4SnHrtSrFd4oB\n94auRpmWmNCBe6n0sPMOdDm7oWbVUgfvQDe0Gk6hVdRbIkakjalX1qJ2OaQ4j2ggwtyDEPkUY7Ah\nEI8nrZRUAnoT5OsFL51yND0teCqMam+M840DwzAeK4XlnuDksYLhnheRg3mRWZg/Rgi021x2RAYZ\nERUidPrfnjMO6wsnSW2i0bmgZlUwaUxotbchJd4q1H2RxTbkXqgIq+1GTFq1lBoOACZvFcbHCt/j\nbxA8Ko4I3yMmU0QGGvG9xBVYFLgMq+6bDR+jFgumRSIiUGi7ZMcyeF9YiCBfL7Asg4mRymCxr7cR\n8yZ7LvIUi6SJzAnP8DhmyewIWC16RITo3OfoEC4aDFYZz2Hdiz6FMs3C70vFMSB83122JvQr7Fg3\nQ/qs1fasHwo0WqBTaW8azxsINxYRotwW/cUsfpC+CTaXHS8fex2AMptKzaqkeXipWJHbYIij3s+u\nnUZqwOAErwYbeQxHw6qlTlT+I51gTYCaVSEveh4qG8/ifK8ypwCgYtWSweAYVqon3mpvV3gYVS0X\noGI4D2M9WDz17RR02Zww6NT4Ts7YAZ/nrxeyz75ur8Wl60I2k9o9DSfGZq7bWxFkCJQMhqi51OXq\nCVwa1F6KHPpoc6RUX6Ld0aGQyhc1y/y9BA/oO+OXY1xQNCJ1Y275ueVMD56C6cFTpM8+WsELKIpf\nAgaMwoPJmBiMyEAjwgK8wTKxKHOvphZrS+j1AGzA3ORIcCyH02c10rN4qw0waY3gW3l0ODqx+Tsp\n2PX5WVx2O1ydzi6EegfjSi814a9RiRBrj0fjghPN7sSQaP9ATP3WePyr9Qucb70AG+lC8bIkRAYa\noWZVmJ84BX2xZpEyVrN4egxe6ZEeQ0yAFeGWQMCtWB9likSzrUURNxJ5dNL3sPPUO4izRONccxXO\nd55BCuLgbQDQIigla9zTUHERBrQ4HHAQb7R1OMB4CVO7S2ZG4WKZHo0QastzLAO5/23RmkEIQYej\nE1rZLXAckZIIArwFoxRqUAqG3g7UYHyDiFIeWeH39VtDAAAs7ikp0cNgGAYaTo2pQSn9nnO3iTZH\nSh2KXaHu2jNqFUeV88ZkwaI19Wkw1CwnqXWyDCtTDW2VgoI2lx1NthYEewcNSX1sANCqOWjVt17c\nx1fvCwaMpKAK9AgFymNPk6zjpfUtf/zir4qYBeAp4xBhDEOEMQwHa/6N5u4WmZClQ5LOFo2VmlVh\nSUIOrl0b3Bz8J5JXobajHskBnvPhDMNIo3QAUjXHvV/+FVMCk9HtErwq0auWDyTaHR0wu41Pq70N\nH1w9jMv2nhgFT3iYtSbJYCRZE/FZw+fSuSJO3olmt6Cgj9aMlAh/uOqm4fznFxBhDJNSyW8F+X3O\nH5ONtKAUWPV+aOpuRrfThrzoXOn5ezPROh6vZm5DafWHONdchX9dPoLJAUm40n4VWk4DjuXAEhYc\nw4FwDrAuHmqiAt+tAWtswv88NhV+Zh3iIr3ReFWs1kjAdxjBaLvAqJyIMIbBwTtwpukLhVy70ZtD\nu7t2jtifPJmy5pafvzfUYAwBNxNUWxqXd8P9ooch1hlgh8HMIcdyyI6cjb1fKIUV+3ODk/0n4lhd\nBc40KUuiBhoCEGWKxNnmLxHo5S9Ne8hLXIoSKfJU2nsFNatCiHeQx2gYgJTCCQi1QkRj6uSd+MMX\n7yqOtbnsYMBIMSwtp5WmPeTV29rs7ah3y6sHeCljLINNkCFQGvTcjLG+cbBozWixXcf2Y6+jobtJ\n4VH3HpGbNEKndt3Wio9rPdPP5ZlzsZYoWHQWHKr5N2plhZPsLofkYfi4g/STA5Lga/GGH26gpXYD\nQgxBWBh1P8b6xCHGMkbanhM5Z0Dn916T9crxnwPomc5jGAZmrQkttuvgCQ8dp0VSaDhOtzXBxrYB\nMEqGQPwtJIWNwXV7C6o7LiPMGIKGLiGlV66N5WfRYoyPBcebAbO7P+mrhOytMuQ90Ycffoh58+Yh\nNzcXO3fu9Nhvt9tRXFyMnJwcLF++HF9/7SkmNty4U41/i9YMHafD541CVkR/dRruNWaHzsCCMdko\nilsibZN7GHJ0Ki2eSF7tsT3ZfwJWTXgQK8YuRUboNMnD6KsmcvAAO69vmm+P/Vaf2+WdXoDeCj+9\nLzZP3dDvdWaGpkt/q1gOZnen2tDZk/Pfam/DsboKsAw7JOnWt4tepcP65O8DABrcAx957EXbayBh\n1vZ4GH1fr6fttCot/NwGQV6LxME7PBa3MgyD9LAUKZHkVmEYBgui7lcYi1ul3eGpNCz3IEXD2uHo\nhIpVYVyQkHFZ11mP67ZWnGxQrlL3NRjh6yU8X5h3iCRGKn9HCFzSlJTlNp+9L4bUw+B5Hlu3bsWu\nXbsQEBCAwsJCzJ07FzExPRlFf/rTn2A2m/H+++/jwIED2L59O37yk58M5W0NOao+Aty3AsuwCDT4\nS4v36js9iwjdizAMg4XROYptfB8yF3JK0orh5J041XAGHY5OWN3TKmJnKc6Tn7h2yuPcCX7jPLbd\nC0SbI/Fa5kt45vAPFenWetmUlDg1E24MQV50Lg5f+VgKXmtYNUxaE+aEZUDHafHP6kNI9Bsnqab+\nq+aIx3fGW2I8FozebQINAZgbPgttjnYcrf1UsU9uMNSsWurQ93/1jz6vZVB7warzRUN3E8waEwxq\nYVBWJyvSVN1Wg5r2r8Ey7G0biKFgbsQs1HXW41TDGagYzmMGQjRuLuJCp7NLSlr49ef/1+f1vFR6\
n+GgtONd8HtHmSKl/+MMX+6Rjvmy5AAAesaY7ZUgNxsmTJxEZGYnQUCHtcOHChSgtLVUYjNLSUqxf\nvx4AkJubixdeeGEob+kbId4nBvMis5B8BwFqcdQAAMvi8wfjtu4KvUuQ9kacVpJ3rHLE/P86mdFc\nHp+PWWEz+jz+XoFjObx83w8VUxJsP/XQ542Zi/sjMrH+4HMAgB/P3goGDBiGwZKY+cgKvw9mrUmK\ne8lH4WaNCQ7eccfpxEPFt+IWgSc8jtZ+ijhZhp98hP1f05+FQe2FsT6xONdc5XENHadDRkg67gud\nhsrGL5DoN06q4d3bePKExxhThMdU0N3EpDHi0Un/geu2Vmg5DT68UqaQ6rDIpH4SfOMRY4mCXqVH\nl7NvzayxPnGItUQhK/w+cCwneeF9VQQ0qL0GdSAxpAajrq4OwcE988yBgYE4dUo5Uqyvr0dQkNB4\nHMfBZDKhpaUFFsu9417fKizDIq+PPOxboTBuMbScBkvjFkvu+nAkziIsMhvonG9veqeBzh+Tfc8b\nC5H+XlT59Ir8WLPGBC2nURoZ2WjZr9fiudzIrD7z/e81WIbFj2dthUrWHvLqdOLzPZG8GicbKlHb\nUY+/XXhP2r9ywoOSqsKMEGEhYn/TkfMis5DZR0rrvYD4nDd6Fx4cVwiGYfB40kqcvPY5ciIz8fTh\nHwIQ4ikTrAmI8xEMr5i+31+qNADMCp0+SHcvMKQGo3eBkYEcQwgZtFz64Yy/lx9WTnjwbt/GHWPV\n++GnmdskYcPbYWlcHv785d+wZuJ3pfUow5G04Mm40lGL2f28xFtnPHfD3z7DMJgckITj9Z9hScx8\nzAm7NzvGvui9TiHRbyzeu1gqZRkBgmFJ9p8A+ANTA1PgIi64iKtP48AwDMb5xOFss5BNtSw+H+lB\nqQodseGCWH9mTliG9P8fbY6UtNz+O+N5qFiVlHnWmyiTclGeWWPCjJCpmBqYgkDD7QX7+4MhA+nV\nb5OKigq8/vrr+NWvfgUAUtB7zZqe9K7Vq1dj3bp1SEpKgsvlQkZGBsrK+lGjpFAoFMpdY0gn+iZO\nnIjq6mpcuXIFdrsd+/fvx9y5cxXHzJkzB/v2CcGa9957D9Omeco6UygUCuXuM6QeBiCk1f7oRz8C\nIQSFhYVYs2YNXnvtNUycOBFz5syB3W7H008/jTNnzsBisWDHjh0ICwu7+YUpFAqF8o0y5AaDQqFQ\nKCODeyf3jEKhUCj3NNRgUCgUCmVAUINBoVAolAEx7AzGzbSpRholJSWYMWMG8vJ6BAuvX7+OlStX\nIjc3F6tWrUJbW8/K3xdffBE5OTlYsmQJzpw5czdueUiora3Fd7/7XSxYsAB5eXl45513AIzOtrDb\n7SgqKkJ+fj7y8vLws5/9DABQU1ODZcuWITc3Fxs3boTT6ZSOH2l6bb3heR4FBQV49NFHAYzetsjK\nysLixYuRn5+PwsJCAIP8jpBhhMvlItnZ2aSmpobY7XayePFiUlVVdbdva0j55JNPSGVlJVm0aJG0\n7eWXXyY7d+4khBDyi1/8gmzfvp0QQsjBgwfJ97//fUIIIRUVFaSoqOibv+Ehor6+nlRWVhJCCGlv\nbyc5OTmkqqpqVLYFIYR0dnYSQghxOp2kqKiIVFRUkCeffJIcOHCAEELI888/T37/+98TQgjZvXs3\n2bJlCyGEkP3795MNGzbclXseSn7zm9+QTZs2kUceeYQQQkZtW2RlZZGWlhbFtsF8R4aVhyHXT7us\nDgAACDZJREFUplKr1ZI21UhmypQpMJmUQmqlpaUoKCgAABQUFEhtUFpaivx8QXcqKSkJbW1taGho\n+GZveIjw9/dHQkICAMBgMCAmJgZ1dXWjsi0AQK8X5EXsdjucTicYhkF5eTlyc4WV0wUFBfjnP/8J\nQPl7yc3NHXELY2tra3Ho0CEUFRVJ2z7++ONR2RaEEPC8ssTxYL4jw8pg9KVNVV9ff4MzRiZNTU2w\nWoXaB/7+/mhqEkTp5LpcgNA+dXV1fV5jOFNTU4OzZ88iKSkJjY2No7IteJ5Hfn4+Zs6ciZkzZyI8\nPBwmkwksK7zSQUFB0vP2p9c2Uti2bRueeeYZSVajubkZZrN5VLYFwzBYtWoVli5dir179wLAoL4j\nw6qAEqFLRm5IX+0z0nS5Ojo6sH79epSUlMBgMPT7fCO9LViWxbvvvov29nasXbsW58+f9zhGfN7e\nbUFGkF7bwYMHYbVakZCQgPLycgDC8/V+5tHQFgCwZ88eySisXLkSUVFRg/qODCuDERQUpAhS1dXV\nISBgcMW1hgN+fn5oaGiA1WrFtWvX4OvrC0AYIdTW1krH1dbWjqj2cTqdWL9+PZYsWYLs7GwAo7ct\nRLy9vTF16lR89tlnaG1tBc/zYFlW8bxiWwQGBsLlcqG9vR1ms/kmVx4efPrpp/jggw9w6NAh2Gw2\ndHR0YNu2bWhraxt1bQEIHgQA+Pr6Ijs7GydPnhzUd2RYTUkNRJtqJNJ7JJCVlYW//OUvAIB9+/ZJ\nbTB37ly8+65Q6rOiogImk0lyRUcCJSUliI2NxcMPPyxtG41t0dTUJGW6dHd3o6ysDLGxsUhPT8d7\n7wmy4PK2yMrKGrF6bRs3bsTBgwdRWlqKHTt2ID09Ha+88sqobIuuri50dAjV/To7O3HkyBHEx8cP\n6jsy7KRB+tKmGsls2rQJ5eXlaGlpgdVqxbp165CdnY0nn3wSV69eRUhICF599VUpMP7CCy/g8OHD\n0Ov1eOmll5CYOHzlwOUcP34cDz30EOLj48EwQnGh4uJiTJo0CRs2bBhVbXHu3Dls3rwZPM+D53ks\nWLAAjz32GC5fvoyNGzeitbUVCQkJ2L59O9Rq9ajRazt69Ch+/etf48033xyVbXH58mU88cQTYBgG\nLpcLeXl5WLNmDVpaWgbtHRl2BoNCoVAod4dhNSVFoVAolLsHNRgUCoVCGRDUYFAoFAplQFCDQaFQ\nKJQBQQ0GhUKhUAYENRgUCoVCGRDUYFCGNcuWLUNBQQEWLlyIxMREFBQUoKCgACUlJbd8rdWrVw9I\n7vq5555DRUXF7dzuLVFZWYm///3vQ/49FMpAoeswKCOCK1euoLCw8Ibqo6JUxHBh7969KCsrw44d\nO+72rVAoAIaZlhSFciuUlZVh+/btSE5ORmVlJdauXYumpibs3r1bKqizefNmpKWlAQBmz56NXbt2\nISoqCitWrEBKSgpOnDiB+vp6LFq0CBs2bAAArFixAo8//jgyMjLw9NNPw9vbG+fPn0ddXR1SU1Px\n0ksvARC0eZ555hk0NzcjPDwcLpcLWVlZWL58ueI+GxoasGnTJjQ3NwMAMjIysHr1arzxxhvo7OxE\nQUEB0tPTsXnzZpw4cQI7duxAV1cXAGD9+vWYNWsWqqursWLFCixatAjHjx+H3W7Hli1bkJqa+o20\nNWWUcCfFOiiUe4Wamhoybdo0xbaPPvqIjB8/npw6dUra
Ji8uU1VVRTIzM6XPs2bNIhcuXCCEEPLA\nAw+QTZs2EUIIaW1tJWlpaaSmpkbad/jwYUIIIU899RR56KGHiMPhIDabjcybN4+Ul5cTQgh57LHH\nyC9/+UtCCCGXL18mKSkpZM+ePR73/tZbb5Hnn39e+tza2koIIeSPf/wj2bhxo+Le8/PzSWNjIyGE\nkNraWjJr1izS3t5OLl26RMaOHUv2798vPXtmZiZxOp0Db0QK5SZQD4MyoomOjsaECROkzxcvXsRr\nr72G+vp6cByH+vp6tLS0wGKxeJw7f/58AIDRaERUVBSqq6sRGhrqcdz9998PlUp4lcaPH4/q6mqk\npaWhvLwcL774IgAgLCxM8mR6k5ycjN/97nd45ZVXMHXqVGRkZPR53PHjx1FTU4NVq1ZJgpQcx+Hy\n5cvw8vKCXq/HggULAADTp08Hx3G4ePEiYmJiBtpcFMoNoQaDMqIxGAyKz8XFxdiyZQtmz54Nnucx\nadIk2Gy2Ps/VarXS3yzLwuVy3dJxA62zMHnyZOzbtw8fffQR/vznP+Ott97Cb3/7W4/jCCFITEzE\nrl27PPZVV1d7bON5fkTVeqDcfYZPBJBCuQlkAPkb7e3tkjrpnj17+jUCg0FaWpokK33lyhUcPXq0\nz+Nqamrg7e2NBQsWYPPmzTh9+jQAodaFKGMOAKmpqaiqqsKxY8ekbSdPnpT+7urqwoEDBwAIJUoB\nIDIycnAfijKqoR4GZcQwkNF0SUkJ1qxZg+DgYKSnp8NoNPZ5fu9r9bfvRsf94Ac/wLPPPov9+/cj\nOjoaqampiu8TKSsrwzvvvAOO40AIwdatWwEAM2fOxNtvv438/HxMmzYNmzdvxhtvvIHt27ejra0N\nDocD4eHhePPNNwEAVqsVX375JYqKimC327Fjxw5wHHfTNqFQBgpNq6VQhgibzQa1Wg2WZVFXV4ei\noiLs3r0b4eHhg/5dYpbUkSNHBv3aFIoI9TAolCHiwoULeO6550AIAc/zKC4uHhJjQaF8U1APg0Kh\nUCgDgga9KRQKhTIgqMGgUCgUyoCgBoNCoVAoA4IaDAqFQqEMCGowKBQKhTIgqMGgUCgUyoD4f001\n1ZxdsABYAAAAAElFTkSuQmCC\n",
- "text/plain": [
- "\u003cmatplotlib.figure.Figure at 0x7f96f1241810\u003e"
- ]
- },
- "metadata": {
- "tags": []
- },
- "output_type": "display_data"
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "test_accuracy tf.Tensor(0.99, shape=(), dtype=float32)\n"
- ]
- },
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAEcCAYAAADUX4MJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsnXdgFGXex79TtiabZJNsGoGEBEihhRJAQBQQQaqABQue\n4h3HqYeK7ThFz0NRz8N2nHIoqKe+dyIKJ4KA0gQpofcaSO/JJluSrTPvH7PTtiSBJIIwn792Z6c8\n88zs83t+9SFYlmWhoKCgoKDQAuSVboCCgoKCwq8DRWAoKCgoKLQKRWAoKCgoKLQKRWAoKCgoKLQK\nRWAoKCgoKLQKRWAoKCgoKLQKRWAoKCgoKLQKRWAoXHfs378ft91225VuxjVPv379UFJScqWbodCO\nKAJDQWDUqFHo3bs36uvrZdunTJmCzMxMlJWVAQD+9Kc/ITMzE8eOHRP2KSoqQmZmpvB95syZWLVq\nlfB96dKlGD16NPr374+bb74Z8+bNAwBMnDgR/fv3R//+/ZGdnY0+ffqgX79+6N+/P5YtWxbQxiVL\nluDZZ59t030OHDgQ33///SUd869//Qtvv/028vLycNNNN7Xp+jz+fXStcejQISQnJ1/pZii0I/SV\nboDC1UVycjLWrVuH++67DwBw9uxZOJ1OEAQh7EMQBKKiovDOO+9g+fLlsu3BWL16NdauXYtPP/0U\nycnJqK2txZYtWwAA3333nbDfzJkzcfvtt2P69OltugeWZUO25XLZvn07nn76abjd7nY/99WK1+sF\nRVFXuhkKVxGKhqEgY8qUKVi9erXwffXq1Zg6dWrAflOnTsWZM2ewf//+Fs95/PhxDB8+XJhtxsTE\n4M477wy6b3OVanbs2IGlS5di/fr16NevH26//XYAnKB5++23cc899yAnJwclJSX45ptvMH78ePTv\n3x9jxozBl19+KZzHX0sYNWoUVqxYgcmTJyM3Nxfz5s2Dy+USfrdYLCgsLER2djZmz56NqqoqQQuq\nrq4Gy7JYtmwZxowZgyFDhuDJJ5+ExWIBALhcLjzzzDMYPHgwcnNzceedd6Kurg5vv/02Dhw4gIUL\nF6J///545ZVXgt7z448/juHDhyM3NxczZ87E+fPnhd+cTidef/11jBo1Crm5ubjvvvuEdu/fvx8z\nZsxAbm4uRo4ciTVr1gh9JdVqVq9ejXvvvVf4npmZiS+++AJjx47F2LFjAQCvvvoqbr75ZgwYMADT\np0+XPXOGYbB06VKMGTNG+L2yslI4V3FxsdAPb7zxBkaOHInhw4fjL3/5i9BWs9mMOXPmIDc3F4MH\nD8b9998f8h1QuLIoAkNBRt++fWG323HhwgUwDIMNGzZg8uTJAQO5VqvFnDlz8NZbb7XqnGvWrMHy\n5ctx/PhxMAxzWW278cYbMWfOHIwfPx6HDh0SBkEAWLt2LV555RUcPHgQiYmJiImJwbJly3Dw4EG8\n9tpreO2113Dq1Clhf38tYcOGDVixYgU2b96M06dPy4Tmzp07MWTIEGi1Wnz44YeIi4vDoUOHcPDg\nQZhMJnz66afYsmULvvjiC+zYsQMRERF4+eWXAXADss1mw44dO5CXl4eXX34ZGo0GTz75JAYMGIAF\nCxbg4MGDeOGFF4Le80033YQffvgBu3btQnZ2Np5++mnht9dffx0nT57El19+iby8PDzzzDMgCALl\n5eWYPXs2HnjgAezZswdr1qyRmQv98e+LLVu2YNWqVVi/fj0AoE+fPvj222+xb98+TJo0CU888YQw\n2K9YsQLr16/HRx99hAMHDmDRokXQarUB533zzTdRWFiIb7/9Fps2bUJlZSX++c9/AgA+/vhjJCQk\nYO/evdi1axeefPLJkG1VuLIoAkMhgClTpmDNmjX4+eefkZaWhri4uKD73XXXXSgvL8eOHTuaPd/k\nyZOxYMEC/Pzzz5g5cyaGDh0a1D/RFqZOnYr09HSQJAmapnHTTTcJGs3AgQMxbNiwZrWhBx54ALGx\nsYiIiMDIkSNlwmXbtm3N+i1WrlyJJ554AnFxcVCpVHj00UexceNGMAwDmqZRX1+PixcvgiAIZGdn\nIywsrNX3NW3aNOh0OuG8p0+fhs1mA8uy+Oabb/DCCy/AZDKBIAjk5ORApVJh7dq1GDZsGMaPHw+K\nohAZGdmswPDn97//PQwGA9RqNQBg0qRJiIiIAEmSePDBB+FyuXDx4kUAwKpVq/Dkk08iJSUFAJCR\nkYHIyEgAcm1x1apVmD9/PgwGA/R6PWbPni2YI2maRnV1NUpKSkBRFAYMGNDqtir8sig+DIUAJk+e\njPvvvx8lJSWYMmVKyP3UajUeeeQRvPvuu1i8eHGz55w4cSImTpwIr9eLH3/8EU899RR69uyJYcOG\ntUubExISZN+3b9+O999/HwUFBWAYBg6HAxkZGSGPj4mJET7rdDpUV1cD4Aa9Xbt2Yf78+SGPLSsr\nw2OPPQaSJIVjaJpGTU0NpkyZgoqKCsybNw9WqxWTJk3CvHnzWuUbYBgGb731FjZu3Aiz2QyCIEAQ\nBMxmM1wuF1wuFzp37hxwXHl5edDtrcW/L1esWIFVq1YJfWK322E2mwEAFRUVLV6rrq4OTU1NMt8U\nwzCCQHn44YexZMkSzJo1CwRB4M4778Ts2bMvu/0KHYeiYSgEkJSUhE6dOuGnn37Crbfe2uy+06ZN\ng9VqxQ8//NCqc1MUhbFjxyIjIwPnzp1rj+YCkJs/XC4XHn/8cfz2t7/F7t27sW/fPowYMaJZ/0go\njh07huTkZBiNxoDr8CQmJuLDDz9EXl4e8vLysG/fPhw+fBhxcXGgaRqPPvoo1q1bh//+97/Ytm2b\nYEpryXm+du1abN26FZ9++in279+PLVu2CPdgNBqh0WhQVFQUtD3BtgOAXq+Hw+EQvvNCQIq0Xfv3\n78dHH32E9957D/v27cO+ffsQHh4utCMhISHktXiMRiN0Oh2+++47oY/279+PAwcOAADCwsLw3HPP\n4ccff8TSpUvxySefYM+ePc2eU+HKoAgMhaAsWrQIn376qWCPDgVFUXjsscfw4Ycfhtxn9erV2L59\nO+x2O1iWxfbt25Gfn48+ffpccrtiY2NRWlra7ODvdrvhdrthNBpBkiS2b9+On3/++ZKvBXDmqBEj\nRgjfY2JiUF9fD5vNJmy7++678dZbbwlhx3V1ddi8eTMAYO/evTh79iwYhoFerwdN04J2ERsbKziF\ng2G326FWqxEREYHGxkYsXrxYGMwJgsC0adPw+uuvo6qqCgzD4PDhw3C73Zg0aRJ2796NDRs2wOv1\nor6+HqdPnwbAOaI3bdoEh8OBwsJCfP31183ev91uB03TiIqKgsvlwpIlS2C324Xf77zzTrz77rso\nLCwEAJw5cwYNDQ2yc/Baw6JFi1BXVwcAqKysxM6dO4U+5oWOXq8HRVFKdNZVSocKjD//+c8YOnQo\nJk2aFHKfV155BbfeeiumTJkisxsr/
PJIZ5adO3dGz549g/7mz8SJExEXFxcQessTHh6OpUuXCtE8\nixcvxl/+8hf0798/5PVDMW7cOLAsi8GDB2PatGlBjwsLC8Pzzz+Pxx9/HIMGDcL69esxevTokOds\n7rrbt2+X+S/S0tIwYcIEjB49GoMGDUJ1dTV+85vfYPTo0Zg1axYGDBiAGTNm4OjRowCAmpoazJ07\nFwMGDMDEiRMxePBgTJ48GQDnN9mwYQMGDx6MV199NeDat99+OxITEzFixAhMnDgR/fr1k/3+3HPP\noUePHrjjjjswePBgLF68GCzLIjExEcuWLcOKFSswaNAgTJ06VRAYDz74IFQqFYYNG4b58+cH/Df9\n++LGG2/EjTfeiLFjx2L06NHQ6XQyk9VDDz2E2267Tbj3F154QdBgpOd6+umnkZKSgrvuugsDBw7E\nrFmzUFBQAAAoKCjAgw8+iH79+uGee+7Bfffdh9zc3JDPROHKQXTkinv79+9HWFgYnn32Waxduzbg\n9+3bt+OLL77AsmXLcOTIEbz66qtYuXJlRzVHQeGSqK2txe23396iU19B4XqhQzWMgQMHIiIiIuTv\nmzdvFmLp+/btC6vVipqamo5skoJCq7Farc06uxUUrjeuaJRUVVWVTL2Nj49HZWUlYmNjr2CrFBQ4\nUlNTkZqaeqWboaBw1XBFnd7BrGHXS9kFBQUFhV8bV1TDiI+PR0VFhfC9oqIiZJKYlI6oFdSefHpo\nFdad3QwtrcG/p79zWee468s/CJ+jdVFYOvm19mpeh1FurcLj618CAKy8+wMAwHu7V2Bn0T7EhcVg\nycTg5S8W/7wMe0sOAQA+mvI3RGgNwm9bLvyMpfs+DzjmnxNfgSksJmD71cxft76D41Vn0D2mK169\nhSugWGGtwlxfn/nzybS3oFfphO8sy+LulY8I33vG9cBLI6/erOjT1efx4hZ5fg7/Xizc9g6OVZ5B\nt+hULBrzHABgT/FBvLXrQ2hoDT6+/e+4d9UfAQDzhv4OQzrLAyQ2nNuGFQe/RDAitRH4cMob7X07\nbeJ8bQH+/CPXpi/vel82fj2z8VUU1otVfVfe/YHw/3/hprnok5CFnYX78N6eFQCA/9y5BBQpRpF9\neWwtvj65Puh1aZLGx1MXQ0Or2+U+OlxgNOdTHz16NL744guMHz8ehw8fRkRERKvMUQRBoLra2p7N\nbFdcDi8AwMN4L6udDU6L7HtaRGrI85hMhqumL87WivH4fJtcLq4v3J7gffG//O8FYQEAP58/jAh1\nOPIqDuGejGm4UFUa9FrPblyEV4Y+DzWlEra1R194GS/+e+YbDIjPQWZ090s+nmVZfHZqJeocZmhp\nLYYkDEBOXG8AgMvtey/cDKqrrWhwWvDS7tAD2+5zR0CRFI5UH8eMjGmosFfJfj9RdRZPr38FRk0U\nHup5L1Tt3Bdt5Xx5YGlzvk1a6Ll96grww8ndyDH1wt4CLrLM6XHi6e/FqLG3dn2Il4Y8ixJbGc6Y\nz+PuHrejrC60r7PBYUFpRS3UlBpWlw3fFHyLMUmjkBSeEPKYULi8bnx+aiVGdxmBlIjLS4Yss1Xg\nrYMfCN/nb3wD47uOQVZ0DwCAjhAnBUZNlOy5LdnzKSLVEahsFJ/9/gsnUeeoR4GlCNO7TUJhbVnI\na3sYD/acP4rsmAysPLsGjw6beVn3wNOhJqmnnnoKM2bMwMWLF3HzzTfj66+/xn//+1+hEBxfvmHM\nmDF48cUX8dJLwWdavzYogutWL+O9rONP18kT2rSUps1t+iWoaaoN2Eb6XjEWgRMHs6Memwq3Bpzj\n3UPLsLt8H87W56O2iYvbD6P1sv3s7kYUWppPGLscTtSexq7yffjH4dB5Jc3R4LJgb8UBnKu/gGM1\nJ/Hh8c9C7ru1eCfcjDvk7yW2Mrx/ZAV+LstDsbUUp+vOBuxTZC3FkZoTOGM+H+QMV5bqIO8Dj0Ed\nLnz+8Ni/Acjf+zJ7hWz/U3Vnsfz459hZugd1DjOsruDCMCEsHgBQ43tvVp9fh7ySw/jPmW8u6x4O\nVh3Bgaoj+Nv+f1zW8QDwwdGP0eRpEr5faCjEksMfCd/rnGbhc7TWCDfjEb43uhtRaC2Gw+sUtlXa\nq/Hxif/D1uKdsLhsqG2qAwECNCmf/9/ShQsHr2qsgcPjxPaSXZd9DzwdKjAWL16MnTt34vjx49i2\nbRumT5+OGTNm4O677xb2efHFF/HDDz/g22+/lcX9/5ohfeoiCxb1Ti6JqcRaJggQs6MeDU75C+/y\nulBm4/4kp83cH+d3vbjZgJdl4GE8KLaGnklcaRiWwa7yfQHbSV54sty9syyLQksxGJbB2gsbA/b3\nSIRsibUMlY3VoEkaTw98LGDfat+g0J6YnQ0t79QMlhADGQCwLF90kROeUnOTFL7PaiT3d67+Ak76\nBMYfc34XcMy6i5uEgabOYYa5qW33cbkwLIMiSwk8jAe7g7wP/D35TyCqG2tR66hDplGu1fGDoFSA\n1DTVweLiEief6Pd7YfsNibkYkjDAtw8nrEps/H+mddkDdRYHzFZxcFZToinnWM1JVNirUGGvwvGa\nUzhWcxIuryvYaQBwE8Yia0nISUFtkxkMy6CuyYyUiM5Qkyq4GTfOmy8AAAbG5+Dtm1+FilTJjiuQ\nTJRqHbWobqpFrC4a83OfELaPSx2N7lFpALixxeqyoT1Qakl1ABQh2hef//lVPNL3Ybx/ZDluSMzF\n/Vl34oVdiwAA/xz1N2G/5ce/wPHaU3h24B9xvv4iwlVh6GzoBIAbbL8v2IwNBZvx/KB5l6VadzTH\na06h2MqZj6QaEW+q5U2TJ2pP44OjHyNKEykIUym8YAGANfmcXTZBHyczPWkpDRxeJ2qbmcFeLv7m\nwEuluT9mo2+Wyc8WvSGq9kZrolDrMMs0ttXn1wHgZtAmnei7yTB2wxnzeRRZS/Fz6V7c3HkY3j20\nDGEaHZ7tP7dN93IplFTb8PW2fPTKteObC2sQ7e6GelXg831p9+v4x8jX4fKKg2iMNhr5DVwxwz6m\nnrjQUACXb5A1aiJR3VSLY9UnhP2rm2phddlAExQiNWLYvppSIUYXDQAotVRj87bDqDJxpiuPn7Zf\nU98EmiYRFS7X3p9+n5uFr/jTKADA7hOioFp69JOA+xmfegsmpAUvn/Ppyf/iQNWRoL8BwIu7X8Or\nw56Hh/UiRmtEbVMdah11WHKE0z6KS904qKqGs5EGqRX7a2fZXuFzibUcNrcdnQ2doKXFe1GTavyY\nVw5oASfjgtXdPuZJpTRIB0CT8rIGxVbOlhtsxsVzvJbLcr/QUAizox6JYfGCY4thGRyrOQkAqG66\nOvNUpIObRjIrc/pmYIxvdl3la38wYQFwNld/0iJTZbMs3uzQnMnjcuEFRqSac7x7GQb/XH0Mu49X\nNHeYgKUZgcELE6uLqzZ74Dw3+52UNhYP97offWI5DZsiKURro4Ka+LKM3WW+Cj2tw9RuEwBwfe
pl\nvKhpqkVhfQnqHOaA49uTH/cX46utnCns3a+O4Eh+LfZc4LSgGoZ7529LvSXgOLu7USYwaJIW3odY\nXQxujRDXw6B8GkaDRHOrbaqD1WVFuDoc63aJfhI1qUa4iqsEfKqkCscuVgmze/++fHbpbsxbIi8X\n4/aIAtza6EJxlQ2H85t/7rVN9SF/a05Y8Jgd3H2HqcKgIlWwuxuF30rKXVjyzTGA4cYBLREecPze\n4uMAuH7TkKLAyC9qxLHz3LmdXmez7+WloGgYbeRg1VEcrjqG32TPEAZ4f0e/nhZNDwv3/F34vDZ/\nA1SUGuNSRwnbVp37FgD3AvCaitnRgFJbOYDmB6S28lPJLhRbS3FfVvDFjZpD2q4GlxX/Pvkl7s+6\nEw4PN5vmBYaOEmtTqSk1Mo3dcbRGnD1KNQyeblFdZfbZWF00SqylMpPNt/kbcOrAGUxOvQ1ZMT0u\nuf08dc564R7mb34LpohwHL+QjANnqtE9ORKxUeKzLKm24b+bz+GeW3qgUyw3UAWzrf/f6VWYkTEN\nNjdXg8nubsT8D3fDbKgHHQ/0NfVCYlg8DldxS94SBIlYXUxQv0QPYzpUkr6w2LzYcsgFJHOai9Ut\nPofTdecwNGlQ0Ps8U2TGpn3F+O3EbOg0wYeB7y5sxL7yY4jWRYAFgyaPAwzLQEfr0MXQCet/5Aaw\niUNTUW/jJgaNbANAAKSGKw+SY+qF7wt+lJ3X4rLCzXD7EwyNysYqwTz57bZSnDsHqLOiQBnqg/rv\nCswVsLhsiNfHYceuKuh81dBVlApNjZxK64EThEo0LTV6mvDmyjzcOSIT8UbRH3ax3IIIvRoxkVqY\nbU7Qnc4BXhrnSnrjbHE9QDbvh9xbuR+RWgOmpN+GAksRNhRsxm+y72l1octKO1f8sbLGBcbrN3+n\n5JOnRisF0k9mXLRdAEFx/4ldR8VCkkXlTQDDTdzqbHYcbWgfc7aiYbSRTYVbcaDqCCokUQz+g56W\nFgdJ6X4bCrdg7YUNcHsDbZycwOAeD6+uA8EHpPbiy7NrsKt8nzC4Xwr87FnnE457Kw6g2FoKh5cb\nOPg+kTr0RiYPl5magOAaRh9TT9ASM5+aVCMxLB6ltjLBhry1ZCeKGkqxvfTyigzy1DvEGaOFqEC+\n9TzUXblZ3PnSBtTbnCiv5Qb+XccqcLLAjAUf7QXjGyD4fsiNF8NAfy7LQ4W9SuhXFiyqnZXCgPDN\n1iJsPViCKenj0cXQCQ9l34P+cYGFGRl7BGpKw1FRLXGA1rpQWcP1gcPjkJnE/IMnpCz+8jAOnavB\njqPlQX93ed34vmAzapxVOFt/HufqL6DEVoYyewXyGy5ia8lOYd+iSiu8DHf/UgcuADQ1EtDV9ZJt\nyztbJGgYXpf8+Z8r4N4X98VeiNMmYKZk8kKAAOuhcc56Gm7GDT0ihdk3AOw8VIn3vuQ09WqrBYRK\n7l84XVGK91cfR53ER7Hw0/145oNd8DIMzBYHVJ3yoepyBkv/dxyb9hWDoFoOXNlUuBV/+7+DePvA\nv3Cs5hS2l+zCgdLgdfFU9Wny+63mBvIT5y0wN8jffY1Fvi/rDRTsfPs276rDFz+Iz7vW7BH2P3i+\nAjtPFrR4H61BERhtwOayo8TniJaqvP720pbIbygI2BatjRKcn1KkA0JNUy12Fe2XRWC0B8EG7Zaw\n+GykRk2kuM1lFTQMN+NBdWOtYCKY3fs3mJw+DoTfK9jolt/LY31/Cx2tlcWdqygaGdHd4WG9OF/P\nCVM+qv2c+cJlR6fxbfaHiqoBaBfqbS7MW/Iznv9wLxiGhcPF9xOLjScOg2VZ4fgp6eOQFpkqnKPc\nXik/Z0QNCJ/AOHDSjM82nQXp0YM8PwKEMxLDOw1BnF4eYu48MRSfb8jHwk8PCNusNq8wMDS6HbL2\nHyo/jcIKuU/m+MVavL3yCDxeboCvqGvEiYI6vPb5ASz6/AAaHW7YHW68vqZ15epBMDhUykdvMSDU\n8uf3zdZiNBWnwmsxCtvWHzgnmCrh8csP8H1nHeGYYnoAbnsYWA93f16nFt4GsU+OHyUBVsxnqKl3\ng/VwAsjqsoOK5sxJjJObxNCmUphtTahsaAARJjeJ/ufHc9h58ajYDC8n3DUarp+cpwI1NakScbqo\nHh7fu731cAE+3x184mI52w2sS9SaTpRx1YpZLw2WEf8LjsMjMH2oXNDCK/4HXOf7wms2Cd+rKuX/\nI9ZLi8KU9Mq0rbagCIw2cMZ8Toj2kJpH/DWMlgawiw2FAdvi9LEy5zn/WWr6ef/Ix3hn93JsKNhy\n6Y1vBvdlCAyrywY1qUKYSlT36xz1cHjEtRe+OP2VIDDUPp8EyZfr9g35/iY3qSOPhyZpZBi7AeCE\nLcuywozV4XWi1B581hwMl9uLd746ggNnquD2umXhi1JUyeewcqtoIqq3OYWZKt35DL6r+g92leeh\nys6FOIarw2W+nHJflE+vmCzuvg31osnB98d+e+VhnCo04/3VnGmqpyEnRKvFQdLjJgCfwLA4GoX+\nI1gKDOXEe9+JA1d5rR1vfXkExy6Ik5tth0qx+L+Hca6kAedLGlBQYcWWAyUoaQxddl2EharLKexo\n+hpUdDkItQMEKY6iLEPgTIEVTU4PwrSiJkGonGh0OcEyZOCsmSXRN51z6m8+UIxV2/KF+2UdeoS7\nk4RdGUuMrC/AUABDgWUIUFE1oBO4/xVjjQIA0PFFIDudwr/2rYIme7dMuG07fwQHveuE7zf04drg\nZrn3ShPENEawcl8ly3LDqdnuAGkI5T8i4a0XB/oGt28/6QAPYGhGV4zom4S//eEGDO7MCY5ErZgH\nwro1gEU8D+uUR9yxXhpd431CmvSgU6f2GeoVH0YbkKr8cg1DPuC2NAC7/MLuJqeNQ2pEF5lpKEoT\nGRB/bvY5NWsvMby0zuJAk9OD+Gg9ln17AgMz4zAoK17S3tC5AaGwumwwqA0yX0NNUy0cXid0tA5N\nniY0eRxCX/D78WY3kiDhZb0B0RxScx6PiqCFaJgGpwVuxi0L07S57LL9nS4viqqs6J4cFXCus8X1\nOJpfi6P5tfjDnWkBv/P4z9BqLQ7UWZzQqCgQJs7xes58AYXWYrD2CLhdrExgFPpsyEZtFDcrptyI\nCKdg81LgB72Saq7dZqsTRZVWrF9LgYwcANajBtwhMnUZCnFGPSxeCnZXk/B+UA4jPLoaWJkGeBkG\nBEHgpRV5Ie+Px9bkxs5j5SBi7MF3YAmA8PU1yYCK5e6LNJi5dkrQUlo4fPemogkIUwfahXp7I8BQ\niNLr4K/TZaYYcSS/FicKuPdbN4h7HxmrEXfn3gzSmAG3U4UP8+SJjNyASwBeFUCK5ijGFgXEcpMI\nOqEIrEsDggBuH52AtIiu6GQKw6INX0F6x+kpWuw+CpiiVagD0D0pBv4GPoIQ37pbB3XCT17f5Idk\nQGgb4Q9xhsuLcBdmgfWooEq6CELLXVWn0iI+NhJl7
lqwXhI39U0GSRCIjdRhZu8pGGTuiazoHnhs\nK6ddPjV9ENSMAbVkHzRaaHgjo/HFD2KeTlJUBIb2TMLXZgIE7YGZKQ/QWC8HRcNoA6fN56H1OXGr\nmzFJ8Xb8UHhZr0ybGBDPzSwJyexJRakQrgpDfkOBEDHFz3qks/L8sgb87f8Owtoot9+6PQxOFNSB\nZVk8/+FeLFieh1XHf8AR51Ys/d8J2b5v7l+CmqZabC3eiY+OfSY48BiWxbJvT2DHEbkDbfme71Hv\nbADrVqOkUvyjbCneAZvbjnh9LGhGhxJbGTYUcAsL8b4LXnDwUVBVjfIoMA2pxrELtWh0SKNqVIjw\nJX5ZXVaYG+V/Tj589Wh+LQ6fr8Fnm87gtc8P4uDZwNXl+EEapAcf5zeTrOfngDxX0oCSahv0nYtA\n0Nxv52vKQBAsvA0xOFVYL3umpyqLxPtmKNAqBjodEKbWYdygLrJzO1xefLnlPAACTIMJrD0SrEuH\nOGNg3gbLUOjVNRrw0mh0O1Bj40wtTWYuyotVNaKm3oFNecWCGQqkB+qMfSCjKqHJ3g1N9m6A4vq3\nuMoGc/hR0DEVINjA4UHmyyUYkL5XlI4vgiFFrpWEq8W1yylKslaK2gm7ywmCodApNjDyJ6NLoGAH\nOIGRlhiNiARjAAAgAElEQVSJgQn9kJucHbQv/jxzAGjItQHGHin7Tqg54b+h9kswhgosPbUU9uij\nsn2i4zxIufEw6lScmJh2Y2AghXSScnP/RMBnUgozNoEgGTBN4v3H601gHb57ZSl4azlNiX93/jAp\nB0nRvhBhhkZEuCh8KZJCdkyGrJxISkws0pOMGJTQHzf36INeadGytv12Qm9kpxpBsiqQYRa4GBcy\njZcfDMKjaBiXSaO7CXUOM7JjMpBffxE2yaDtZeWDC2/HDwXDMKBICl4vJ2j4wZAgCFAEBS/rBU1Q\n6GFMx4GqIzhafQK9Y7MFDUQ6K3/3q6OwNbmxYW8RGJZFo8ODh8ZnYdW2fPywvxgP3pYJp69ExY7a\nzaDjAHexfK3remcDPjr2GYp9SU9uxg01pUZdgwN7TlZiz8lK3NiXe+Evlluwr/IQKANQcSEKlMEM\nSv7uIpyKhMtVC1KiLPCCYnzXMSi3V2JK+m34+4F/BvRNQVkTlnzFLf6j85mRfz5aCcvFElAEBYvL\nhq+2nwb04P6wJINGdxOKq2x45yt5WOPhczXo38Mk21ZYyfUdGVkDgg6tWRF+AoMzlbBwxB4TttU5\n6kCouBntP1cfQ+oQ8bmwak4wqUgVWC8FimLg8Dpg0IRhZGYnbMiTZ62fKgw0afxhSi98vP4Uiqok\nZjuGwg09E7DnqApNHgd2nSkEogDGZgRwEaS2EQs/3Y9GJ9f+4X0ScaTuEDyRtaAixUkOGV4PpsGE\nw/kVUKVyiWNhap0Q2QUAzrP9oe4mlnEB6QVJEuCnRy6dPAQ1QiMOmCBYIXeOCrMAlBfGsHDQtDgI\nTus6Fd179hKimAgAzz8wEIdrSWw8fRCM1YiYSO4loqlAYXZzn87o1ikSUfnhqPVYfecgsHTuVHx9\nmsDO0j0BCYPBcisA4GTtaVQ5xfsxGcICdyLEc4XrKcGf4lJx/co0GkDquP5TU2poVBSanL4EVpdc\nc9artMLEjPXSiAoLrlHOyJiGgoYiIbhEaF+UDkOy40HrR8GlL0fn6BiQBIkovR5mJzd5TDYkBj3n\npaBoGJdJjYN7KUy6GE71lti+Q2kYj/R9OOi5vCwjMz9Js0t5kw1FUrgnk1thjtco3D7BJHWEe3U1\nAOVG3qlKbMwrxo6j5SivteOH/dzs78CZIGs4awKd5sU2UYvgHZR86CShbsLn+3/A01+sxMJ/54HU\nNIFx6OCtTBUcd6zEQWfwJskcdgDw2ffn8b+dF2FQh+OJ/nNkdXqkM7MlXwVGm1TWObExrxiMS40G\npxVHL3CmCcb3JzxfWY2SahtIQ60wcwaAXQUn8PK/d+F/h/bjpf9swtnqYpwvqYdeQ6Nr99AZuwBA\n6KwgdFb86T4x+onQy40phIq7Vve4JBAEUFYbGAJ9rsgGlqHgVdlgc9uhpTUwRekw766+sv3UKhK3\n5op9Mu+uvkhJMODpe/phwW8GijsyFExROhjDwgDKAw/BvWs9Yrr62miBQ10F0liJxFQr7hndHdOH\nB840CQ2npZU7xbwGqbAYn3oL0BAvP4bywItAcyuvGetVOrzwwEB0T46EXis+f0JnA6FyIUytFfx9\nKRGdMbrrDegSb4BGTeGlB3Px1mPDkJYUgYmZN8KdnwMVTTdbdHRoTy7RNT5STOZ7ov8cJMVGYEbG\nVDwuyQpviQq7/H+iJpsv3udm3IAvYoklfNFwjWIRTQ2lxh+n90Fmlyj8/ZGhePauQbJwey2lgdM3\nhoSrtVCrgi9Re2OnIZiZfVdAP5AEgdmTe2LWkHGY0+chIWBGahb1FzKXgyIwLhPeyR2ri4GW1sq0\nCH8fRpPP8SsNDZXiZtwhI5NI3zE0QUNLaUGTtCAgeGd6k8cBt9eNqsZqoNtuaHruQq1FbM/zH4qZ\noaU1gYMYoWls1jH//PJdyDtViTordx+qlJPYbfkBTYn7QcWWglA7wTp9zm7ejMGSoF2cKWDrT06w\njPzezxZZsX6P6OyXRoSxMnt9kAHCJ5S8Tq64HENyAzU/ayuqqcOpmnPQZO2DOp3TMgi9BZqsPFQl\nr8Em80rUxP+Id4/9A7UWJ/p2i4FHVxV4HR80qwFBstD2/hlpSRHolRaNznHhmDou0HRCgMC8qUPx\ntzlDkRyvD/j91MUGwUkNQEg0M0nMTS88MBB/fXgwpgzvKmwz6Lk+Cdep0DVRHBBZhoRBr4LJYABB\nMoiOYaGjtHjzkVugp/WgDPXQZOVB0/0Q6uN+RoOnFlpt4N9+UA6n1VKRokmwn6m35MYIvP77IaBI\n8XmQ+sCIMpqkkeoT/lGaSKQlRWD+/QNkznAeNaUS3nv//0ZKggGRvixstYrCX2cNwt/m3CDbZ/bk\nbAzKEqtb82ZNad0xaR5H18guCKP1AXXJgiEt9gdAFqUXDIfXGRCCKzWFqSk1uiZG4Nl7+yM6QovM\nFCNidaIqrqXFSWdStNyE1hakk8/2qEmnCIzLhHdyx2qjoaU1Mj8FP2uK13PmD16YSF+6EZ1uwH2Z\ndwCAEBabFJaAv94gX+GNn0jQJAWCIGBQhcPisoJhGVk01v78Ylh9zl5S24RQtXPqfILEaBBfHlLT\nhLe/OhR0fwCwO51Y+r8TKKvhzk/oxJknGc7lLagZ32zKN5jr1DSeGfQHOI4PBevSyQZJAABLwu1h\nsOVgCfJLG3D8oiQT1z/U0h+GxLBeCWDdajDwAj6HNC8wqiwWnK7jIpqoKG4AJOhQGgSLzO5aVDXV\nIDs6A0/0myMrvQEAsWGSPzDBYN5dOXh51iCwFPfMe0WJORNRmkioSBoxkVoYwoJYfBlKJjzvzZwO\nAIiJ4Nqu
VpFIS4pAXJQOOg0tPCfp8/I/H0EQQnCAxWOGQcMN/iZ9YPn3Wkd9UBOph+ImEmRELQiW\nwpP9/4D7/RI4Y6N0MvlNRnGz8Elp44RtYbQeD/e6Hw/1vBeT08XtwXJ7VKRKeIcpsnnreHJcuCBA\neIZkJ2DOFDH0lNdspIUNpUETNEnjqQGPYG6/2SGvw9+LVLtqDXyFgM7hSfhN9gzM7vUg+sSLpt5g\nUVaxkvdMS2vh9D0XLRUY6HG5SDWMYAEkl4oiMC4TqYaho7RwMx5htsSbpLr5in/xAkHqBM2K7iGU\nz270aSBJ4QmI0Ynx6oCYNU6RFEqrbdCSeljdNhw6L58B/XvzMdQ1ijM+QheoScy81WeKIBhQ3UWt\nQ9XlDC7EfBX6Zn3Zrkfya+Efa0+buPpREwdk4ZHbewl2XRVNITk6Gl189bBovwJqg3pw9tTPN53F\nq58dwFtfiv4Gf23En37d45FkCuNCCwFouh/mjvMJDIZwwUKKobXanK3QxAQvI9I9RY+vyrnaPT1j\nMtHdmCYk4fFInbcurwuFlmK8lvcOCiycmW9wsigwpLNGKkgeDRhKMM+pSBpRvrwVmiLx6u8G4405\nQ2W7L3x4EF56MBcRIWzaN/l8SfzskQULg4oT3v6CDwDeP7IcR6qPB2yvcdQBKgdIvQ3xqmR0i+oK\nLa0Vssr5d1caiBEex/lZhiQOEO4jTKWHURuFgfE5gvYkPV4adq2mVMJ26cDWVuQCQz5Qx4fFoVN4\naFv+gLi+IX9rDkFgGJIxKKE/+sZlY+508VyaICYtqcBQS/4f0j5qKxpFw7g64J3cERoDNL6Xklcp\necGh80l0fjtNUvhjzu/QP64PsmMyBDOMwydQgtlJGZ+mQBM0FizPQ0kZZ756f+0B2X5u1oll6w4L\n3wltI/qkiy/kotlDcHM/bvAmdFbYabmDMpjJQPyNExiFFVYx1r5JvlZ7H1M2BmSYkBjLvex8WfPo\nCK5vKFY+gzRFBnEi+khLMGJk8nCMTBgtbJtwQ4rweVBWHBKM+oAY/pRon3mCdsvMJYTaCZguIhgT\nRkcJs9x+vnUr/GfDpETQO70u/OPwRyixlQlVhWO0opCPUIt263sypiMrugf6SbK2WS8lxNtThLz9\niTFhiPQTDHqtCikJBoRiYAbnV5AODHzQRLTWGPQYvt1SKu1V0Edxs+qsBLGvn+z/B/SOzcJNyUMD\njnEwTYhQGxCliRRm0KHs5A9k340exm6YkTFN2Kajdbg/6y5kRffAjIypIe+xJWb1vBc5pl5CUU6D\n5BkEGyQJgsCtKSODnitSI+/rgfE5mJJ+GwBgbs5spEWmIi0yBX/M+R2eGvCosB+fQBoqdDU2iPDO\nMfVC5/AkDO80BARB4MGe9yI7JkOoDdYe6CTmN52iYVw5mnxCQEdphZeSV/X5AYhXLfnkNYqgkBnd\nHQ/3uh80SYvJeE7O4Rh0VSyJhgFAmFUTanmoLkEygCTKJyNNh8nDuoKMqgQVUwozSsTw2yDhkoHX\nFWeS94xJ40IojRWCOcpIibbjGzvdgISweBAEgbQk7g/HC0OSt3n7JTnRFPc9XKfC/Pv7y+oZdUuM\nxh09JmNaFlcFlCCA6Tely47vEm+QtREApt+UBS2lBaltBEF5oWODD5hSLG4uDHV81zFC5VP/SBq3\npIS10+uUZdbTBCUTEtI/ZYzOiMdyfitoWQA4k5RPw6DIdvj7+bpAWpCQHzCpED4zf/qaeoEFi/65\nnKAMV4uDTEpEZ8zp81DIwUa89+ZLtncKT8Tj/WbLZvcGdThiddF4LOe3goZyOQyIz8Hvej8gvHMR\nEg3Df40IninptwlFLKWoSJXMn3JH98mCcMmI7oanBjyCpwY8iszo7kiLTMFknwmLD3XPjA4euhps\nMa6UiM7406AncI9PiCaExeHRvg/LNKS2EtGMtnU5KALjMmBYFnU2K1QkDYqkBNugU9AwuLwKq110\nSgOBVWzF4oKcH0BH63DgTBVe/mSfkHfAD16sLymIL33gLzCmjOgiC/3M7haGiCgvND0OQZ1+DEsO\nf4Q3+UVgWiioBshfrthoNdSpp6DpfhiaFC45qHuMOAuVvuB8e/k/r94nCFS0/FUb2b8TeqfF4Jl7\n+qF7chT+8lCu8JtKkgX+tzk3YPGjw2THEgAXXukn+LQqDQzqMCHOPkmdgpao8/W9NGLFX8OQJlZW\nNsqjZwxqg6ySribIn1IqUITkMoQOgmgNyeGcKSpGy5nApNopb0LrFtU18MAg9PVVyT1v4TSP5gaW\n3Ph+su/8s/f6+kztZ3r0R9rPsn5pR1o74PJtkZrZCIKAXmIS0rcQWcQLaqvbBj2tQye/pQf4SaNs\n0vALIu2LYH6US6XD8zB++uknLFq0CCzLYvr06Zg9W+5wKisrw5///GfU1dUhKioKb775JuLjAyX/\n1cSP+0tQZbFCreW6T+d7ELxg8LAeUASFH/LKoE4VTVLSGd+u4+X4Pu8ikCqag1irEZuPl6CwworC\nCiuyUqMFH4aHlwW8fd8vL8Bk1GDUoDjsKOOcvRaXFS5G7tw0O+u5UhwSgTGmy83IieuFN/cvke2r\nV+kER76bcYMy+ZKytJypZ2DXFOT5cp2ksxh+sOVLfky7KR3WRjf0XapwSJKQHqFX40lJKKlJUgVW\nWpBQWh2Wh5//d4oxQOrJoQgK6ZFdhSTKPp07o1Nxb/zkDlz1LtPYHafN54QS4M0JDGnme4lNXnbE\noA6Xze51QRyW/KAOcFFNvJ+nJUdvczzZfw6qm+oEE4ha1gbufcyM7o75uU9AS2tAgMCLu18Peq7U\nSC5xsMHF2eGbs3XPyJwGFiz2VnAmUX7QZ4Xn3rwQlPazQdV+M2kprRVEYT5tiCAILLxhvtB2Pa2D\nxWWFhlK3GB0lnSwYg9R/e3HIM/CynhbP01FI+yJYbbpLpUM1DIZhsHDhQixfvhzfffcd1q1bh/z8\nfNk+b7zxBqZOnYpvv/0Wjz76KBYvXhzibFcPRZVWEJQXXrevTr1Pw3j74AdwMx54GS9nw/dFDPE+\nDelL8/X2CyitFjOUWYbEN9+bcbqIm/Gabb6y4L7h8WKZzXcObpDhtQn+pf/3qS+xs2y3cD6ryxa0\nxIe622GAFAdEkiCRGtElYD+pw1ZaD4onQmLrldqMeQHHx4lHhqkx944+0Gha/6q1NEvlX/zBWfLZ\nnNPrlKn+iYY4GLVRQvE6KXwJdEFgSEwp/jMx6aBQ5icwItThsnLjwSJRpNFKM0ZmIj6aO39bNAwt\nrUVng1hXSaphSEMpkw1JiNXFCKVUghGtNcp8D81F06hIWgjmAAI1DLKZPAlA/h/oMA2jlYJI77Pv\n0yQNozZK8F/w70Jr8hakzz7Y/URqDCF9Sb8E7WneAjpYYBw9ehQpKSno1KkTVCoVJkyYgM2bN8v2\nyc/Px5AhQwAAgwcPDvj9aoQiCYASywdLVfgfj52Gm/Fwhcj8on1on
5OTYVgu81Zig2cdepmdf2Ne\nMSrqGsH4ykY3WDkBMaA7p311T+FeBOlAJ7W9W11W2SI1PISxXKZhhKobJZ2NVNgDcxSkL2JQDcPv\n1Wrt+gAAoAoRMTM3ZzZyTL3RO5YrC6Gm5YJlYFIf9I7NQnZMBrJjMpAemYKMzlEy53jPmExMShsr\nzKJ5k5Q0MmV27wdk5324l7igD78uSW58P6REdMaQxFxZXwWbnUsHkv7dEpAcz5mMyHacdUo1jFCm\nh9vTxwfdriJpmKQ5AS2YLqSDJP8eiBpG64eU9h7MeCiSwm2po3F/ZvPruuTG90NqRBdM6ipfMY+f\n4Blb4VeRTiY66n7agqGdhXKHmqQqKyuRmCg6ueLj43Hs2DHZPpmZmdi0aRNmzpyJTZs2obGxEQ0N\nDYiMbL/klUuhsrEalfYq9DGJ64t7GS/2Vx5GuDoMMVojKMpXh943EDklTtGvd5xBeGYTAEJWrhgA\nVqw7DQpqJETr4XR5uX1YX66Fnz2+uMqGPy/bA20uCwJiJcy4yDDADnTtpENBCaBX64FG0dbDVUoN\nw4WGQlQ1BWZ1A8DtNyfj+1IuoopfT0JF0rIiidI/fqldHlEFyGdx4ZLPjJ8PQ9h+CQIjlIaREd0N\nGdHdhO/SGfp9mXdCRamgpbV4VJJRn5KgRVykATUOTksalzoKaZGp2F/J3b/ZGejDSDYkYU6fB4Wy\nEfF6E+7Pugufn1opmLumpN/GFRL0I9jsXNoXakoFb4hktbYgHbjUIQTumJSbsbFwK5o8TQhXhcly\nDWJ1MSjyLbHbUjQNHWRW7b0MgRGh6RgNAwAmpo1tcZ+smB5BF9sq9i1Z0NmQ3OI51NTVLTDaW4vr\nUIHRmlnls88+i4ULF2L16tUYOHAg4uPjQVEt/5FMpvbriItlDVjwr134y+9uwF/3vAkA+GjKm3j1\no0PomhSJrr3N+PepL4X9E2unAQkAvDRMJgN6etOxmq98Tbnh8no4E4Ff2Ofhs3UB0UJC9U82uCrP\na/jJJgOevnsk8puOA2UAVNwf1D8qJVytR6zeCKvLhnUXNwU9Z2Q0CXBjA4Z0zYHJZICaVsPtEgXG\n6G7D8PmRbwAAlY3ytRwM6jAkxEchOSIRJZZypCclQqviBpnBKX1wpPo4hnfNlT2jwal9sK/yINdF\nJNXs84uOCm/V842yiKG5Rl+YbrDjjPow1Dg44RkfEwWT0YB4t9xM0DnehCideGysVwwbNpkMiHOI\nExiKINEtuVPQwTEhxhi0DSZ9NKob65CcEIvhzoE4Xnsao7oNbbf3ONYptjc+Jkpotz86lYYTGBq9\nIDBMJgO6xCTiYBXnlEoyRcMUEbpdsW7xt86mOJhMBtzSbTjWnNqIYWn9Wrwnoy4S5qYGdE1MANke\nkWKt4FL6+Zb04fghfwduzRwGU2zzx0nfk0RjbLuOS+1BNMNpzjG64O/lpdKhAiMhIQFlZWJNosrK\nSsTFxcn2iYuLwz/+wUXvNDY2YtOmTQgPb1lSV1e338pzH6w6ggabC/9ceRjwBTOcLazCyYt1OHmx\nDr0ZefnwixW10CYAjJdCdbUViVQyRnW+EVuKd3DVJwkGHg9Au4xgGULMcfDTIoZkx+MwSwBgYdBp\nkZMdj9QEg69SqRwtTUNPE2iycyakeht3/2F+AiNMFY4Hs+7DS7tfR4MzeB9VmH2z5LTb0FWTjupq\nK2i/V2FI9GAQWTQ+O7Uy4DxhqnBUV1vxZM4f0ORxwFrvhhVcu/oY+uL5QQlICIuTPaNMfRaeHzQP\nakoFHa1r9vlZrc5WPd8muyjgGm3c9YMdR7HiLNBmcaPaY4XTLndsN1oYuG3isVZJaZXqaiscdtGM\np6f1qK0JngnssHmDtuFPA59Ak8eBhjoHeoX3xoLBTyFeH9du77HDJravyeoB4oP3Bf+c9ZQerw9/\nEQC3n54V/3N2iwfVId4dALBbRTOmysU9y9EJI9Ensg/i1aYW7+n53Hlwel2orb20bOrLxWQyXFI/\nT0geh2GmoTCyMS0eJ+0L0qVq13GpvXh12PPQUGpUV1vbLDQ6VLz37t0bRUVFKC0thcvlwrp16zB6\n9GjZPmazWdBE/vWvf2H69Okd2aQAVp37FsWxawCwcHvFQaTW0gQirB7agRtxplae9KXpxS1KI3Wm\npho4xzHd6TwI2g2vh0CSMdJXNZRH1CLuvaU7Fxnk29TFFInfT+6JsYO6IJjfUKfmrsWbA/jIK38N\nI0IVjhitUWai8IefWXaWhPr5JywRBCH7HRAzhw2+DF4trQ0wyxAEgaTwhIDZN789VhfTYiZr0Azp\nFvZrLgpFaibizV3+Ic6qFiKWpH6BULkG/tfy3873FUEQQt5Ke6FqhQ8DENdmMagNMKjDBTNKrFZS\npqKF0hRSk5RRw90TSZBCKZyW0NG6NuVddDQqShW0rEowpObTjnLit5UoTWS7FB4EOlhgUBSFBQsW\nYNasWZg4cSImTJiA9PR0vPfee9i6dSsAIC8vD+PGjcO4ceNQV1eHOXPmdGSTAthavBNeqgmg3Sis\nEGcHtRY7VMnnQJAsqCi5L4CvYc94SWw7VAqHywPGJzxI38IpnppOiI7QyipW8lnPE25IwS0DO0Or\nocAHiUoHsHf+OBzvPX6j/JqEPHafTxL0H7wM6nAQBCFzRPeKycSwJHGJSb54oXSQeajnvUJsP480\n8oYAgUlpY9E9Kg03JcvzItqb1trBpWGpzfkDpEEJ/D13MSRjeNJgDEro36rMWqmturnY/PYov3A5\nSNsXyocBiD4rg0qeac9nIhMgWizTIRWuVypc9GohKTwBA+L6IsfUC10jW877+bXT4XkYI0aMwIgR\nI2Tb5s6dK3weO3Ysxo5t2UHV4RByE0V1gz1gm6c2EXSMGFZJ6uz498YzWLPjAqyohdZXB411q+Gt\nTEF0sgasVRxA/v7IMFSZGxEbyQ04Xi8raBjSMhF8ZdL3Hr8Rz+3eAACIjfCtSyxoGJwTV6+WD168\nw9KgNqDWFzI6NGkw+pp6IlITifUXf8AZM2fykg4ycXoTHup5DxbuFcOapb+rKBUGxOcIizt1JC3F\n8vNIhWxzA5c0N4IXgiRB4p7M0Nqs/9xfrmGE1pDao8Db5SAV7s0N+E6GExj+CYZGbSQogoKaUrWo\n+VxKAMO1Dk3SmNXrvivdjF8MJdPbB+GX/Xy+3MyV25DANsp9K14z54+xNLoBiXmKcHEDSpIpDGlx\n8toycUa9UC4jK9WIYBoGT7hOkhRk8C0cwwsMn4bhb97hB05pxAavNvsPJP61q/gQPF7TkIdqtl9x\nuFBkGrkcigR9XAt7ckgTIZvTMKTFA1syPfGYfAlx6ZFctrQ0ciiYhsGbWH6JfgpGa58VHzLqXwyP\nJEikRCQjTteyWSnKV0KlV0zm5TRV4VeMsuIej59wKKpqgLqrXIjwdZwAgLFFwlstht2xXvEP27dL\nF4wZOBApCQYYk3pg2fFd
QS+ZnhQJgltMLmTNGx6+fEGAhqGSz2j5QTRYPR3/gcTfzxGm0uOVoX8W\nqoxKZ9XBqm22N4/0nYUGl6XViU5SgdFc1jQ/6ANotd8gShOJhUPnC3ZpaRhxMB/Gi0OegcPjaJds\n2stB+iyb81/xBCth8oc+s1p1LaM2CouGLQgIuFC49rkuNYyLDUV4ctvzOFEtqdrpX1+JZBCml89a\npYvcM049ZIYLSQhtosGErokRIAkCRq28qmsoWhQYvA+D9PNh+Jmk+MFemrDDzz79naHSWSmPURsl\n2PlJghTs4cEGmPaGIqlLyoqVmqGac5R3jQzMZG8N0Vqj8FykgiaYhqGh1ELxwiuB1G/RnFDk7ydY\nNrRepWvWoS8lUmNo8Z1VuPa4Lp/4tpKdcDFufHTsc2EbQXrlNUpJBrSKBXw5eRRB4Y+398Oyk1wu\ngX8mM0CgK3JhiG3EIEmBttYm86iIVgoM3358VrdUCPSOzcaozjcGXJefcfo7Q5tzjvJoKQ1cXleL\nS1ReCaRmvOYGL5qkcX/WXSFXNbxULiVr/Zeitaa2ZwY8hr0VBzDwF/BFKVx7XJcCIymMq0HkgmQt\naz8NIz5aI8t81tIaGHSi+Sc90YiTkrJYqQkGPD1qVMC1WiswWjtb899PRYnfH8i6S9AOpCF+oU1S\nLV9TFeLYqwFpoEBLpbxvSBzY7O+tgVu73YFGT+Aa6Fea1prCkg1JSDYktbyjgkIQrkuTVNAoD5LB\ntBGirXvCsM6yOkvcetrioKShxcFq4cOD8Nx9/YNeix+sW4o7D+b0ljXPZ/7yH+SlA7lUY5CaHEST\nVKCjsyV438nVKDDkGkbHh3dOSBsDgFs/4mqlNf4LBYXL5brSMHYfr0DnuPCgBfe0WmDs4GR8v923\ngWACNAxaMqPVSArfRUdooVGFHrAWj1jYYjJaixoGQQTdL0IbqEkAwZ3e0t8Xj1jY/PWEywa/7tWA\nLHGvHesyhWJk8nDkxve7KmsGAcDfR7z8i/SDwvXL1TcKdBAWuwsffsetijVheqDAYDsfxpGabOG7\nw+uUVX/VUhrZoCmtlKpRN/8nbc1KV6H+6FpKA4fXKeQS+O8XFaKAm8zp7Zt1EhKFsrWrb/ECg8HV\nZ7eXmaR+AQ2DIIirVlgArSvHraDQFq4bgeFwS0t6B3d+fnzi/4TP58wXZL/paJ3MHCQ1SbW0BkBr\nCPOJaOkAACAASURBVKWBPDXgUewqy0NuAudIlwktUgWtSouHsu9Bo9+aFTpaC5qg4GG9wjHJ4YmY\nlDYWWSGWkQwG79y/Gh29MpOUMrNWUOhwrnmB4fYwUNEkHE5RSJTUNLR4HL9GL0+YSi8brLWq9rUV\nEyEERlJ4Au7oMVn4Lh0keS1iYEK/gOO42bABVpdV8FUQBIFxqaMD9m2+Xb6lYf1WobsakIXVXoUm\nMwWFa41r2um9+3gFfv/3bThxsQ4Ol6hhnCura+YoDtbPBKOndbLBWku3rxO4tQllJEEGXew+GCkR\nnZEYZKH7S4G/1tVokqJbmemtoKDQPlzT07LvdhcAALYfKcONfRK50FnCK5T8YL0UtxBSK9CpdDKn\nt1atgpCk0Q6QAdWLQsOvatfSalqzet4bIPguFT5K6qrUMCRC4kplWCsoXE9c0wJDit3phDZnGwja\nDdbLDTSsR9VqgRFG62UmEJqkMWt8ulCBtq2EMkk1R0sO2PZwBMfoolFiK7uiWcyhkN5fe5YKV1BQ\nCM51IzAsDjsImouOEoSE34p4BAjZjJwmaSE7WK/SyWaxNEnjhj6JaC8uRcPg8V+voiOYkTEVJl0M\nbk0Z2eHXulQUrUJB4ZfluvnHNbnk5iOWIQG/Nbf91zKWRkX51w9qb5v55WgYWdHd27UNwYhQGzC1\n24QWFz1SUFC49rluBQYYEp4KMbO7d2x2gMCQRkX5F2Vr77j/SzGpdI9KQ7gqTFj0RkFBQeGXoMNN\nUj/99BMWLVoElmUxffp0zJ49W/Z7eXk5nnvuOVitVjAMg3nz5uGmm25ql2uzhAf8ehNNbr9kPYZC\ndlQvPDLiTqF0xuv73gVgFnaROrlV5KXXYboULsUk9Xi/3wuObwUFBYVfig7VMBiGwcKFC7F8+XJ8\n9913WLduHfLz82X7fPDBBxg/fjxWr16Nt956Cy+//HK7XLvR3YiGtG+hSjsKsCwcnkCTVFyUDhpa\nDYIgQBBEwBKl0sJ+ejr4uhNtha//dCkmH4IgrvulMRUUFH55OlTDOHr0KFJSUtCpE+ecnTBhAjZv\n3oz09HRhH4IgYLNxa0xbLBbEx7ctb4CnxMYtpUrHlsNTx8LrdsvuNkKnxR3D02XH3J1xO0z6GKy9\nsBEAV8jt2YF/xMWGIsToomX7tlexu+cGzsWR6uPIjslol/Ndb/y218wW63QpKCi0Dx0qMCorK5GY\nKEYSxcfH49ixY7J9HnvsMcyaNQufffYZHA4HPv7443a5tsVlFT5X06cR5omS/R4drg8oGKim1BiX\nOhqbCrfC6XWBJmikRHRGSkTngPO3VzG+hLA4JIQFlkVXaB394npf6SYoKFw3dKjAaE39oXXr1mH6\n9Ol48MEHcfjwYTzzzDNYt25di8eZTM0nrTmq7cLnuoj9sJcMBiSH6DTakOfQqrRwel3QazUh94mN\njoApuvk2/FK01BfXE0pfiCh9IaL0RfvQoQIjISEBZWVlwvfKykrExcXJ9lm1ahWWL18OAMjJyYHT\n6URdXR2io+UmIH+qq63N/l5YUyb7bnXZIXVbE14y5DlI1ldwz0OE3MdS70C1t/k2/BKYTIYW++J6\nQekLEaUvRJS+EGmr4OxQ42/v3r1RVFSE0tJSuFwurFu3DqNHy4vfJSUlYdeuXQCA/Px8uFyuFoVF\na6h1mGXfNTp5RrfUoe0PnxDWnNlJSRpTUFC43uhQDYOiKCxYsACzZs0Cy7K44447kJ6ejvfeew+9\ne/fGyJEj8dxzz+GFF17AJ598ApIk8cYbb7TLtZ1eeVRUdrcwnBCtVOgcHjpLmmqFwGhrjSYFBQWF\nXxsdnocxYsQIjBgxQrZt7ty5wuf09HT85z//affrerxyjYJWuwGJwMhsZk0IoRx4u7dKQUFB4dfL\nNWtXaWiULyjkJpwAAJMuBrG6GKQGiXziEUp6B0mOG5k8XDiPgoKCwvXENVt80O50Qerltrs59WJq\nt4noa+rZ7LG8wPAGERh39JiM6d0nKdVRFRQUrjuuSQ3D4fLAw8hNUjafwGhN/kRzGgaglNJWUFC4\nPrkmBUZJlR0g5E5pm5vLJm9NlVmqBYGhoKCgcD1yTQqM4iorCEI+2PNRU63RMAhFYCgoKCgEcE0K\nDLPNGaBh8LSmBhTVjA9DQUFB4XrlmhQYDTYXQLAwaePw0pBnZb+1hw9DQUFB4Xrk2hQYdhdAMFBT\ndLOLIoWiiyEZAJBsSGphTwUFBYXrh2syrLbB7gKMLGiSChQYRMu3PL7rGMSHxaGfS
amEqqCgoMBz\nTQoMi90FgmBAkRRokoaaUsN1CU5vNaXCDYkDO7qZCgoKCr8qrjmTFMOysNidACE6r/W0uB63Slmp\nTkFBQeGyuOYEhr3JLUQ38cuoSgVGey18pKCgoHC9cc0JDN7hDUBY91q6XnZ7rcWtoKCgcL1xjQoM\nLgfDX8NQkbQgRBQUFBQULo1rTmBYbFKBwd2emuKqEOppfcjjFBQUFBSa55oTGMFMUg4vV+pcr9KF\nPE5BQUFBoXk63AP8008/YdGiRWBZFtOnT8fs2bNlv7/22mvYu3cvCIJAY2MjzGYz8vLyLvt69TYn\nCD+TVKO7CYDc+a2goKCgcGl0qMBgGAYLFy7EJ598gri4ONxxxx0YPXo00tPThX3mz58vfP78889x\n6tSpNl3TItEw+BIf3aPSkN9QgF6xWW06t4KCgsL1TIcKjKNHjyIlJQWdOnHrZ0+YMAGbN2+WCQwp\n3333HR5//PE2XVPu9OYExviuY5Ae1RVZzSzLqqCgoKDQPB3qw6isrERiYqLwPT4+HlVVVUH3LSsr\nQ2lpKYYMGdKma9bbHdBm7QMg+jAokkJ2TIay8JGCgoJCG+hQDYNlg5cYD8a6deswduzYVg/qJpMh\n6Hartw5Qcet3h+t1Ife7lrge7rG1KH0hovSFiNIX7UOHCoyEhASUlZUJ3ysrKxEXFxd03/Xr1+Ol\nl15q9bmrq60B29weBvZGBny5QZfDG3S/awmTyXDN32NrUfpCROkLEaUvRNoqODvUJNW7d28UFRWh\ntLQULpcL69atw+jRowP2u3DhAiwWC3Jyctp0PWujS/ad92EoKCgoKLSdDtUwKIrCggULMGvWLLAs\nizvuuAPp6el477330Lt3b4wcORIAp11MmDChzdeTOrwBgFSyuhUUFBTajQ7PwxgxYgRGjBgh2zZ3\n7lzZ98cee6xdrtVgkwsMpW6UgoKCQvtxTdls6u1OIQcDUExSCgoKCu3JNTWiXii1yE1SisBQUFBQ\naDeumRGVYVkcvVALvVa8JTfjuYItUlBQULi2uGYExsot52Gxu9A1SQwbczPuK9giBQUFhWuLa0Zg\n7G/4Cepuh3BzPzGz3O1VBIaCgoJCe3HNrFfaFHUGFAC1WswUVzQMBQUFhfbjmtAwjteIFW6bPA7h\ns0sRGAoKCgrtxq9eYJRbq/HB0Y+F702eJuFzj6jgVXEVFP6/vTsPbKpKHz7+TdK0LC2b3QCZikVB\nsAqoLMKUdYChBVoBFas4U6SAQNlEFgXGqQNYmAr8FBVBQUBRXwGFMOpYQUAqKIIwLDrgQGmRlq3Q\njaTJPe8fLSmhQFJoUtM+n79yb05Ozn2g98k5595zhRDl5/UJI+PsRYftgpIeRmTjjrQLbVsZTRJC\niCrJacLIysryRDtumk6vOWxv+PVzAG73byTLmQshRAVymjAGDhzI2LFjSUtL80R7yu16V0LJOlJC\nCFGxnCaMr7/+mh49erBgwQL69u3L6tWrycvL80TbXHLJZr7mflkWRAghKpbTs6qvry8xMTF8+OGH\nvPzyy7z99ttERkaSlJTE2bNnPdHGGzJbr93DkIQhhBAVy6WzamZmJv/85z+ZNGkSHTt2ZOnSpdx2\n220MGzbM3e1zymIrfgaGKvJ12C8r1QohRMVyeuPeyJEj+eWXX3j88cdZu3Yt9evXB6Bt27Zs2rTJ\n7Q10xlwyh6Hl18VQ77R9vyw8KIQQFctpwhgwYAC9evXCYCj7i33jxo1Ov2Dr1q3Mnj0bpRQDBw4k\nISGhTJlNmzbx+uuvo9frad68OfPnz3ex+Vf2MIwO+w0y6S2EEBXKacKoW7cuBQUFBAQUL+p38eJF\nDhw4QMeOHZ1WrmkaSUlJLF++nODgYAYNGkSPHj0IDy+9oe748eMsXbqUDz/8EH9/f86dO1euA7CU\n9DCU1c9hvwxJCSFExXI6bpOcnIy/v79929/fn+TkZJcq37dvH2FhYTRu3Bij0UhUVBSpqakOZT76\n6COeeOIJ+3c0aNCgPO2nSCt5jvdVcxgyJCWEEBXL6VlVKeVwA5xer8dms7lUeVZWFg0blq4eGxIS\nQnZ2tkOZY8eO8b///Y8hQ4bw+OOPs23bNlfbDpSuF9Whxe0O+6WHIYQQFcvpkFTt2rX56aefuP/+\n+wH46aefqFWrlkuVK6WclrHZbKSnp7N69WpOnjxJXFwcJpPJoVdzI5dv3PMzOA5JSQ9DCCEqltOE\nMXnyZEaPHk2zZs0AOHLkCK+99ppLlYeGhnLy5En7dlZWFsHBwQ5lQkJCaNOmDXq9nttvv52mTZty\n7Ngx7r333hvWHRRU8qAkHw3McGeDO6gd/Ef+fbS4hxLYIICgBgE3qKHqsMdCSCyuILEoJbGoGE4T\nRps2bTCZTOzduxelFG3atKFu3bouVR4REUF6ejqZmZkEBQVhMplISUlxKNOzZ09MJhMxMTGcO3eO\n48eP06RJE6d1nz6dC0ChuXixwaJLiu53dbUnjIsXLnHalutSO71ZUFCAPRbVncSilMSilMSi1K0m\nTpceoFS3bl26dOlS7soNBgMzZswgPj4epRSDBg0iPDycRYsWERERQbdu3fjjH//It99+S1RUFAaD\ngeeff97lhARgVcXP7a7h44tRX3o4cqe3EEJULKcJ4/Dhw8yaNYvDhw9jsVjs+w8dOnSDT5WKjIwk\nMjLSYV9iYqLD9tSpU5k6dapL9V3NqornMHx9jPjoSg9HL5PeQghRoZz+DP/b3/7G+PHjCQsL45tv\nviEhIYEJEyZ4om0usSorStPhZ/DBx6GHIQlDCCEqktOEYbFY6NixI0opgoODmTBhQrkvfXUnqyoC\nzYDBoHe4/NeglyEpIYSoSE7PqvqSE2/dunU5fPgw58+fJzMz0+0Nc5WN4oThY3A8FOlhCCFExXI6\nhxEVFcX58+dJSEhgyJAhaJpWZg6iMtlUEcrmg4/B8el6ch+GEEJUrBsmDE3T6NixI/Xr1ycyMpJd\nu3ZhNptdvqnOE2xYQfPFUKaHIQlDCCEq0g3Pqnq9nhdeeMG+bTQaf1fJQlMams6KshnK9DBkSEoI\nISqW05/h4eHhZGRkeKIt5VakFd+DgWbAoJchKSGEcCencxjnzp2jf//+PPDAAw5rSC1cuNCtDXOF\nueR53sVzGI4JQhKGEEJULJcmvaOiojzRlnIzW0tuJLziKqm764XzS85Rh0tshRBC3DqnCSM2NtYT\n7bgpFq00YVwekkpsk4CmtEpslRBCVE1OE0ZiYuI1f63/voakSnsYOp1OJryFEMINnCaMbt262V+b\nzWa++OILh0esViaz7XIPwweDQYaghBDCnco9JPXII48watQotzWoPC4nDJ1mQC9zFkII4VblvpRI\np9P9bi6ztZQkDKPeWMktEUKIqq9ccxhKKX7++Wc6duzo9oa54pK1eA4joIZrj4wVQghx88o1h2Ew\nGIiPj6d169ZubZSrLhQUAFDPxWeMCyGE
uHluv6x269atzJ49G6UUAwcOJCEhweH9devWkZycTGho\nKABxcXEMGjTIpbovFBQCUN+/5i21UQghhHNO5zCGDBnChQsX7Ns5OTnExcW5VLmmaSQlJbFs2TI2\nbtyIyWTi6NGjZcpFRUWxbt061q1b53KyALhYWDwk1cBfehhCCOFuThNGQUGBwzO269WrR15enkuV\n79u3j7CwMBo3bozRaCQqKorU1NQy5ZRS5WhyqcKi4oRRt1aNm/q8EEII1zlNGJqmUVAyVwCQn5+P\nzWZzqfKsrCwaNmxo3w4JCSE7O7tMuS+//JIBAwYwbtw4Tp065VLdAFatuB1+Bqcja0IIIW6R0zNt\ndHQ08fHxDBkyBIAPPviA/v37u1S5Kz2H7t27Ex0djdFoZM2aNUyZMoUVK1a4VL+1ZLVaPx8/l8oL\nIYS4eU4TxogRIwgODubrr79GKcXjjz9OTEyMS5WHhoZy8uRJ+3ZWVhbBwcEOZa4c7nr00UeZP3++\nS3UHBQWAQYENgm+rU7xdTVXnY7+axKKUxKKUxKJiuDSWExsbe1NXS0VERJCenk5mZiZBQUGYTCZS\nUlIcypw+fZqgoCAAUlNTadasmUt1nz6di7nIAnowFxRx+nRuudtXFQQFBVTbY7+axKKUxKKUxKLU\nrSZOp3MYY8eOJScnx759/vx5xo0b51LlBoOBGTNmEB8fT3R0NFFRUYSHh7No0SI2b94MwMqVK4mO\njiYmJoZVq1YxZ84clxtvU8VzGDWMcqe3EEK4m9MexokTJ6hXr559u379+qSnp7v8BZGRkURGRjrs\nS0xMtL+eOHEiEydOdLm+K11OGH4+vjf1eSGEEK5z2sOw2WwOV0UVFRVhsVjc2ihXaap40lt6GEII\n4X5OexidO3dmwoQJDB06FIAVK1aU6TFUFhsyJCWEEJ7iNGFMnDiRt956i7lz5wLFa0u1b9/e7Q1z\nhYYNpekx+sgDk4QQwt2cDkkZjUbGjBnD66+/zp/+9Cc+++wzpk+f7om2OaVhA01vfzyrEEII97lh\nD8NqtfL111/zySefsHfvXqxWK8uWLfvdrFar0EDpr/kIWSGEEBXruj2MOXPm0LVrV9asWUN0dDTf\nfPMNdevW/d0kCwCFDZ0q9zOghBBC3ITr9jA++OAD2rRpQ0JCAh06dAD43f2SVzoNNJm/EEIIT7hu\nwti+fTsbNmwgOTmZCxcuEBMT4/Kig56idDZ0yBVSQgjhCdcdz6lTpw5xcXGsXbuW119/nQsXLnDp\n0iXi4uJYs2aNJ9t4fToNPdLDEEIIT3BpAqBFixa8+OKLbNu2jbi4uGs+06JS6DR0ShKGEEJ4Qrke\nJGE0Gunbty99+/Z1V3tcpikNdEp6GEII4SFee4lRka0IQBKGEEJ4iNcmjEtFJQlDJwlDCCE8wWsT\nRmFR8QKIBulhCCGER3htwrhklR6GEEJ4ktcmDHPJkJSPrlzz9kIIIW6S2xPG1q1b6dOnD71792bJ\nkiXXLff555/TokULDhw44FK9l6wlQ1J66WEIIYQnuDVhaJpGUlISy5YtY+PGjZhMJo4ePVqmXH5+\nPqtWrSrXOlXSwxBCCM9ya8LYt28fYWFhNG7cGKPRSFRU1DVv+lu4cCHDhw/HWI4HIZlLLqv1kR6G\nEEJ4hFsTRlZWFg0bNrRvh4SEkJ2d7VDm0KFDnDp1ii5dupSrbnPJpLdBLz0MIYTwBLeebZVSTt+f\nPXs2r7zyisufuczoV5zravv5ERQUcPONrAKq+/FfSWJRSmJRSmJRMdyaMEJDQzl58qR9Oysri+Dg\nYPt2fn4+R44c4amnnkIpxZkzZ3j22Wd54403aNWq1Q3rPncxHwBl03H6dK57DsALBAUFVOvjv5LE\nopTEopTEotStJk63JoyIiAjS09PJzMwkKCgIk8lESkqK/X1/f3/S0tLs20899RTTpk2jZcuWTusu\nKhmSMsqQlBBCeIRbz7YGg4EZM2YQHx+PUopBgwYRHh7OokWLiIiIoFu3bg7ldTqdy0NSFs0KgNEg\nCUMIITzB7WfbyMhIIiMjHfYlJiZes+x7773ncr0W2+UehjxASQghPMFr7/S2lvQwfKWHIYQQHuG1\nCaPIVpIwfKSHIYQQnuC9CUN6GEII4VFemzAuD0n5SQ9DCCE8wusThgxJCSGEZ3hvwlDFCaOGJAwh\nhPAIr00YNs0GgJ+PbyW3RAghqgevTRiXexgyhyGEEJ7htQnDpop7GDXKsSS6EEKIm1cFEoYMSQkh\nhCd4bcLQkB6GEEJ4kvcmjJI5jJoy6S2EEB7hvQkDDaXA6CN3egshhCd4ccKwgfLa5gshhNfx2jOu\n0mnoNENlN0MIIaoN700Y0sMQQgiPcvsZd+vWrfTp04fevXuzZMmSMu+vWbOGfv36ERMTQ1xcHEeP\nHnWpXqWThCGEEJ7k1jOupmkkJSWxbNkyNm7ciMlkKpMQ+vXrx4YNG1i/fj3Dhg1jzpw5LtWtdBo6\nJUNSQgjhKW5NGPv27SMsLIzGjRtjNBqJiooiNTXVoUzt2rXtrwsKCtDrXWySTkPnvSNqQgjhddx6\nTWpWVhYNGza0b4eEhLB///4y5VavXs3y5cuxWq2sWLHCtcp1NulhCCGEB7k1YSilXCoXFxdHXFwc\nJpOJxYsXM3fuXOcf0mnodQaCggJusZXeT2JQSmJRSmJRSmJRMdyaMEJDQzl58qR9Oysri+Dg4OuW\n79u3L7NmzXJar02zgQ50Ss/p07kV0lZvFRQUUO1jcJnEopTEopTEotStJk63TgJERESQnp5OZmYm\nFosFk8lEjx49HMocP37c/nrz5s3ccccdTustshUByByGEEJ4kFt7GAaDgRkzZhAfH49SikGDBhEe\nHs6iRYuIiIigW7durFq1irS0NIxGI3Xq1OGVV15xWq+lJGHokTkMIYTwFLcvxBQZGUlkZKTDvsTE\nRPvrF154odx1mq2SMIQQwtO8ckznUpEFkIQhhBCe5JUJw1xUvLS5JAwhhPAcr0wYl6zFPQyDThKG\nEEJ4ilcmDPschiQMIYTwGO9MGCVzGAYZkhJCCI/xzoRhLZ7DMOjkaXtCCOEpXpkwLPY5DEkYQgjh\nKd6ZMGyXexgyJCWEEJ7ilQnDXHKnt49eehhCCOEpXpkwikqukpIehhBCeI5XJozLQ1LSwxBCCM/x\n0oRRPOktCUOI6iMvL4916/7fTX32+efHk5+fV8Etqn68MmEUXe5hyJCUENVGbu5F1q37+JrvaZp2\nw88mJy+gdm1/dzTrlrn6oLnfA6/8iX55eXOjwVjJLRFCeMqbb77GyZOZxMfH8eCD7enYsRPvvvs2\nt90WyJEjv7By5UdMm/Ycp09nY7GYGTx4CP36xQAweHB/li1bSUFBAc89l0hERGv+85+fCAoKYe7c\nf+Lr6+vwXd9+u40VK5ZhtVqpW7cuM2e+TP369SksLOTVV5P5+edD6HR6/vrX4XTp0o3vvtvBkiWL\
n0TSNevXqsWDBYt55Zwm1atXi8cefBGDo0MdITl4IKJ57LpE2bR7kwIH9zJkzn5Url/Pzzwcxm810\n7dqD+PgEAA4dOsCiRf+ksPASvr6+LFiwmMmTxzFhwvM0a3YXAKNGDWPy5GnceWczt/8beHfC0EvC\nEKIyfPT1Eb4/nF2hdT7UIphHu1//pDdq1FiOHfuVd95ZDcCePbs5dOggK1d+RGhoKADTp88iICAA\ns9nM8OFD6dKle8lT5nT2ejIyTvDSS3OYMuUFZs6cxpYtX9OrVx+H77r//jYsWbIcgI0b1/P+++8x\nevQ4li9fSkBAACtWrAGKh8lycnJITv4HixcvIzQ0lNzcaz/dT6crbcOJE+m88MLfmDRpCgAjRowm\nICAATdMYN24Uv/56hD/84Q5mzZpOUtIrNG/egoKCAvz8/OjXL4ZNmz4jMXESJ06kY7UWeSRZgJcm\njCKteEjKKHMYQlRrLVu2sicLgI8+ep9t274BIDs7m4yMdMLDGwOlwz4NGzYiPLz4BNu8eQtOnTrJ\n1bKzTzFz5gLOnj2D1WqlYcNGAPzwwy7+/vc59nL+/v58++022rRpa29HQMC1H4N65dBTSEgo99zT\nyr6dmvoFn322HpvNxrlzZ/nf//4HQGBgEM2btwCgVq1aAHTr1oPly5cxevR4TKbP+POf+7kYrVvn\n9jPu1q1bmT17NkopBg4cSEJCgsP7y5cv5+OPP8bHx4cGDRowe/ZsGjZseMM6i0omvWVISojK8Wj3\nZjfsDXhKjRo17K/37NnNjz/+wJIly/H19WXs2BFYLJYyn7ly+EmvN1yzzKuvzmPIkKd4+OHO7Nmz\nm3fffRu49nzD9eYgDAYDmlb63pXfU7NmTfvr3347yZo1q1m2bCW1a/sze/ZLWCxmrje14edXg4ce\nas+2bVvYvPkrli5dee2CbuDWSW9N00hKSmLZsmVs3LgRk8nE0aNHHcq0bNmStWvX8umnn9KrVy+S\nk5Od1nu5h+ErQ1JCVBu1atWioKDguu/n5+cREBCAr68vx48f48CB/1yznCuTzPn5+QQGBgLwr39t\ntO9v164Dn3zyoX07NzeXe++9j71793Dq1G8AXLx4ESjuyfzyy2EAfv75ML/9VtqTubIN+fn51KxZ\nk1q1anPu3Fm++24HAGFhd3D27BkOHz4EQEFBgX1yPzp6AAsWzOeee1pdt0fjDm7tYezbt4+wsDAa\nN24MQFRUFKmpqYSHh9vLtGvXzv66devWbNiwwWm9Vq14DsNXehhCVBt16tQlIuJ+nn76cdq3f5iO\nHTs5vN++/cOsX/8Jf/nLE/zhD2Hce2/EFe+Wzh9cOZdwPfHxw3nxxSkEB4fQsuW99mTw9NPDSEl5\nhaFDH8NgMPDXvyYQGdmV559/genTn0MpRf36DUhJeY0uXbrz+ecm4uPjaNGiJU2ahF2zDc2a3cVd\ndzXnqaceo1Gjxtx33/0A+Pj48NJLc3j11WTMZjM1atRgwYLF1KhRg+bNW1C7dm2iojw3HAWgU268\npuuLL75g+/btJCUlAfDpp5+yf/9+XnzxxWuWT0pKIigoiJEjR96w3omfzifj0lGGhIyhc6s/VHi7\nvUlQUACnT197kq26kViUkliUqoqxOHPmNImJI3n//U/K9bniCwBunlt7GOXJRZ9++ikHDhxg5Urn\n43GXexiB9evccgCqAolBKYlFKYlFqaoUi/Xr17Nw4UKmTZvm8eNya8IIDQ3l5MnScbusrCyCMd1e\n6gAAERdJREFUg4PLlNuxYwdLlixh1apVGI3Oh5msyopSUJhXVOV+OZRXVfz1dLMkFqUkFqWqWiw6\ndepBp049AMp9XLeaYNw66R0REUF6ejqZmZlYLBZMJhM9evRwKHPw4EFmzZrFG2+8Qf369V2q16pZ\nQTPg6yt3egshhKe4tYdhMBiYMWMG8fHxKKUYNGgQ4eHhLFq0iIiICLp168a8efMoLCxk3LhxKKVo\n1KgRixcvvmG9xQlDTw1JGEII4TFuvw8jMjKSyMhIh32JiYn21++++26567SqIpRmoIZREoYQQniK\nVy4+aFNWUHr8pIchhBAe45UJQ8MGmoEavrI0iBDVxa0sbw7w0UcfYDabK7BF1Y+XJgyZwxCiurnR\n8uau+PjjDzCbL1Vgi8rPZrNV6vffKq/8ia50GigDPgavzHdCiJtw9fLmzz6byPvvr2Tz5n9TVGQl\nMrIr8fEJXLp0iZkzp3L6dDaapjF27BiOHcvgzJnTjB07knr16rFw4RsOdS9fvpRvv92GxWLm3nvv\nY/Lk6QBkZmYwb95scnJyMBgMJCXNpVGjxqxevYIvv/wXer2eDh06MWLEaMaOHcGYMRNo3rwFFy7k\n8MwzQ/n448/41782smPHdiwWM5cumZk7959MnTqJvLxcrFYrw4ePpHPnLkDxMiRr1qxGr9cRHn4X\nEydO4emnh7BmzVoMBgMFBfkl2+swGDz/g9krEwaAXknvQojKsvbIRvZk76/QOtsER/BIs+jrvn/1\n8ubff/8dGRnpvP32eyilmDJlIj/9tJecnHMEBgaRnLwAgJo1dTz4oOLDDz/g//7vLerUqVOm7oED\nH+Mvf3kGgKSkmezYsZ2HH+7MSy+9yNChf6Vz5y4UFRWhaRrffbeD7du38vbb7+Hr63vd5cyvXI7k\nwIH9vPfeh/j7+6NpGnPmzKdWrVpcuJDDiBHF9f/661FWrVrOG2+8Q506dcjNzaVWrVq0bfsAaWnb\n6dy5C1999SVdu/aolGQBXpwwDPK0PSGqtV27dvL997uIj49DKUVh4SUyMtK5777WvP76Qt588zU6\nduxMz55/pLAwl+Ilzq+9+sTu3bt4//2VmM2XyM3N5c47w2ndui1nzpy2//q/fFPxDz/sIiqqn33V\nW1cW/3voofb4+xc/8U/TNN566zX27t2DXq/jzJnTnD9/jj17fqBr1x72hHa53ujoAbz//ko6d+7C\npk0bmDLl2ksreYLXJgy9ThYeFKKyPNIs+oa9AU9QSvHUU3+hf//YMu8tW7aKtLRveeut1/jll/0M\nHvzUdeuxWCykpCTzzjurCAwM4p13lpQsRX7t5FK85FHZBQwNBgNKafY6r3Tlcub//vfn5OTk8O67\nq9Hr9Qwe3B+z2XLdpZQiIu7n1KlX2Lv3RzRNo2nTO697LO7mtZMAvqqm80JCiCrj6uXN27fvgMn0\nGYWFhQAlv9TPc+bMGfz8/OjVqw9DhjzJwYMHSz5fm/z8/DL1WiwWdLri1XALCgrYsiXVXj44OIRt\n27YAUFRUhNl8iXbtir/38gR66XLmjTl8uPi7Nm/+6rrHkZeXR/36DdDr9fz44w/2lXAfeKAdmzd/\nxcWLFxzqBejduy9/+9sLREX1L3/gKpBX9jDMhx/iD/XvqOxmCCE86OrlzZ99NpFjx44xcuRfgeKE\nMmNGEhkZJ3j99YXo9Tp8fIz84x/Fq2X37x/Dc88lEhgY
5DDp7e/vT79+sQwd+hgNGzZyeBLeiy++\nxLx5s1m69C2MRiNJSXNp374jR478wrBhQ/H1NdKhQycSEp5lyJA4ZsyYxhdf/IsHHnjousfRq1cf\npkyZyPDhQ2nWrDlhYU0BaNr0ToYOjWfMmAQMBgN33dWc6dNnlXzmzyxd+iY9e/aq8LiWh1uXN3eX\nfpM+pc1dgYwdeF9lN6XSVbWF1W6FxKKUxKJUVYjF5s1f8e2323jxxZduqZ7f9fLm7iR3eQshqoMF\nC+bx3XdpzJ+/sLKb4r0JI6Cmr/NCQgjh5caPn1zZTbDz2knv+gF+ld0EIYSoVrw2YdTzlx6GEEJ4\nktcmDOlhCCGEZ7k9YWzdupU+ffrQu3dvlixZUub9H374gUceeYRWrVrx5ZdfulxvPX9JGEII4Ulu\nTRiappGUlMSyZcvYuHEjJpOJo0ePOpRp1KgRc+fOpV+/fuWqu570MIQQwqPcepXUvn37CAsLo3Hj\nxgBERUWRmppKeHi4vUyjRo0A0OnK3mp/PQ1vq42fPG1PCCE8yq09jKysLBo2bGjfDgkJITs7+5br\nfXVCl1uuQwghRPm4NWG46yby2jVl4UEhhPA0tw5JhYaGcvLkSft2VlYWwcHBFVL3rd7iXpVILEpJ\nLEpJLEpJLCqGW3sYERERpKenk5mZicViwWQy0aNHj+uW98JlrYQQotpw++KDW7du5R//+AdKKQYN\nGkRCQgKLFi0iIiKCbt26sX//fsaMGcPFixfx8/MjKCiIDRs2uLNJQgghboJXrlYrhBDC87z2Tm8h\nhBCeJQlDCCGESyRhCCGEcInXJQxna1NVNdOnT+fhhx92WDrlwoULxMfH07t3b4YNG0ZubunTxF5+\n+WV69erFgAEDOHToUGU02S1OnTrF0KFD6du3L/369eO9994DqmcsLBYLgwcPJiYmhn79+vHaa68B\nkJGRwaOPPkrv3r2ZOHEiVqvVXn7ChAn06tWLxx57zOFS96pC0zRiY2MZOXIkUH1j0b17d/r3709M\nTAyDBg0CKvhvRHkRm82mevbsqTIyMpTFYlH9+/dXR44cqexmudX333+vDh48qKKjo+37kpOT1ZIl\nS5RSSr311ltq3rx5SimltmzZooYPH66UUmrv3r1q8ODBnm+wm2RnZ6uDBw8qpZTKy8tTvXr1UkeO\nHKmWsVBKqYKCAqWUUlarVQ0ePFjt3btXjRs3Tm3atEkppdTMmTPVBx98oJRSavXq1WrWrFlKKaVM\nJpMaP358pbTZnd599101adIkNWLECKWUqrax6N69u8rJyXHYV5F/I17Vw7hybSqj0Whfm6oqe/DB\nB6lTp47DvtTUVGJjYwGIjY21xyA1NZWYmBgA7r//fnJzczlz5oxnG+wmQUFB3HPPPQDUrl2b8PBw\nsrKyqmUsAGrWrAkU/2K2Wq3odDp27txJ7969geJYfPXVV4Dj/5fevXuTlpZWOY12k1OnTvHNN98w\nePBg+77vvvuuWsZCKYWmaQ77KvJvxKsShrvWpvI2586dIzAwECg+kZ47dw6A7OxsQkND7eVCQkLI\nysqqlDa6U0ZGBocPH+b+++/n7Nmz1TIWmqYRExNDp06d6NSpE02aNKFOnTro9cV/0qGhofbjvTIW\nBoOBOnXqkJOTU2ltr2izZ8/m+eefty9gev78eerWrVstY6HT6Rg2bBgDBw7k448/BqjQvxGveqa3\nkltGbuha8SnPKsDeID8/n8TERKZPn07t2rWve3xVPRZ6vZ7169eTl5fH6NGjyzw2AEqP9+pYKKWq\nTCy2bNlCYGAg99xzDzt37gSKj+/qY64OsQBYs2aNPSnEx8fTtGnTCv0b8aqE4c61qbzJbbfdxpkz\nZwgMDOT06dM0aNAAKP6FcOrUKXu5U6dOVan4WK1WEhMTGTBgAD179gSqbywu8/f356GHHuKnn37i\n4sWLaJqGXq93ON7LsQgJCcFms5GXl0fdunUrueUV48cff+Trr7/mm2++wWw2k5+fz+zZs8nNza12\nsYDiHgRAgwYN6NmzJ/v27avQvxGvGpIq79pUVcXVvwS6d+/O2rVrAVi3bp09Bj169GD9+vUA7N27\nlzp16ti7olXB9OnTadasGU8//bR9X3WMxblz5+xXuly6dIm0tDSaNWtG+/bt+fzzzwHHWHTv3p11\n69YB8Pnnn9OhQ4fKabgbTJw4kS1btpCamkpKSgrt27dn/vz51TIWhYWF5OfnA1BQUMD27du5++67\nK/RvxOuWBrnW2lRV2aRJk9i5cyc5OTkEBgYyduxYevbsybhx4/jtt99o1KgRCxcutE+M//3vf2fb\ntm3UrFmTOXPm0KpVq0o+goqxe/dunnzySe6++250Oh06nY4JEyZw3333MX78+GoVi59//pmpU6ei\naRqaptG3b19GjRrFiRMnmDhxIhcvXuSee+5h3rx5GI1GLBYLkydP5tChQ9SrV4+UlBRuv/32yj6M\nCrdr1y7eeecd3nzzzWoZixMnTjBmzBh0Oh02m41+/fqRkJBATk5Ohf2NeF3CEEIIUTm8akhKCCFE\n5ZGEIYQQwiWSMIQQQrhEEoYQQgiXSMIQQgjhEkkYQgghXCIJQ3i1Rx99lNjYWKKiomjVqhWxsbHE\nxsYyffr0ctf1zDPPuLTc9bRp09i7d+/NNLdcDh48yBdffOH27xHCVXIfhqgSMjMzGTRo0A1XH728\nVIS3+Pjjj0lLSyMlJaWymyIE4GVrSQlRHmlpacybN4/WrVtz8OBBRo8ezblz51i9erX9gTpTp06l\nXbt2AHTp0oXly5fTtGlTnnjiCdq0acOePXvIzs4mOjqa8ePHA/DEE0/w7LPP0rlzZyZPnoy/vz9H\njx4lKyuLtm3bMmfOHKB4bZ7nn3+e8+fP06RJE2w2G927d+exxx5zaOeZM2eYNGkS58+fB6Bz5848\n88wzLF68mIKCAmJjY2nfvj1Tp05lz549pKSkUFhYCEBiYiKRkZGkp6fzxBNPEB0dze7du7FYLMya\nNYu2bdt6JNaimriVh3UI8XuRkZGhOnTo4LBvx44dqmXLlmr//v32fVc+XObIkSOqa9eu9u3IyEj1\n66+/KqWUGjJkiJo0aZJSSqmLFy+qdu3aqYyMDPt727ZtU0op9dxzz6knn3xSFRUVKbPZrPr06aN2\n7typlFJq1KhR6u2331ZKKXXixAnVpk0btWbNmjJtX7p0qZo5c6Z9++LFi0oppT766CM1ceJEh7bH\nxMSos2fPKqWUOnXqlIqMjFR5eXnq+PHjqnnz5spkMtmPvWvXrspqtboeRCGckB6GqNLuvPNO7r33\nXvv2sWPHWLRoEdnZ2RgMBrKzs8nJyaFevXplPvvnP/8ZgICAAJo2bUp6ejqNGzcuU+5Pf/oTPj7F\nf0otW7YkPT2ddu3asXPnTl5++WUAbr/9dntP5mqtW7dm1apVzJ8/n4ceeojOnTtfs9zu3bvJyMhg\n2LBh9gUpDQY
DJ06coFatWtSsWZO+ffsC0LFjRwwGA8eOHSM8PNzVcAlxQ5IwRJVWu3Zth+0JEyYw\na9YsunTpgqZp3HfffZjN5mt+1s/Pz/5ar9djs9nKVc7V5yw88MADrFu3jh07dvDJJ5+wdOlSVq5c\nWaacUopWrVqxfPnyMu+lp6eX2adpWpV61oOofN4zAyiEE8qF6zfy8vLsq5OuWbPmukmgIrRr186+\nrHRmZia7du26ZrmMjAz8/f3p27cvU6dO5T//+Q9Q/KyLy8uYA7Rt25YjR47www8/2Pft27fP/rqw\nsJBNmzYBxY8oBQgLC6vYgxLVmvQwRJXhyq/p6dOnk5CQQMOGDWnfvj0BAQHX/PzVdV3vvRuVmzFj\nBlOmTMFkMnHnnXfStm1bh++7LC0tjffeew+DwYBSiqSkJAA6derEihUriImJoUOHDkydOpXFixcz\nb948cnNzKSoqokmTJrz55psABAYG8t///pfBgwdjsVhISUnBYDA4jYkQrpLLaoVwE7PZjNFoRK/X\nk5WVxeDBg1m9ejVNmjSp8O+6fJXU9u3bK7xuIS6THoYQbvLrr78ybdo0lFJomsaECRPckiyE8BTp\nYQghhHCJTHoLIYRwiSQMIYQQLpGEIYQQwiWSMIQQQrhEEoYQQgiXSMIQQgjhkv8PZHg4l1eLyCQA\nAAAASUVORK5CYII=\n",
- "text/plain": [
- "\u003cmatplotlib.figure.Figure at 0x7f96f7389490\u003e"
- ]
- },
- "metadata": {
- "tags": []
- },
- "output_type": "display_data"
- }
- ],
- "source": [
- "with context.eager_mode():\n",
- " durations = []\n",
- " for t in range(burn_ins + trials):\n",
- " hp = tf.contrib.training.HParams(\n",
- " learning_rate=0.05,\n",
- " max_steps=max_steps,\n",
- " )\n",
- " train_ds = setup_mnist_data(True, hp, 500)\n",
- " test_ds = setup_mnist_data(False, hp, 100)\n",
- " ds = tf.data.Dataset.zip((train_ds, test_ds))\n",
- " start = time.time()\n",
- " (train_losses, test_losses, train_accuracies,\n",
- " test_accuracies) = train(ds, hp)\n",
- " if t \u003c burn_ins:\n",
- " continue\n",
- " train_losses[-1].numpy()\n",
- " test_losses[-1].numpy()\n",
- " train_accuracies[-1].numpy()\n",
- " test_accuracies[-1].numpy()\n",
- " duration = time.time() - start\n",
- " durations.append(duration)\n",
- " print('Duration:', duration)\n",
- "\n",
- "\n",
- " print('Mean duration:', np.mean(durations), '+/-', np.std(durations))\n",
- " plt.title('MNIST train/test losses')\n",
- " plt.plot(train_losses, label='train loss')\n",
- " plt.plot(test_losses, label='test loss')\n",
- " plt.legend()\n",
- " plt.xlabel('Training step')\n",
- " plt.ylabel('Loss')\n",
- " plt.show()\n",
- " plt.title('MNIST train/test accuracies')\n",
- " plt.plot(train_accuracies, label='train accuracy')\n",
- " plt.plot(test_accuracies, label='test accuracy')\n",
- " print('test_accuracy', test_accuracies[-1])\n",
- " plt.legend(loc='lower right')\n",
- " plt.xlabel('Training step')\n",
- " plt.ylabel('Accuracy')\n",
- " plt.show()\n"
- ]
- }
- ],
- "metadata": {
- "colab": {
- "collapsed_sections": [],
- "default_view": {},
- "last_runtime": {
- "build_target": "",
- "kind": "local"
- },
- "name": "Autograph vs. Eager MNIST benchmark",
- "provenance": [
- {
- "file_id": "1tAQW5tHUgAc8M4-iwwJm6Xs6dV9nEqtD",
- "timestamp": 1530297010607
- },
- {
- "file_id": "18dCjshrmHiPTIe1CNsL8tnpdGkuXgpM9",
- "timestamp": 1530289467317
- },
- {
- "file_id": "1DcfimonWU11tmyivKBGVrbpAl3BIOaRG",
- "timestamp": 1522272821237
- },
- {
- "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
- "timestamp": 1522238054357
- },
- {
- "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
- "timestamp": 1521743157199
- },
- {
- "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
- "timestamp": 1520522344607
- }
- ],
- "version": "0.3.2",
- "views": {}
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
diff --git a/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb b/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb
index 0702273fac..a3109fa5db 100644
--- a/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb
+++ b/tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb
@@ -1,49 +1,20 @@
{
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {
- "colab": {
- "name": "Dev Summit 2018 - Autograph",
- "version": "0.3.2",
- "views": {},
- "default_view": {},
- "provenance": [
- {
- "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
- "timestamp": 1522238054357
- },
- {
- "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
- "timestamp": 1521743157199
- },
- {
- "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
- "timestamp": 1520522344607
- }
- ],
- "collapsed_sections": []
- },
- "kernelspec": {
- "name": "python2",
- "display_name": "Python 2"
- }
- },
"cells": [
{
+ "cell_type": "markdown",
"metadata": {
- "id": "g7nGs4mzVUHP",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "g7nGs4mzVUHP"
},
- "cell_type": "markdown",
"source": [
- "# Experimental: TF Autograph\n",
+ "# Experimental: TF AutoGraph\n",
"**TensorFlow Dev Summit, 2018.**\n",
"\n",
- "This interactive notebook demonstrates **autograph**, an experimental source-code transformation library to automatically convert TF.Eager and Python code to TensorFlow graphs.\n",
+ "This interactive notebook demonstrates **AutoGraph**, an experimental source-code transformation library to automatically convert Python, TensorFlow and NumPy code to TensorFlow graphs.\n",
"\n",
"**Note: this is pre-alpha software!** The notebook works best with Python 2, for now.\n",
"\n",
- "> ![alt text](https://lh3.googleusercontent.com/QOvy0clmg7siaVKzwmSPAjicWWNQ0OeyaB16plDjSJMf35WD3vLjF6mz4CGrhSHw60HnlZPJjkyDCBzw5XOI0oBGSewyYw=s688)\n",
+ "\u003e ![alt text](https://lh3.googleusercontent.com/QOvy0clmg7siaVKzwmSPAjicWWNQ0OeyaB16plDjSJMf35WD3vLjF6mz4CGrhSHw60HnlZPJjkyDCBzw5XOI0oBGSewyYw=s688)\n",
"\n",
"### Table of Contents\n",
"1. _Write Eager code that is fast and scalable._\n",
@@ -53,37 +24,39 @@
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "uFcgBENZqkB2",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "uFcgBENZqkB2"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"# Install TensorFlow; note that Colab notebooks run remotely, on virtual\n",
"# instances provided by Google.\n",
"!pip install -U -q tf-nightly"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "Pa2qpEmoVOGe",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "Pa2qpEmoVOGe"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"import os\n",
"import time\n",
@@ -96,170 +69,172 @@
"import six\n",
"\n",
"from google.colab import widgets"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "ZVKfj5ttVkqz",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "ZVKfj5ttVkqz"
},
- "cell_type": "markdown",
"source": [
"# 1. Write Eager code that is fast and scalable\n",
"\n",
"TF.Eager gives you more flexibility while coding, but at the cost of losing the benefits of TensorFlow graphs. For example, Eager does not currently support distributed training, exporting models, and a variety of memory and computation optimizations.\n",
"\n",
- "Autograph gives you the best of both worlds: write your code in an Eager style, and we will automatically transform it into the equivalent TF graph code. The graph code can be executed eagerly (as a single op), included as part of a larger graph, or exported."
+ "AutoGraph gives you the best of both worlds: you can write your code in an Eager style, and we will automatically transform it into the equivalent TF graph code. The graph code can be executed eagerly (as a single op), included as part of a larger graph, or exported."
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "snaZRFdWd9ym",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "snaZRFdWd9ym"
},
- "cell_type": "markdown",
"source": [
- "For example, autograph can convert a function like this:"
+ "For example, AutoGraph can convert a function like this:"
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "9__n8cSIeDnD",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "9__n8cSIeDnD"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def g(x):\n",
- " if x > 0:\n",
+ " if x \u003e 0:\n",
" x = x * x\n",
" else:\n",
" x = 0\n",
" return x"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "gq0eQcuReHET",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "gq0eQcuReHET"
},
- "cell_type": "markdown",
"source": [
"... into a TF graph-building function:"
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "sELSn599ePUF",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
- "height": 413
+ "height": 431
},
- "outputId": "bb0c7216-1ca3-4da1-d1fb-589902cdcd1a",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 69,
"status": "ok",
- "timestamp": 1522345737505,
- "user_tz": 240,
- "elapsed": 243,
+ "timestamp": 1531750911837,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "sELSn599ePUF",
+ "outputId": "2858bde5-ae05-4c32-be01-7770ac914f02"
},
- "cell_type": "code",
- "source": [
- "print(autograph.to_code(g))"
- ],
- "execution_count": 0,
"outputs": [
{
+ "name": "stdout",
"output_type": "stream",
"text": [
"from __future__ import print_function\n",
"import tensorflow as tf\n",
- "from tensorflow.contrib.autograph.impl import api as autograph_api\n",
- "from tensorflow.contrib.autograph import utils as autograph_utils\n",
"\n",
"def tf__g(x):\n",
- " with tf.name_scope('g'):\n",
+ " try:\n",
+ " with tf.name_scope('g'):\n",
"\n",
- " def if_true():\n",
- " with tf.name_scope('if_true'):\n",
- " x_1, = x,\n",
- " x_1 = x_1 * x_1\n",
- " return x_1,\n",
+ " def if_true():\n",
+ " with tf.name_scope('if_true'):\n",
+ " x_1, = x,\n",
+ " x_1 = x_1 * x_1\n",
+ " return x_1,\n",
"\n",
- " def if_false():\n",
- " with tf.name_scope('if_false'):\n",
- " x_1, = x,\n",
- " x_1 = 0\n",
- " return x_1,\n",
- " x = autograph_utils.run_cond(tf.greater(x, 0), if_true, if_false)\n",
- " return x\n",
+ " def if_false():\n",
+ " with tf.name_scope('if_false'):\n",
+ " x_2, = x,\n",
+ " x_2 = 0\n",
+ " return x_2,\n",
+ " x = ag__.utils.run_cond(tf.greater(x, 0), if_true, if_false)\n",
+ " return x\n",
+ " except:\n",
+ " ag__.rewrite_graph_construction_error(ag_source_map__)\n",
"\n"
- ],
- "name": "stdout"
+ ]
}
+ ],
+ "source": [
+ "print(autograph.to_code(g))"
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "j74n-8hEe6dk",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "j74n-8hEe6dk"
},
- "cell_type": "markdown",
"source": [
"You can then use the converted function as you would any regular TF op -- you can pass `Tensor` arguments and it will return `Tensor`s:"
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "AkVaY0-dfEbH",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
"height": 53
},
- "outputId": "4ffe3757-c44d-424c-c2a8-7ddc973bfcce",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 83,
"status": "ok",
- "timestamp": 1522345737841,
- "user_tz": 240,
- "elapsed": 257,
+ "timestamp": 1531750911965,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "AkVaY0-dfEbH",
+ "outputId": "f04541ad-b1d3-4663-bf27-4d902648283d"
},
- "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "g(9) = 81\n",
+ "tf_g(9) = 81\n"
+ ]
+ }
+ ],
"source": [
"tf_g = autograph.to_graph(g)\n",
"\n",
@@ -272,77 +247,72 @@
"\n",
" print('g(9) = %s' % g(9))\n",
" print('tf_g(9) = %s' % tf_g_result)"
- ],
- "execution_count": 0,
- "outputs": [
- {
- "output_type": "stream",
- "text": [
- "g(9) = 81\n",
- "tf_g(9) = 81\n"
- ],
- "name": "stdout"
- }
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "trrHQBM1VnD0",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "trrHQBM1VnD0"
},
- "cell_type": "markdown",
"source": [
"# 2. Case study: complex control flow\n",
"\n",
- "Autograph can convert a large chunk of the Python language into graph-equivalent code, and we're adding new supported language features all the time. In this section, we'll give you a taste of some of the functionality in autograph.\n",
- "Autograph will automatically convert most Python control flow statements into their correct graph equivalent.\n",
+ "Autograph can convert a large subset of the Python language into graph-equivalent code, and we're adding new supported language features all the time. In this section, we'll give you a taste of some of the functionality in AutoGraph.\n",
+ "AutoGraph will automatically convert most Python control flow statements into their graph equivalent.\n",
" "
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "u0YG3DPgZxoW",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "u0YG3DPgZxoW"
},
- "cell_type": "markdown",
"source": [
"We support common statements like `while`, `for`, `if`, `break`, `return` and more. You can even nest them as much as you like. Imagine trying to write the graph version of this code by hand:"
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "xJYDzOcrZ8pI",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
"height": 35
},
- "outputId": "6c244ee4-b141-4ad6-eefa-cfffa71f33c6",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 169,
"status": "ok",
- "timestamp": 1522345738402,
- "user_tz": 240,
- "elapsed": 483,
+ "timestamp": 1531750912183,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "xJYDzOcrZ8pI",
+ "outputId": "f392b475-bf87-4d90-919d-44f895ee9fc7"
},
- "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Sum of even numbers: 42\n"
+ ]
+ }
+ ],
"source": [
"def sum_even(numbers):\n",
" s = 0\n",
" for n in numbers:\n",
- " if n % 2 > 0:\n",
+ " if n % 2 \u003e 0:\n",
" continue\n",
" s += n\n",
" return s\n",
@@ -358,77 +328,74 @@
" \n",
"# Uncomment the line below to print the generated graph code\n",
"# print(autograph.to_code(sum_even))"
- ],
- "execution_count": 0,
- "outputs": [
- {
- "output_type": "stream",
- "text": [
- "Sum of even numbers: 42\n"
- ],
- "name": "stdout"
- }
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "_YXo4KOcbKrn",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "_YXo4KOcbKrn"
},
- "cell_type": "markdown",
"source": [
"Try replacing the `continue` in the above code with `break` -- Autograph supports that as well!"
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "xHmC0rBIavW_",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "xHmC0rBIavW_"
},
- "cell_type": "markdown",
"source": [
"The Python code above is much more readable than the matching graph code. Autograph takes care of tediously converting every piece of Python code into the matching TensorFlow graph version for you, so that you can quickly write maintainable code, but still benefit from the optimizations and deployment benefits of graphs."
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "UEHWGpBXbS7g",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "UEHWGpBXbS7g"
},
- "cell_type": "markdown",
"source": [
"Let's try some other useful Python constructs, like `print` and `assert`. We automatically convert Python `assert` statements into the equivalent `tf.Assert` code. "
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "qUU57xlEbauI",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
"height": 53
},
- "outputId": "add3db4a-2077-4dd5-f7a7-a5b5a4529c26",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 56,
"status": "ok",
- "timestamp": 1522345738697,
- "user_tz": 240,
- "elapsed": 253,
+ "timestamp": 1531750912292,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "qUU57xlEbauI",
+ "outputId": "c9cd536a-4a95-4eb0-98c0-aafce5d79580"
},
- "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Got error message: assertion failed: [Do not pass zero!]\n",
+ "\t [[Node: f/Assert/Assert = Assert[T=[DT_STRING], summarize=3, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](f/NotEqual, f/Assert/Assert/data_0)]]\n"
+ ]
+ }
+ ],
"source": [
"def f(x):\n",
" assert x != 0, 'Do not pass zero!'\n",
@@ -444,61 +411,35 @@
" \n",
"# Uncomment the line below to print the generated graph code\n",
"# print(autograph.to_code(f))"
- ],
- "execution_count": 0,
- "outputs": [
- {
- "output_type": "stream",
- "text": [
- "Got error message: assertion failed: [Do not pass zero!]\n",
- "\t [[Node: f/Assert/Assert = Assert[T=[DT_STRING], summarize=3, _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"](f/NotEqual, f/Assert/Assert/data_0)]]\n"
- ],
- "name": "stdout"
- }
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "w5hBZaVJbck4",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "w5hBZaVJbck4"
},
- "cell_type": "markdown",
"source": [
"You can also use `print` functions in-graph:"
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "6NdzRKLEboRv",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
- },
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
- "height": 35
- },
- "outputId": "fb82dfc3-790f-4127-87f6-361805be9e9b",
- "executionInfo": {
- "status": "ok",
- "timestamp": 1522345739013,
- "user_tz": 240,
- "elapsed": 247,
- "user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
}
- }
+ },
+ "colab_type": "code",
+ "id": "6NdzRKLEboRv"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def print_sign(n):\n",
- " if n >= 0:\n",
+ " if n \u003e= 0:\n",
" print(n, 'is positive!')\n",
" else:\n",
" print(n, 'is negative!')\n",
@@ -512,62 +453,58 @@
" \n",
"# Uncomment the line below to print the generated graph code\n",
"# print(autograph.to_code(print_sign))"
- ],
- "execution_count": 0,
- "outputs": [
- {
- "output_type": "stream",
- "text": [
- "1 is positive!\n"
- ],
- "name": "stdout"
- }
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "9u_Z3i3AivLA",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "9u_Z3i3AivLA"
},
- "cell_type": "markdown",
"source": [
- "We can convert lists to TensorArray, so appending to lists also works, with a few modifications:"
+ "Appending to lists also works, with a few modifications:"
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "MjhCQJVuiTNR",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
"height": 35
},
- "outputId": "dc320b87-595b-4392-d29c-994486fd8a0a",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 148,
"status": "ok",
- "timestamp": 1522345744470,
- "user_tz": 240,
- "elapsed": 5391,
+ "timestamp": 1531750912595,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "MjhCQJVuiTNR",
+ "outputId": "96bf9131-c7c1-4359-ee82-9c38575e7ab4"
},
- "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[0 1 2 3 4]\n"
+ ]
+ }
+ ],
"source": [
"def f(n):\n",
" numbers = []\n",
" # We ask you to tell us about the element dtype.\n",
- " autograph.utils.set_element_type(numbers, tf.int32)\n",
+ " autograph.set_element_type(numbers, tf.int32)\n",
" for i in range(n):\n",
" numbers.append(i)\n",
" return autograph.stack(numbers) # Stack the list so that it can be used as a Tensor\n",
@@ -580,65 +517,62 @@
" \n",
"# Uncomment the line below to print the generated graph code\n",
"# print(autograph.to_code(f))"
- ],
- "execution_count": 0,
- "outputs": [
- {
- "output_type": "stream",
- "text": [
- "[0 1 2 3 4]\n"
- ],
- "name": "stdout"
- }
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "UdG8ZFrkTAF2",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "UdG8ZFrkTAF2"
},
- "cell_type": "markdown",
"source": [
"And all of these functionalities, and more, can be composed into more complicated code:\n"
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "DVs6wt8NKaGQ",
- "colab_type": "code",
+ "cellView": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {}
- ],
- "base_uri": "https://localhost:8080/",
"height": 53
},
- "cellView": "code",
- "outputId": "0a4b8d08-8f65-4bbc-85ba-dc4c60563519",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 555,
"status": "ok",
- "timestamp": 1522345745186,
- "user_tz": 240,
- "elapsed": 658,
+ "timestamp": 1531750913176,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "DVs6wt8NKaGQ",
+ "outputId": "8729229c-4f08-4640-d3a1-0d3f9c697a87"
},
- "cell_type": "code",
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The prime numbers less than 50 are:\n",
+ "[ 2 3 5 7 11 13 17 19 23 29 31 37 41 43 47]\n"
+ ]
+ }
+ ],
"source": [
"def print_primes(n):\n",
" \"\"\"Returns all the prime numbers less than n.\"\"\"\n",
- " assert n > 0\n",
+ " assert n \u003e 0\n",
" \n",
" primes = []\n",
- " autograph.utils.set_element_type(primes, tf.int32)\n",
+ " autograph.set_element_type(primes, tf.int32)\n",
" for i in range(2, n):\n",
" is_prime = True\n",
" for k in range(2, i):\n",
@@ -663,45 +597,36 @@
" \n",
"# Uncomment the line below to print the generated graph code\n",
"# print(autograph.to_code(print_primes))"
- ],
- "execution_count": 0,
- "outputs": [
- {
- "output_type": "stream",
- "text": [
- "The prime numbers less than 50 are:\n",
- "[ 2 3 5 7 11 13 17 19 23 29 31 37 41 43 47]\n"
- ],
- "name": "stdout"
- }
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "JQ8kQT99VqDk",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "JQ8kQT99VqDk"
},
- "cell_type": "markdown",
"source": [
"# 3. Case study: training MNIST with Keras\n",
"\n",
- "As we've seen, writing control flow in Autograph is easy. So running a training loop in graph should be easy as well!\n",
+ "As we've seen, writing control flow in AutoGraph is easy. So running a training loop in graph should be easy as well!\n",
"\n",
"Here, we show an example of such a training loop for a simple Keras model that trains on MNIST."
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "0CrtGWgwuLJr",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "0CrtGWgwuLJr"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"import gzip\n",
"import shutil\n",
@@ -754,66 +679,67 @@
"\n",
"def mnist_test(directory):\n",
" return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "2zu1U9Nqir6L",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "2zu1U9Nqir6L"
},
- "cell_type": "markdown",
"source": [
"First, we'll define a small three-layer neural network using the Keras API"
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "x_MU13boiok2",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "x_MU13boiok2"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def mlp_model(input_shape):\n",
- " model = tf.keras.Sequential([\n",
+ " model = tf.keras.Sequential((\n",
" tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape),\n",
" tf.keras.layers.Dense(100, activation='relu'),\n",
- " tf.keras.layers.Dense(10, activation='softmax')])\n",
+ " tf.keras.layers.Dense(10, activation='softmax'),\n",
+ " ))\n",
" model.build()\n",
" return model"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "Wuqg3H8mi0Xj",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "Wuqg3H8mi0Xj"
},
- "cell_type": "markdown",
"source": [
"Let's connect the model definition (here abbreviated as `m`) to a loss function, so that we can train our model."
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "W51sfbONiz_5",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "W51sfbONiz_5"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def predict(m, x, y):\n",
" y_p = m(x)\n",
@@ -822,63 +748,63 @@
" accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n",
" accuracy = tf.reduce_mean(accuracies)\n",
" return l, accuracy"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "035tNWQki9tr",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "035tNWQki9tr"
},
- "cell_type": "markdown",
"source": [
"Now the final piece of the problem specification (before loading data, and clicking everything together) is backpropagating the loss through the model, and optimizing the weights using the gradient."
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "CsAD0ajbi9iZ",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "CsAD0ajbi9iZ"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def fit(m, x, y, opt):\n",
" l, accuracy = predict(m, x, y)\n",
" opt.minimize(l)\n",
" return l, accuracy"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "PcVRIacKjSwb",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "PcVRIacKjSwb"
},
- "cell_type": "markdown",
"source": [
"These are some utility functions to download data and generate batches for training"
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "RVw57HdTjPzi",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "RVw57HdTjPzi"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def setup_mnist_data(is_training, hp, batch_size):\n",
" if is_training:\n",
@@ -896,16 +822,14 @@
" x = tf.to_float(tf.reshape(image, (-1, 28 * 28)))\n",
" y = tf.one_hot(tf.squeeze(label), 10)\n",
" return x, y"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "2zEJH5XNjgFz",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "2zEJH5XNjgFz"
},
- "cell_type": "markdown",
"source": [
"This function specifies the main training loop. We instantiate the model (using the code above), instantiate an optimizer (here we'll use SGD with momentum, nothing too fancy), and we'll instantiate some lists to keep track of training and test loss and accuracy over time.\n",
"\n",
@@ -913,33 +837,35 @@
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "UUI0566FjZPx",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "UUI0566FjZPx"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def train(train_ds, test_ds, hp):\n",
" m = mlp_model((28 * 28,))\n",
" opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
+ "\n",
" train_losses = []\n",
- " train_losses = autograph.utils.set_element_type(train_losses, tf.float32)\n",
+ " autograph.set_element_type(train_losses, tf.float32)\n",
" test_losses = []\n",
- " test_losses = autograph.utils.set_element_type(test_losses, tf.float32)\n",
+ " autograph.set_element_type(test_losses, tf.float32)\n",
" train_accuracies = []\n",
- " train_accuracies = autograph.utils.set_element_type(train_accuracies,\n",
- " tf.float32)\n",
+ " autograph.set_element_type(train_accuracies, tf.float32)\n",
" test_accuracies = []\n",
- " test_accuracies = autograph.utils.set_element_type(test_accuracies,\n",
- " tf.float32)\n",
- " i = tf.constant(0)\n",
- " while i < hp.max_steps:\n",
+ " autograph.set_element_type(test_accuracies, tf.float32)\n",
+ "\n",
+ " i = 0\n",
+ " while i \u003c hp.max_steps:\n",
" train_x, train_y = get_next_batch(train_ds)\n",
" test_x, test_y = get_next_batch(test_ds)\n",
" step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n",
@@ -956,173 +882,144 @@
" return (autograph.stack(train_losses), autograph.stack(test_losses),\n",
" autograph.stack(train_accuracies),\n",
" autograph.stack(test_accuracies))"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "cYiUQ1ppkHzk",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "cYiUQ1ppkHzk"
},
- "cell_type": "markdown",
"source": [
"Everything is ready to go, let's train the model and plot its performance!"
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "K1m8TwOKjdNd",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {},
- {},
- {}
- ],
- "base_uri": "https://localhost:8080/",
- "height": 988
+ "height": 585
},
- "outputId": "f9d3eef3-5bea-45c1-ddf9-4edee73e4436",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 17094,
"status": "ok",
- "timestamp": 1522345800262,
- "user_tz": 240,
- "elapsed": 52391,
+ "timestamp": 1531750930585,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "K1m8TwOKjdNd",
+ "outputId": "9f63da19-c3bf-498b-cf00-29090bf3b4f0"
},
- "cell_type": "code",
- "source": [
- "with tf.Graph().as_default():\n",
- " hp = tf.contrib.training.HParams(\n",
- " learning_rate=0.05,\n",
- " max_steps=500,\n",
- " )\n",
- " train_ds = setup_mnist_data(True, hp, 50)\n",
- " test_ds = setup_mnist_data(False, hp, 1000)\n",
- " tf_train = autograph.to_graph(train)\n",
- " (train_losses, test_losses, train_accuracies,\n",
- " test_accuracies) = tf_train(train_ds, test_ds, hp)\n",
- "\n",
- " with tf.Session() as sess:\n",
- " sess.run(tf.global_variables_initializer())\n",
- " (train_losses, test_losses, train_accuracies,\n",
- " test_accuracies) = sess.run([train_losses, test_losses, train_accuracies,\n",
- " test_accuracies])\n",
- " plt.title('MNIST train/test losses')\n",
- " plt.plot(train_losses, label='train loss')\n",
- " plt.plot(test_losses, label='test loss')\n",
- " plt.legend()\n",
- " plt.xlabel('Training step')\n",
- " plt.ylabel('Loss')\n",
- " plt.show()\n",
- " plt.title('MNIST train/test accuracies')\n",
- " plt.plot(train_accuracies, label='train accuracy')\n",
- " plt.plot(test_accuracies, label='test accuracy')\n",
- " plt.legend(loc='lower right')\n",
- " plt.xlabel('Training step')\n",
- " plt.ylabel('Accuracy')\n",
- " plt.show()"
- ],
- "execution_count": 0,
"outputs": [
{
- "output_type": "stream",
- "text": [
- "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/train-images-idx3-ubyte.gz to /tmp/autograph_mnist_data/train-images-idx3-ubyte.gz\n",
- "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/train-labels-idx1-ubyte.gz to /tmp/autograph_mnist_data/train-labels-idx1-ubyte.gz\n",
- "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/t10k-images-idx3-ubyte.gz to /tmp/autograph_mnist_data/t10k-images-idx3-ubyte.gz\n",
- "Downloading https://storage.googleapis.com/cvdf-datasets/mnist/t10k-labels-idx1-ubyte.gz to /tmp/autograph_mnist_data/t10k-labels-idx1-ubyte.gz\n",
- "Step 0 train loss: 2.244329 test loss: 2.2499208 train accuracy: 0.12 test accuracy: 0.161\n",
- "Step 50 train loss: 0.64771986 test loss: 0.56013924 train accuracy: 0.82 test accuracy: 0.836\n",
- "Step 100 train loss: 0.49011207 test loss: 0.42143965 train accuracy: 0.84 test accuracy: 0.879\n",
- "Step 150 train loss: 0.3768609 test loss: 0.39319593 train accuracy: 0.88 test accuracy: 0.883\n",
- "Step 200 train loss: 0.36007702 test loss: 0.37089333 train accuracy: 0.9 test accuracy: 0.881\n",
- "Step 250 train loss: 0.182115 test loss: 0.28543878 train accuracy: 0.94 test accuracy: 0.915\n",
- "Step 300 train loss: 0.2119576 test loss: 0.22305593 train accuracy: 0.92 test accuracy: 0.93\n",
- "Step 350 train loss: 0.12932214 test loss: 0.29057172 train accuracy: 0.96 test accuracy: 0.906\n",
- "Step 400 train loss: 0.22937602 test loss: 0.2200287 train accuracy: 0.92 test accuracy: 0.925\n",
- "Step 450 train loss: 0.23444137 test loss: 0.19857481 train accuracy: 0.94 test accuracy: 0.94\n"
- ],
- "name": "stdout"
- },
- {
- "output_type": "display_data",
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAe8AAAFnCAYAAACPasF4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzs3XmAFNW9Pvynlt5mYdhmQMHggnGN\nS9zCD0ElKug1edUY9ZoQTYze3GuiRk1uYjRqRHNj4n5NrhKjiUYlbihGQFRUFDSoKIvgICAO6+xL\n711V5/2jlq7qZaZnpnumZ3g+/zjTXV1dXSP91PecU+dIQggBIiIiGjLkwT4AIiIi6h2GNxER0RDD\n8CYiIhpiGN5ERERDDMObiIhoiGF4ExERDTEMb6JeOOigg3DllVdmPf6rX/0KBx10kGe766+/3rPN\ne++9h9mzZwMAtm3bhkMPPdR57osvvsCPfvQjzJw5EzNnzsTZZ5+NV199FQBw0003YdasWZg1axYO\nO+wwnHLKKc7v4XDY8x7JZBLz58/v9edavXo1Lr300oK2XbBgAebMmdPn97J19/rZs2fjhRde6PO+\niYY7hjdRL3366aee0Ewmk1izZk3WditXrsQnn3xS0D6vu+46TJs2DYsXL8bixYtxyy234LrrrsPO\nnTtxyy23YNGiRVi0aBHGjRuH3//+987vVVVVnv188sknfQrUI444Ag8//HBB2y5fvhxTpkzp83vZ\n+vt6oj0Zw5uol0444QQsWbLE+f3tt9/GV77ylaztrrnmGtx+++0F7bO+vh5HHnmk8/uRRx6JxYsX\nY/z48QUfV3NzM3784x/jo48+wkUXXQTAbAF48MEHMXPmTOi6jlWrVuHcc8/FrFmzcOaZZ2L58uUA\nzFaB0047DQBw//334ze/+Q2uuOIKfP3rX8d5552HxsZG533ee+89HHzwwVnv9cEHH+Bb3/oWTjvt\nNJx//vloaGgAAOzevRsXX3wxzjzzTJx66qm4++67cx5rPu+99x7OOecczJo1C9/+9redC6Vc++3u\ncSEE/vd//xczZ87EKaecgjlz5kDXdQDAwoULcdZZZ+GMM87AN77xDbz33nsFn3eiwcDwJuqlM844\nAy+99JLz+z//+U/MmjUr53ZCCCxatKjHfU6fPh1XXnkl/va3v2HTpk0AgHHjxkGSpIKPa+zYsbjm\nmmtw1FFH4YknnnAeF0Jg8eLFUBQFv/71r3HppZdi0aJFuPzyy3HTTTfl3NeiRYtw/fXX49VXX8WY\nMWPw7LPPAgA2bdqE2tpaTJgwwfNe4XAY//mf/4lrrrkGS5Yswfe+9z1cddVVAIBHH30Uxx13HF5+\n+WUsWLAADQ0NMAwj57FmikQiuOqqq3DDDTdg0aJF+OEPf4jrrrsOhmHk3G9jY2Pex1944QUsWrQI\nzzzzDJYsWYKGhgY8+eSTAIBbbrkFDz74IBYuXIibbroJr7/+esHnnWgwMLyJeun444/Hxo0b0dLS\nglgshlWrVmHKlCk5t73++uvxhz/8AYlEott9/v73v8d3vvMdLFiwAGeddRZmzJjhBEt/nXzyyc7P\n8+fPxxlnnAEAOOaYY5zqONOxxx6LCRMmQJIkHHLIIdi5cycAYMWKFTk/6wcffIBx48Zh6tSpAICz\nzjoLX3zxBXbs2IExY8bg7bffxvvvvw+/34+77roLdXV1BR376tWrMX78eBxzzDEAgJkzZ6KtrQ3b\nt2/Pu998jy9duhTf+ta3UF1dDVVV8e1vfxuvvPIKAGDMmDF46qmnsH37dhx77LH45S9/WdjJJRok\n6mAfANFQoygKTj/9dCxcuBCjR4/GiSeeCFXN/U/psMMOw3HHHYdHHnkERx99dN59BgIBXHrppbj0\n0kvR2dmJRYsW4fbbb8fEiRMxbdq0fh3vyJEjnZ8XLFiAv/3tb4hEIjAMA/mWNqiurnZ+VhTFaV5+\n5513cMkll2Rt39nZiYaGBk8LhN/vR2trKy655BIYhoFbbrkFjY2N+M53voOf/OQnBR17a2srRowY\nkXVsLS0tefeb7/Guri48/PDDmDdvHgBA13WMHj0aAPCnP/0Jf/rTn3Duuedir732wvXXX4/jjz++\noGMkGgwMb6I+OPPMM3H33Xdj1KhRPfbZ/vSnP8W5556LiRMn5ny+tbUV69evd6rWESNG4Pzzz8ey\nZctQX1/f7/C27d69GzfccAOefvppHHLIIfj8888xc+bMgl+vaRrWrFmT8yKkrq4O+++/P5577rmc\nr7388stx+eWXY8uWLbjsssucSronY8aMQXt7u/O7EAIdHR0YM2YMVFXNud+pU6fmfLyurg4zZszA\nd7/73az3+dKXvoTf/va3MAwD8+fPx7XXXotly5YVeGaIBh6bzYn64Oijj0ZjYyM2btzYY4VWV1eH\n73znO7j//vtzPh+Px3HllVd6wmLr1q34+OOPceyxx/bquFRVRTgczllRt7a2oqKiAvvvvz80TXMq\n0EgkUtC+V69ejYMOOgh+vz/rvY488kg0NTXh448/BgA0NDTgZz/7GYQQ+PWvf4133nkHgBmSY8eO\nhSRJ3R6r7YgjjkBzczNWrVoFwBxfMH78eEycODHvfvM9/vWvfx0vvPACYrEYAOCpp57C888/j9bW\nVnz/+99HOByGLMs48sgjezXWgGgwsPIm6gNJknDaaachFotBlnu+Bv7BD36Ap59+Oudze++9N/70\npz/hvvvuw5w5cyCEQFVVFX75y196RqAX4phjjsEf/vAHTJs2DW+++abnuYMPPhjTp0/HzJkzMWbM\nGPziF7/Ahx9+iNmzZ+O///u/e9y3fYtYvve67777cOuttyISicDn8+Gqq66CJEm48MIL8etf/xq3\n3norhBCYMWMGpkyZgh07dnheryhK1ntWVFTgnnvuwa233opoNIrRo0fjrrvu6na/I0eOzPk4AGzc\nuBHnnHMOADPYb7vtNowePRrTpk3Dt771LSiKAp/Ph9tuu61X551ooElcz5uIiGhoYbM5ERHREMPw\nJiIiGmIY3kREREMMw5uIiGiIYXgTERENMUPmVrGmpq6i7m/UqAq0tUWLus89Ec9j//Ec9h/PYXHw\nPPZfsc9hbW11zsf32MpbVbPvKaXe43nsP57D/uM5LA6ex/4bqHO4x4Y3ERHRUMXwJiIiGmIY3kRE\nREMMw5uIiGiIYXgTERENMQxvIiKiIYbhTURENMQwvImIaNh6443XCt723nvvxI4d23vc7sMP38cN\nN/y8P4fVbwxvIiIalnbu3IFXX11c8PZXXXUt9t57QgmPqHiGzPSoREREvXHXXb/D+vXr8Mgjc2EY\nBnbs2I6dO3fgnnv+iN/+9jdoampELBbDD35wOaZOnYYf//hyXHPNz7F06WuIRML44out2L59G668\n8lpMmTI153u89toSzJv3dyiKgoMOO
gS33XYL6us34M47fwefzwe/349bbvktdu7cnvVYdXXuqU8L\nsceGd0c4gfc3NOLYg+sG+1CIiIa9f7z+GVZuaCzqPo87uA7nz5ic9/l///fZeO65f+D7378MDz/8\nIDQthT/+8c9oa2vF8cd/DWeccRa2b9+GG2/8BaZOneZ5bWPjbvzhD/fh3XeX44UXns0Z3tFoFA89\n9AAeeeQJVFRU4Oc//yneffddvPzyyzjnnPMwa9a/4YMPVqK1tQUvv7wg6zGGdx9ceecbaO2M46ZL\njsOk8X0/gURENDQccshhAIDq6hFYv34dXnzxOUiSjM7OjqxtjzjiKABAXV0dwuFwzv01NHyBiRO/\nhIqKCgDA0Ucfg/Xr1+PEE0/CH/7wP2ho+AJf//ppmDRp35yP9cceGd5b23YiPOFNSMnD0dwRZ3gT\nEZXY+TMmd1slDwSfzwcAWLJkETo7O/HAA39GZ2cnfvjD2VnbKkp6gREhRM79SZL3OU1LQZJCOPbY\n4/HnP/8Ny5cvw5w5N+PHP74652Nf/eqxff4se2R4f7ztCyjVbTBG70RLZ3ywD4eIiEpAlmXoup71\neHt7O/baa2/Isow333wdqVSqT/vfZ59J2LbtC0SjEVRUVGLVqg9x1VU/xrPPzsOUKSfi9NPPgBAC\n9fUbsGXLpqzHGN69dPykA7G4CZArO9DSwfAmIhqOJk3aD59+ugH33XcnKiurnMdPPnkGfvGLa/DJ\nJ2vxb//2TdTV1eGRR+b2ev+hUAhXXHEVrr32J5AkGUcccRSOPfZY7NzZghtv/AWqqqrg8/lw/fU3\nob7+06zH+kMS+doDykxTU1dR93fjit+ipTOCQyLn4yfnHlHUfe9Jamuri/632dPwHPYfz2Fx8Dz2\nX7HPYW1t7m7dPfY+7y+P2Q+SL4mmcOtgHwoREVGv7LHhPbFmPACgLdk2yEdCRETUO3tseI8JjQIA\nxBFGPKkN8tEQEREVbs8N74rRAADJH+egNSIiGlL22PAeW2FW3pI/xtvFiIhoSNljw3uME96svImI\naGjZY8M75AvCLwcg+eNoZuVNRDQs9WZJUNtHH32ItjbvnUjlsAyo2x4b3gAwMlDDypuIaJjq7ZKg\ntn/+88Ws8C43e+QMa7a6ijFojDWiqSt7UnoiIhra3EuCXnDBRbj99lvQ1dUFXddx9dU/w+TJB+Lx\nxx/Fm28uhSzLmDp1Gg455FAsW/YGtmzZjDlz7sD48eOz9pu5DOjVV1/nLANaWRkCIJdkGVC3PTy8\nxwItQKfWPtiHQkQ0rD332UtY1bimqPs8uu4rOHfyWXmfdy8J+uijf8YJJ/w/fOMbZ2PLls24994/\n4J57/oinnnoc8+cvgqIomD//WRx33NcwefKXcc01P88Z3LmWAf3ww/fx1ltLcc4552H27AuxaNHr\nJVkG1G2PDu/a0FgAQAysvImIhrM1a1ajvb0Nixe/DABIJMzu0pNP/jquvvq/cNpps3D66bN63E+u\nZUDr6zc4S362tOzClCknlWQZULc9OrzrKszwTildMISALEmDfERERMPTuZPP6rZKLjWfT8VPf/oz\nHH64dy2L6677JbZu/Ryvv74EP/nJf+Chh/7a7X5yLQMaCAScJT/XrFlZsmVA3fboAWt25Y1gFNE4\nZ1kjIhpO3EuCHnro4XjrrTcAAFu2bMZTTz2OcDiMRx6Zi0mT9sX3v38ZqqtrEI1G8i4lCniXAQWA\nVas+xEEHHYpnn52Hzs4OfPOb38QFF1yE+voNzmOnn36G81ix7NGV96hgDSQhQw5EEYmnUBXyDfYh\nERFRkbiXBP3hD3+E2267Gf/1Xz+EYRi4+urrUFVVhfb2Nlx22fcQClXg8MOPwIgRNTjqqK/ihhv+\nG7/97Z3Yf/8DPPvMtQzokUcehVgsihtv/AVGjaoBIJdkGVC3PXZJUHvZtp++fgvicQM/O+pa7L/3\niKK+x56ASwj2H89h//EcFgfPY/9xSdABEpCCkNQUIvHUYB8KERFRQfb48A4qIUiqhs4oJ2ohIqKh\nYY8P7wo1BABoj4YH+UiIiIgKs8eHd6XPvFevIxEZ5CMhIiIqzB4f3iMClQCAjjjDm4iIhoY9PrxH\nVZgj+Xa1tw3ykRARERVmjw/v0RXm7WE7OjrQHk4M8tEQERH1bI8P70qfOWBNUpNYvallkI+GiIio\nZwxvn9nnDSWFpvbY4B4MERFRAUo6Peodd9yBDz74AJqm4T/+4z9w+umnO88tX74cd911FxRFwfTp\n03HFFVeU8lDysm8Vk9QUWjvZbE5EROWvZOH97rvvYuPGjZg3bx7a2tpwzjnneMJ7zpw5ePjhhzFu\n3Dh897vfxcyZMzF58uRSHU5eITVo/qBoaOviRC1ERFT+Shbexx13HI44wlx6bcSIEYjFYtB1HYqi\noKGhATU1Ndhrr70AACeddBJWrFgxKOHtV/wAAJ9foK2NlTcREZW/koW3oijOYuXPPPMMpk+fDkVR\nAABNTU0YPXq0s+3o0aPR0NDQ7f5GjaqAqipFPcba2mqM1M3K2+8XaI8kMXZsFSSu690r+SbOp8Lx\nHPYfz2Fx8Dz230Ccw5IvCfrqq6/imWeewV/+8pd+7aetLVqkIzLZK78IISBLMiTFQCKpY+u2NlQG\nuTRoobgKUf/xHPYfz2Fx8Dz237BYVWzZsmX4v//7P8ydOxfV1ekDqKurQ3Nzs/P77t27UVdXV8pD\nyUuSJPhlP2TVXHi9jYPWiIiozJUsvLu6unDHHXfgwQcfxMiRIz3PTZw4EeFwGNu2bYOmaVi6dCmm\nTp1aqkPpkV/xAbIZ3h2R5KAdBxERUSFK1mz+8ssvo62tDVdffbXz2AknnICDDjoIp512Gm6++WZc\ne+21AIAzzzwT++23X6kOpUd+xY9kyhxpHo5xXW8iIipvJQvvCy64ABdccEHe54877jjMmzevVG/f\nKwHFjw6YS4IyvImIqNzt8TOsAYBf9kMXZmhHGN5ERFTmGN4w+7wNGIBksPImIqKyx/BGeqIWyDrC\ncYY3ERGVN4Y3zD5vAGZ4s/ImIqIyx/CG2ecNAKrPYJ83ERGVPYY3rPu8AYRCEitvIiIqewxvpPu8\nQyEgHNMG+WiIiIi6x/BGus87GABiCQ26YQzyEREREeXH8Ea68g5YS3tH46y+iYiofDG8Afhls89b\nVc2KO57UB/NwiIiIusXwRrryllUBwGw6JyIiKlcMbwABJQAAzrKgrLyJiKicMbwBhFQzvCXVrLjj\nSVbeRERUvhjeAIKKNVJNNu/xjiVYeRMRUflieAMIWpW3IZkVd4yVNxERlTGGN4CQGgIAGJJZecdZ\neRMRURljeAMIWgPWdCQBsM+biIjKG8MbgCqrUCQFmhXe7PMmIqJyxvAGIEkSgmoAKWGFNytvIiIq\nYwxvS1AJImkkAABxTtJCRERljOFtCaoBJHQrvDlJCxERlTGGtyWkBpHQk1BkNpsTEVF5Y3hbgk
oQ\nAgKBoOCtYkREVNYY3hZ7opZgCIiyz5uIiMoYw9sSVM0pUitCQCSWGuSjISIiyo/hbQlZ85sHQwJJ\nzUAixaZzIiIqTwxvi115B4IGAFbfRERUvhjeFrvP2+c3wzvM8CYiojLF8LbYzeYqw5uIiMocw9ti\nV96yz+zrZngTEVG5YnhbglblLavmbWIMbyIiKlcMb4tdeUNheBMRUXljeFtC1mhzQzJDm+FNRETl\niuFtCTK8iYhoiGB4W+w+b3tNb85vTkRE5YrhbfHJKmRJdtb0TunGIB8RERFRbgxviyRJCClBZ01v\nneFNRERliuHtElQDiGlxKLLEypuIiMoWw9slqAYR1xJQFRmaJgb7cIiIiHJieLsErWZzRQE0Vt5E\nRFSmGN4uITUAAQHVLxjeRERUthjeLj7Fb/5XNRjeRERUthjeLn7ZBwCQVYGUzj5vIiIqTwxvF5+s\nAgAU1YCm9b/ybutK4MEX16G5I9bvfREREdkY3i4+xay8FaU4fd5PvFqP9z7Zjb8u3NDvfREREdkY\n3i4+u9ncZ0ArQrN5PKl7/ktERFQMDG8Xu89bUQwYQsAw2O9NRETlh+HtYjebS4rZZM5Z1oiIqBwx\nvF2c0eayGdq8XYyIiMoRw9vF7vO2K+9i9HsTEREVG8PbxWk2tyvvItwuRkREVGwlDe/6+nqceuqp\nePzxx7OemzFjBi666CLMnj0bs2fPxu7du0t5KAWxK2/I5ujwfjebC1buRERUfGqpdhyNRnHrrbdi\nypQpebeZO3cuKisrS3UIvebPCG8OWCMionJUssrb7/dj7ty5qKurK9VbFF1Ws3mxwlsqzm6IiIiA\nElbeqqpCVbvf/U033YTt27fjmGOOwbXXXgtJGtyUs6dHFZLdbM5mbyIiKj8lC++eXHnllZg2bRpq\nampwxRVXYPHixZg1a1be7UeNqoCqKkU9htraas/vcf9IAIBqLi6Gqqpg1ja94fObp9enKv3aT7kb\nzp9toPAc9h/PYXHwPPbfQJzDQQvvs88+2/l5+vTpqK+v7za829qiRX3/2tpqNDV1eR4Lx1IAgJSW\nBAA0t4TRVBPo83ukkpq1Pz3rvYaLXOeReofnsP94DouD57H/in0O810IDMqtYl1dXbj00kuRTJoh\nuXLlShx44IGDcSge9mhzQ+KANSIiKl8lq7zXrl2L3/3ud9i+fTtUVcXixYsxY8YMTJw4Eaeddhqm\nT5+OCy64AIFAAIceemi3VfdA8St2n7dZMevs8yYiojJUsvA+/PDD8dhjj+V9/uKLL8bFF19cqrfv\nE6fyBitvIiIqX5xhzUWRFEiQYMCsvDnDGhERlSOGt4skSfApPqfy7uk+7x3hXXjsk38grsUH4vCI\niIgADOJo83Lll33QhTVKvIc+7/s+eghdyTDGVdTi9H1PGYjDIyIiYuWdKagEkDQSAAC9m8p7W2MY\nXckwACBpJAfk2IiIiACGd5bairGIGREEv/oqtic3593ulfcbnJ8lzn9KREQDiOGdYXyFORe7pGpY\nrb2af0N3i/ogT+tKRER7FoZ3hnGV6YVUVPjzbifAe8CJiGhwMLwzjK+oTf8iCquoZTabExHRAGJ4\nZxhfOc75OYEINEPLvaGn8GZ4ExHRwGF4Z6j2V+EHX/4h9I4xgCTQGm/r8TXs8iYiooHE8M5h/5pJ\nMLpGAQCaYq05t/GMV8tTebNXnIiISoHhnYOqSBApc7BaLJV7KVLhSmbeKkZERAOJ4Z2DqsiAYU4+\nl8g7AYsnvYmIiAYMwzsHVZEhDAUAkNBzh3chzeZERESlwPDOQVUkQDfDO5knvHuD4U5ERMXE8M5B\nkiQo1pot21o6cm8kvNsTERENFIZ3Hgp8AICV9TuwsyWS9TxHkhMR0WBheOdhhzdkHZ2R7pvO2SxO\nREQDieGdhyqlwzsX4bpXzBD5lw4lIiIqNoZ3HnZ4S0qe6VFd3EFORERUagzvPHyKz5yIRdaR1Lqv\nrA2w8iYiooHD8M5DlRXAUCApOpKp7KZzd7HNZnMiIhpIDO88fKp1r7esI5nqofJmszkREQ0ghnce\n5ixrKiRFQ0LLUXm7f2blTUREA4jhnYeqyAVX3nqe8GZBTkREpcDwzkOWJXN+c1lHIpljxLn7VjEO\nWCMiogHE8M7DMIQ5YE0WSGiprOe9zeY9lNicw4WIiIqI4Z2HYQhAN+c3j2mJ7rdlnzcREQ0ghnce\nuiGcZUFjqXj2Bp5bxdi5TUREA6eg8F67di2WLl0KALj77rtx8cUX4/333y/pgQ023RAQyQAAIKKH\nu92Wfd5ERDSQCgrvOXPmYL/99sP777+PNWvW4MYbb8R9991X6mMbVIYhIBIhAEBMdGU9z1vFiIho\nsBQU3oFAAPvuuy9ee+01nH/++Zg8eTJkeXi3uJuVdzfh7VmYhM3mREQ0cApK4FgshoULF+LVV1/F\niSeeiPb2dnR2dpb62AaVIQREMggASCLXet7pwK7f1pZzxDkXLCEiolIoKLyvueYaLFiwAD/96U9R\nVVWFxx57DJdcckmJD21w6a5m86Scq8873VS+uy2CpvZY9hZ2djPDiYioiNRCNvra176Gww8/HFVV\nVWhubsaUKVPw1a9+tdTHNqgMwwAMFUJToSvRrOfdlTckkQ5q9zZW5c0KnIiIiqmgyvvWW2/FwoUL\n0d7ejgsvvBCPP/44br755hIf2uD60rhqAIBIhKCrkawAzry3O3ezub1taY6RiIj2TAWF9yeffIJv\nf/vbWLhwIc455xzcc8892Lp1a6mPbVBdcsbB+N7Mg+DTqwFZR0cyo49fSieyxMqbiIgGUEHhbYfP\nG2+8gRkzZgAAkslk6Y6qDFQGfTj56AkIiBEAgMZok+d54b63WxI5VyGxA53ZTURExVRQeO+33344\n88wzEYlEcMghh2D+/Pmoqakp9bGVhZAwP+fOsDe8vROziJwBzcqbiIhKoaABa3PmzEF9fT0OOOAA\nAMDkyZNxxx13lPTAykW1MgotALZ37fY8nll557rXO+lrARSJfd5ERFRUBYV3PB7H66+/jnvvvReS\nJOGoo47C5MmTS31sZWGkbzSAXM3mmaPNvQm9qf1ztO31OvwVYyBaTy71YRIR0R6koGbzG2+8EeFw\nGBdeeCHOP/98NDc344Ybbij1sZWFmmAVhACi1uIkH3zaiBfe3gJkNJvruje869s2AQCUmhb2eRMR\nUVEVVHk3Nzfjrrvucn4/5ZRTMHv27JIdVDmpCKpAVIJm6ACAB55fCwA4cLLrukcS6EqGcdt7D+Oc\nyf+GQ8cchNZ4KwBApHzs8yYioqIqeHrUWCw9g1g0GkUi0f0a18NFZVAFhAxN1z2Pp9y/S8DqjlXY\nEdmFBz5+GADQEm8DAIhkiH3eRERUVAVV3hdccAHOOOMMHH744QCAdevW4aqrrirpgZWLiqAPEBI0\n4Q3vpK65fhMQGQndaoe3prLyJiKioioovM877zxMn
ToV69atgyRJuPHGG/HYY4+V+tjKgll5S9AN\n74xqKS0d3lLGgDUhhFN5QzYY3kREVFQFhTcA7LXXXthrr72c31evXl2SAyo3duWti+6azYUnoBN6\n0pk+VZJ1DlgjIqKi6vOi3HtKNVkZVCGEbC5U4pIy3GEunAFtABDX4+mnFH2POVdERDQw+hzekiQV\n8zjKVkVQBSBBhze8tYzKO2GkB/DFtHR4S7LOAWtERFRU3Tabn3TSSTlDWgiBtra2kh1UOamw+ryF\n8PZdp3QNAdd2yTzhzT5vIiIqtm7D+4knnhio4yhbiixDEjIMpKC5J2KR3TOsGXkrb7DPm4iIiqzb\n8J4wYcJAHUdZkyUJAgZSWrqpXJJdt4pJQMod3qmoazsDBpjeRERUPH3u8y5EfX09Tj31VDz++ONZ\nzy1fvhznnXceLrjgAjzwwAOlPIx+kyADEEhprn5v1R3eAkmRDu+2RIfn9ULSQEREVCwlC+9oNIpb\nb70VU6ZMyfn8nDlzcP/99+PJJ5/EO++8g88++6xUh9JviiRDSAaSrvD2VN4QSBnp9c2d8DbM0ysk\n721mRERE/VGy8Pb7/Zg7dy7q6uqynmtoaEBNTQ322msvyLKMk046CStWrCjVofSbLCkABOJJVwgr\n3klaUsIV3vF28wctCAAQYOVNRETFU7LwVlUVwWAw53NNTU0YPXq08/vo0aPR1NSUc9tyoMgyJFmg\nI5JuGpdUb+Wtwd1sboV3yhwf+2w3AAAgAElEQVSPzsqbiIiKqeAZ1gbbqFEVUFWlqPusra0uaDtV\nMU+TUFzXOlblLTQVkj/puQu8PWk1m1vhDVkv+L2GouH82QYKz2H/8RwWB89j/w3EORyU8K6rq0Nz\nc7Pz++7du3M2r7u1tUW7fb63amur0dTUVdC2spABCdi2M31vu2SHt+5zqvB9qvZGQ3gHuhJh87lU\nABIAQ9IKfq+hpjfnkXLjOew/nsPi4Hnsv2Kfw3wXAiUdbZ7PxIkTEQ6HsW3bNmiahqVLl2Lq1KmD\ncSgFUWTzNHVE0/3akDXz/m3NvP6pwlhMm+AdnCecZnP2eRMRUfGUrPJeu3Ytfve732H79u1QVRWL\nFy/GjBkzMHHiRJx22mm4+eabce211wIAzjzzTOy3336lOpR+UxUF0IHOqGvaU1UDdBWQzHu4fQhA\nldOn0y/7ENet5nb2eRMRURGVLLwPP/zwbpcNPe644zBv3rxSvX1RqbIV3rH0oDQoGoSuOn3fighA\nkdN98j7Fh6iumE0bDG8iIiqiQWk2H2pUK5S7oq7R5opZeUtOePuhSunwViU1fZ+3zGZzIiIqHoZ3\nAXyKGcrhuN3nLZzK2x6spghvs7kqqxCaFeYyK28iIioehncB/NatYl0xK7xlA5IkzD5v2A/5PM3m\nqqxA6NbvisaVxYiIqGgY3gXwWfeX68KqoJ3bxNzh7Tebyi2qpMIwrN9lnUuTEBFR0TC8C1Dh91k/\nmRHszGvuCm9J+Jy+cQBmFW5V3pJVeXdGk7j/2dXY1hgekOMmIqLhieFdAL/PCm/JmkdNsSpwwzXj\nm65mNJur6cpcMdf0/ufyrVi1sRn3Pbt6AI6aiIiGK4Z3ARTJOk2SgCSlK293szkMNavZ3A53STYr\nb3s98GSKA9iIiKjvGN4FUOxbwCSByqAPvoDVg+2qvCXd22yuuprNoegw2OlNRERFMmQWJhlMslV5\nS5JAZciHsGrAACB0BYmNR0EZ2QRZqvbcKqZIKgAZQpedypuIiKgYWHkXIN1sbuDEr4yHL2D1fRsq\njLbxSG35CoRhB7b9GsXZxu7zdkjSwBw4ERENSwzvAshWEP/7qZNx5tcmweczk9i5jxuArouMZnPV\n2UbKvM+bVTgREfUDw7sAduU9fmwIkiRB8dmVtyu8hchoNndV3jL7vImIqHgY3gWQrSVBDWGGtqxa\no8Vdo811XXjmNreb0IWuAIoGwzDSO2SzORER9QPDuwB2Fa1b4S1Z93m7m80NQzgD2wCkg1xXIUlA\nyuDiJEREVBwcbV4AO5S/6NyGz9o3A0rKfMJwVd6GAclVUctOs7n537iWXguceuetj3dgQm0lDti7\nZrAPhYioLDC8C2D3eS/e+joAQLZWGfMMWDPSk7CYr/Fuc+fqe3AELhqQ4x1OYgkNjy7cAAD4yy9m\nDPLREBGVBzabF0B29WUDgJCyJ2nRDYHHXql3frfDW1LNKj2hJyBghntnJIkHnlsDg6POe6TpRs8b\nERHtYRjeBVAk72kSMMy7vQxvn/fGhnbXa8zntN2T0tsgXZl/UN+Enc2REh0xERENZwzvAmSGNwAr\nuNN93PGk7unztkebG51jobWMN3+Gd05znfeP9YhniIgoG8O7ALKsZD+oe4cLRGIpz+8KXK+xKnQD\n3hHnDO+esWeBiCgbw7sAco7KWxgZ/eAwB1c5r5HdK45Z94lL3srbYHj3iOdoePvX+t247I6l2N0a\nHexDIRpSGN4FyN9s7hV2Vd+K69TaQS/YbN5rDO/h7c8vfQLdEFi2eudgHwrRkMLwLkDmaHMATjXt\n5g7j5vZk+glhbtssbYLkT1cYKY6k7hFH5A9v/PMS9Q3DuwC5Ku9RVaFuX7P4vW3pX6yg362uQ/Co\nt5yHUymGd08Y3kRE2RjeBcjV5z1+VBWqQj4AQCjQw1w3OZrYAVbehWCzORFRNoZ3AZQczeaqrDrL\nfFZX+Lp9vcjRxA4AyZSe83FKY3jvGbhWD1HvMLwLIOf4ZnEv/1kdyg7vQyeNxrUXHoWvHTouu/KW\nNQACb3e8jHe2vwcAWPDOFsxd8ElRj3s4YHYTEWVjeBcgksq+jcW9/Gd1hT/r+ZHVfhy272jzOeE9\nzZI/DskfxxfJT/HEp88CAJ5ftgUr1u0q8pEPnLWbW7BibfGPn5U3EVE2hncBJo3YBwDw5VGTncfM\nZnPz5xGV6fAWmlmFj6kYCQCQ5exmcykQg+RPrzJmrxMOwGmKz2f+ss34+LPmPnyK0rrrHx9j7kvF\nbznggDUiomwM7wJU+6vwwIw7cOa+pzqPqa5Z1/yq7Axei6+ZisTGo3DUhP0BABUBNavZXArEIAVi\nzu+NkXQYdxdWndEkXnznc9z7zOr+faAS6unio7fKObxffGeLs+IZEdFAYnj3guIKbFVWPfNu71NX\nBQCoCYzAdbPOwKTx1QCAiqAv655wSUlB8qfD+4GP/+KsEa7p3YR3JJn3uXKR1Io7gr6cm83nL9uC\ntz7eMdiHMaSV8bUZUVljePeCu59blVQ4y2ZIwMGTRgEAamtCzs8AUBlSs/q8IQlP5d2aaIW692YA\ngN7N7WPhaCrvc+Wi2CPoyzm8iYgGC8O7F7Iqbye7JZxxwpfwzan74rJvHOp5TWXQlzUPOmTDCe+v\n7zPd3F9tAyBrWZV3Uk9i4ZZX0Z7oQGe0/CvvRLHD23U6/ufvH6KxPZZ/40HCC4y+4y1iRH3D8O4F\n9/3e7j5v
SQJURcbZ0/ZH7UjvzGuVOZrNIRmQ/HEoIoBzDzwLB4a+AknVIPnj0DIq73/Uv4CXtryC\nFzctQke4/MM7WeRZ49x93vUN7Zj32sai7r8YONlO37HZnKhvGN69oHbT551PZUjN7vOWDEi+JFQj\nCAAwdOt5SUBzVXHtiQ6s2LnS+b0jo897xY6VWLBpUS8/RWkVu/IWGVVtOS7mknnBRURUagzvXvBU\n3pKCQtK7MuiDENnN5lBSkI0AAEDT0o+7+7zvWzU3/RJJdgashQLm/h7f8DQWbX0dulE+M7UVu887\nM6zLsYk6VeRBekREPWF490L2aHMzSLrrtzNvFcucpCUBSQIk3QzvlDUOTZIM6Fafd2ckieZYi/Oa\nqBZzKu+qjBndolr59AMnSthsnuv3cqAxvPuNfd9EvcPw7gXPaHO5h8VILLIsZd/n7TMnaJE0c3IX\n3S5WJQOaYQbBtX98C7rQceDIAwAAsVQMHZEEAMCvKp77qbuS4d5/mCJyH0vxR5tn/l4e4a27Dox9\n3n0nCup8IqJMDO9eUFyBrcqq606xHsoG4X1e8pshbM/GJgzreUkgkdTxxqrt0GWzyq5UK+BX/Ihq\nMcQTZjDqhkBcT8/QFklF+vyZisHdtF30Pu/MyrtMwlvT0sfR3b35ROXglZUNWL+1bbAPg4qI4d0L\n7nW9PQPWemzyywhvnxnMwqq8Dd16Xjbw7Fub8bfFn0JSzI5wvxxEhRpCTIs5FZ4hBLqS6cAO55h7\nfSC5w9tdeacMDXd/+Ces2LEy18sKkt1s3uddFZW72uaANSpniZSOp17biN8/uWqwD4WKiOHdC1kD\n1iw9ZXfdqFDOx42kWXk74S0Z2N5kNoFLqtkRHrDCO6rFnYFRhiEQTrnDe5Arb1d4ufu8t3Y24LP2\nLXh8w9N933eZjjZ3BzYHrFE5K5fWKiouhncvSK5RNYprkpae3Pz94zC+60QkPj3G83gqYTbD233e\nkiTgXApY06X65QBCaghxLY6UZm5oCOFpKh/sZnMtT+WtGVquzXsl84unXAasuQepsc+7H6w/Z+bY\nBiLqHsO7j3yyAvf0qN0J+lVMCnwZRkcthKv/OxaVYRgCuqvytkmqGXw+KYAKXxACAklh9pUbRmaz\n+WBX3q4+by0d3rmWUu2tzLDOvO97sLgvWDjavP/K5aJsOOK5HZ4Y3n2UOT1qTxTFOtVGeluR8iMS\nT0HX0n3e6RdYlbcUQIVaYT1vPmYIIJxKjzAPJ7NDMvMfbCKl4911u5zqvZjczebJZPrnLtcxLnrv\niz7tO/N7Rx/gLyJNN7BkZQOice+88u7AZp93/7Fpt3TKpauJiovh3UfmwiSmQu5R9dnh7VqkROgq\nWjsTcPLUGm0OpPu8VRFASA1ab2pW45l93pnN5l/s7sIPf7cUb3603XnsuTc346EFn2D+si0FfT63\ndVta8doH2/I+7xlt7ro4CLtuYVv4Xu/fF8jRbD7AX0TPv7UZT762EX9fUu953N1Uzmbz/mN4l065\ntFZRcTG8+6jQ+7xtimIlvHuFMV3BLY+uRDhid3obTsVsh7cCPypUc8CbZFXjhiE8TdI7Irs8s6wt\nX7sLAPDU6585jzU0dgEANu3o7NVxA8Cd8z7C35fU521+y9fn7b7/3JD7tiJa1gxrA/w9tHFbBwCg\ntTPheZwD1orD/nOyabd0mN3DE8O7j1RZ6dWiCnblLQz7vxKc028FuiS5m83NKlsRfgTUgPVYesBa\nXDPv8z669itoT3Rgbct656V2FSP7EqhvMwPcp5qj4/vTbJ6vOvKMNk+6wtvVIiAUb/gV/J5Z93kP\nbFC2dZnHPbI64Hnc22wuEI2nsO7z1gE9tuGEAVM6bNUYnhjefaRIhU2PalNVO6itjQ1X5S7Sk7TY\n7CpbNgLpJnopfatYzArv0yedAgB4a9sK57VOv/A+a3DvqofwcdNa+K33T/ajSszXt5tvkhZ35d3X\n8M5s8hvoUcntYfO4R1T4PY+nXIP0NM3AnfM+xp1PfYT6hvYBPb6hzv6nM9AXZW5CCCz9cBt2tQ7u\nfAmlwlaN4Ynh3UeqrOLkoycAAA760qgCtvc2m8vCHd7Wn8E1YE3yJyAMCbLwwWc10UtyepKWmBaD\nLBQ89sIuHDhyf2xo24hdkd3m7uzAC5lBMn/Ty/D5zPdI9WPu8XwDX9yjzd39v+5BdYbct+VMM9/S\nEAKabmTNvFYq9mfOfD8tY5KWLTvN7oimMlxvvJw5zeaD2POweWcnHnulHr+a++7gHUQJsfIenhje\nfeSTFXzntC/jjh9NwWH7ju5xe6fytprNZeSqvN3hHYNIhqDrrv512Wo2N4CYFofQfdi8vRMnjDfv\nH/+0bZP5vN1vnjJHqTdGmyGpZgWZ7EezuZ5jGlAhhGeeb/e0oRHXKHhD6Wt4Zw9Yu/z3b2DO3z7o\n0/56w90FkDkoTcszYI0LbPRNb6vD9zc04sEX1xWlqozEzC6q4VqgsvIenhjefaTKKmRJwtiRuWdP\ny9o+Y7S5e7S63Q9uN5srqg7Jn4RIhKDpBnyKtYqY5K6844BuTtEaUioBAM+/vRHN7bF0FaOkB4nF\nVXOFMvfgqriW7hMvRGbl3RZvx/ee+ylWtb0PqGY4u0MtYbgCW+1bs3n2gDXzd7vSLaXWrvT88cmM\nFgv3eXT/LGekdyyhIZbo/2Q1A03TDWzd1TVg79fb6vCP89fivU92Y3cBTd2vf7itV5/FMATunPeR\n526NoYyV9/DE8O4j91SphVCt0eb2JC2q5FrW0x6wZjWLT5xo7lskQkhpRlazOWCGt6GZj8swt4+m\n4nhx+efpK20lHRqblXcANenp835iwzO4d9VD+KhpbUGfQc+oPjd1fI6ElsCyliUIHvkG4Is74W0I\nA5qhQYHVV9zHyjuzz3sgFwGx108Huq+8NU/l7Q3vK+5+C1fc/VaJjrB0HlrwCW55dOWA9eH3tTp0\n5k/Io7E9hsdfqcctj+afXz+ztaSxPYZ1W1rx10Wf9umYyg2ze3gqaXjffvvtuOCCC3DhhRdi9erV\nnudmzJiBiy66CLNnz8bs2bOxe/fuUh5K0Vz+le/hrP1metb2LoRdeUtWda1KKn71PWu61IxmcyVo\n9puKRAU03Ug3m9vN6rIBXegwUnZ4p5/fvKPTudIWcgp1FWMBAElEoY773FMl2iPUN1rN7T3pbrIH\nSTGgjGx0giypm8EXRJV1Avp2q1jSSHpaEIq95Gh3uqLp982cRU3zDFhzDTTsY7N5S6wVN7xzOza0\nbuzbDors/Q2NAFBQZVsMfa0OMy8oMxXy/0vmn2y4dX2w8h6eShbe//rXv7B161bMmzcPt912G267\n7basbebOnYvHHnsMjz32GMaNG1eqQymqI2sPxxn7fb3Xr3Oaza3wliDjgL1roMiS0w/ujDb3m1+Y\nduWdsjPErrytMBO6VZFb64VLio4dzRGzipEMQNYxOjAKFx30LfP5j
ACt9JnN7YVOr6plfAnYI95t\nysgmZxR2QrdmiDMqrffuW+W9UnseoWNegz20qbezRdW3fYZH1j2BVB/mWe+KuirvjLECqTxzm/e1\ngnz1izfRlmjH3DWPFbS9EAJPLKnHui2lvT2tKuTreaMi6Gu+9HSPfUE5LHX765DHPu/hqWThvWLF\nCpx66qkAgAMOOAAdHR0Ih8M9vGr4UuzR5s7tZVbftyJD2KPN7T5t1aq8k0GkdAML3ramFrUGrNnL\nhcIKbwjF83wkrjkBH1KDOGj0gZ7nbVU+c0BbOJk7vNvi7Xh03ZOQrIuJzCrHvtf8lJFnw4hXQK5u\ncyrUlNXfrYgghC47y6D2VhhmOMk1zX16/b2rHsL7uz/CxwV2Dbh1uirvzJDwDNJzN6FrfWz+tVpy\nNFHYRcb2pghe/WAb7pz3UZ/erzvukfWZF2yl4q4OOyNJrN3ckndbz/H11I3ShzJ6uGXdnlh5G0Lg\nd3//EP9c8flgH0rJ9G6asF5obm7GYYcd5vw+evRoNDU1oaqqynnspptuwvbt23HMMcfg2muvzeov\ndBs1qgKq2rum6p7U1lYXdX/dGdlsNT/azeaKitraavhUGYmUt887FJKBlFlZ+/wqWtpTwCjXJC5W\neAvdrIpGjrA+hxXOmiGchU1GVY/AXnXWrWzW/u3PXREIAl1AzIjmPBfLPnkbK3evQuAIGfH3T0f1\niJBnu+QXZrhVh6ogkgHIwSg0w0BtbTVi7eaAMkX2QST9gJrM+R7vbVuFUcEafHns/p7Ho/EUKoLp\nqk8Zux1GR61nm978/Xyh3v+9U64vPUOSPK/3B9LHJrv6XYMVfmc7dytBT+8dCJj/FDVDK+g4O+Lp\nC7Fi/3/c1pluUQm5Pk8pqT7FeZ9fPLQEja1R3Hftydhv75qsbSOx9EVVVXXQeV2u44y7rrnyfY6a\n1phnm5he+N9tKGgKpy+cC/k8w+Ezh6NJfNrQjk8b2nHJN78y4O8/IP9mSv4Olsz7ZK+88kpMmzYN\nNTU1uOKKK7B48WLMmjUr7+vb2orb91ZbW42mpoEbTdvVZX1BWAEqdKCpqQuyLOWYpMX6YhYyOrvi\nUCUFCddrneZva8BaW6s5ktsO/65Iup9Y0hR0tVnPWzO0NTZ2QpIkdMbMintXuAlf7GyCX/Z5+vI7\nwzFnv1IwjJaWCJpC5nsmkjpefOdTqOOARFQ4rQApI4mmpi7s6jAHOukpCdD8kIKRrPNtCAN3vvMQ\nAOCBGXc4j2/Y2oY7nlyF807e32yokAClphkpyfBML9ubv19LR5dn+22NYUgSMKG2yrPdZ9s7cO/T\nH+Pq849EY0u6RSIWT3le3+EKuIireb2tPepsF0+mq+jujvWtxmVY9Nkbzu/23ydTMqXj3U9247iD\n69DWnv73UKz/j1es24VdLVEctl/61sfWtuiA/DuJu85vo9XPvnFLC6p82Y2Dja576Ztawmiq9uf9\n99zSkm7ty/c5OjLOZVNzz68ZSlpb0/8f9/R5Bvp7sVQiroWEBvrzFPsc5rsQKFmzeV1dHZqb002d\njY2NqK1NV05nn302xowZA1VVMX36dNTX1+fazbDh3ELk6vMGkNHnbQ1YU+1FjmVougG/Yo3Ylg0E\nfIrTbG73ecPwNpvHk5qzTYUagk/2eZ5/7q3NeGfNTqfPOqEncd1bv8b1Cx/CZ9s7nGN2z58uBSOe\npuKuWNJpAZAMn3MsQtagG4bTbA5dgdB8kBQdCc3bdJ7Qc98+ttIaLLVw5RanA1JSNchVfR/5HE15\nJ0/59V/+hRsf/lfWds8s/QyRuIZnlm5yms1HVPg8zea6YeCL3el/nO4+b/e98O6R/d01Xc5bu8Dz\ne0TLfaG6YPnneHThBjz56saSNO3OXfAJFiz/HM0d6XM1UPO25+qXzddk7668e1qOtZAxEpnvM9xW\n4ervx+mIJPHBp03FOZgBMtz+hrmULLynTp2KxYsXAwDWrVuHuro6p8m8q6sLl156KZJJ88t85cqV\nOPDAA0t1KGUhff+vNe847D5vKV1NWuEtK1Z1bshIaQYCavo+74BPTt8CZjWb6xrM6t0K51hCd6rz\nkBqCIiuQhAzJev6fK7bi4X+uzxpwFg5twd3/SPehulcFU0Y1oiWRHhxlGMK5QJCFz6m8JUWDpgkk\nrNHmwlAgUubFR1vcezUaTaXf372wiv1FLqvmY0I3L07kEd5+UPMiQcsK5lw6k7mvhA1hYHc0/cVk\nT6aj6Qa6oklUhXwI+BVPiK1YuxtrXQPFtDyD19yz2el5phAzRPbjLbHcg9B2tZih/vmu0t7j3tKR\n/ruUMrzdrXG5Lm7yjST3hHcPo80LGayV+d7D7YvffQ76MjPh//z9Qzzw/JohNfVvrgmlhpuShfdX\nv/pVHHbYYbjwwgsxZ84c3HTTTXjuueewZMkSVFdXY/r06c5tZKNHj+62yXw4kK0Ba3a/tV15T6yt\nAmA1nctWVW6FNwwFKV0gqKbv8w74FSek7cldNN2AJBSn2Tye0JyAt5cTlaB41wuHQEJPoNJeKxyA\n0FTPVbq78lZrt+PvDQ8imdJR39BuTlpih7er8oaiIaUbSOr2iHgZ0Mzwrm/x3pIW1dKh61772/4y\ntS8OjC6zGVcOeQc8aprAw2sfw8+W3YRIKuoJccMQePHtLc5kOJ3J3IG3cMur+M27v3fudbfvCtB0\nga5oCtUVPvhUb3hv3tHh2UfKM2DNtba5PUJd0pHQcg9Ei2vZrQ/5Rv/b/w/phijpl1OLq0uglMud\nunMkZ3jnCdGwK7x7Or5CgjgrvHvY519eXo///r/lPe63XLg/X19Gntu3Cw6lqX/zXSwPJyXt877u\nuus8vx988MHOzxdffDEuvvjiUr59WXEqbzugrdHml5xxMPbfewdeiSsw7AFp9n+FDE0zYOj23Oe6\n2Wwup8MdsKoj4Qp1pG/NspcTlYXqHW2uaBAQ2K9mknO/t4hXpudgR+4Q+cvL6/Gv9Y046/9NgqRo\nELoC3YCn8tZ1A0nDWr5UV2AkzGOYt+kZHD/hSAStVdJirvDuSHRiZMAcnGR/v8jWoDsRr4DQFUhB\n7/GkdANrms1j//mymwEAPzjsIhwz7ij8a/1uzH97C0LHCEABOhPp4HdXa+/sMCfvWLV7DY6qPdwJ\n70RKRziWwoSxlYgndU94B/2utdxlAy0j/gUlPgJSMIIW3Q/AHHyXTBmAZCB45FuYv6kT3z3sW1nn\nM7P1AzAHreVi37FgGKLHirM/8lXeiZSOrmgSY2sKm1WwJ+4gyZWx+YI3Ek+fn54uYgoZaZ35Pj0F\n/turd1rbGVDk8p/nyhPehkAP89rkNZRaJIbSsfZV+f+fN0yMqQlaP3mbzasr/Pi3KftClc1wkkc2\nIpwKQ7Kq8ZRuIJGy/keUzD5vJ9ytyjulGea93q5wloLm1fLY0Bjzd6E4zeZAetWyoBLElV/5sfmg\nrHtmrAqnIhgd9C668q/1
Zn/0xoYOc1CcrkI3BISRWXlbzea6DL3xSxBJM7Cjrv5cd3gv3rrUqdad\nudntixFdhYhXWp/JfZtQdoDtipjHZ37BC2cZ1Q5X5e2e6tReS317s9msbs+E12atJmZW3rInxGLW\nQLTbLjsBoTFtSFR/Dv8Bq+GbsAmrxItOU3hS0yH545D8CWzsyD0NbVw3gzKoBHDKPicCQM570qOp\nmPP31Q0BrciVhbs5tdm1drm7JeGOJ1bh539a4Zl5rj/0HirCfBcoUdd0sz1V3oWFd+ZtgIV98Wd2\nKQgh8PFnzWU3Ha773PYn1IbSLWdsNqeiGVUdwG2XneBUywq8k18okgJJ1RD48ofYEdllzaomIaUZ\nSKYEhCFDkg34fa6QFq5lPo2McA5EASFhTMhscpZyVN4AsGZjJ3738GdQjRAgG051J4RAOBVBlTWR\nS6akpluVt2p++en2RDEaNF044W1oMiBk6O3mYEU7oAHvILKPm9bipS3mGIms6V11FUas0hz17k+/\nRtMMyJL3f2FP8Lmmh+1IdDrv51621J4AJ2m9zl533V6UpLrSD58qw3AtwBK3ngv61Zwz7dmfMakZ\nkHxmOLfEWz2f3WZX3idNnIqJVXtnfwaYs9XNee9ObAq+CkBYK6sV98vJHUSeytsVjvZ88u5m6/5w\nh0GuUMn3GfU83RQ5t+1L5a27jyv//jOPb/naXbj3mdX466INPb7nQHJ/hP5c8w2lanYoXWj0FcN7\nAO01phIHpk6D3joOU+r+n+e5zBHGftkHVTFHmyeSulllywZURXaazYWr2VwYCiRfylkARA5GIeuh\n9LzoIqMyt5qk7XlzzIsD3ak8E3oSmqGhUq1AcrN5n6Q9hzoAJDTdDEddhaYJT5/3X15ej664GZS6\nZq+mZr7WDnXAW3kDwEeNa8xNnT5vb+UNAFIo3XSe0DSnYjyq1jzGlGEHp56ezAaAgMD7uz92nks/\nYU9ba430z2hTHFFhhjeQDji7sgr6FShq9rdh0khCNwxsbwxD8iec998VzZ4C2J7oJqQGnb9VKiPk\nP2hcjY5kJ8LyLsjVbVafd3Er77hrBTXPimnWZ3avsFasL3F3tZ85h735Prk/o2dq2j40my98dytW\nb2rJu02+VfIyZVbe9iDGTdtLv2hOb3gGBvbjNoVi/z9XSkPpQqOvGN4D7IpZU3H1cT/AtMO+1O12\nqqzCp5qVdyJlhndNtYq6kaH0wDO72Vw3zIFhAEJfXQqoCUj+BJRUFYQQ+OPzaxCNCUiygNPsbM8X\n7p5iVU734dn93RVqJTsCVIYAACAASURBVPTmCdA7R8OADsBuEk5Bks3Qjic1T5/3Z9s6sGqTGVS6\nZlXyVmVu94UDQDSjv7cl3obGaHO6/9OpvBWIlNns7p5mNZyMQkDgyNrD8c39Z5rnwqpaw7GU83q9\nrQ5CAG82LEdjW8QTRPY99kKyBt9l3F49wmo2B9Jf1vGEBglAwK84I+LdknoKTy/dhKde/wzwpZug\nd4R3ZW1rV95BNQjVuqVPM7zhvXLXh87PytjtVp934V9OWzq+wJ/XPo5wMpJ3tHE8zxzg9mduaDSv\n8tQJG/HytpcKfu/u9NRsnm+kuztce2w2z9hvNK7h6Tc24Z6nP05v002fd+b+3ecvc8rcZmtA11in\ni6w86D20cBS8nyE09Vyxu5XKEcN7gPl9Cg6eNKrb2eQAwCer8Cmy1WyuQ5FVCDkJQ04BkvWl4fR5\n604VDgDKSPP+eilZiVhCw/ufNmXdCy75zdCwQ1FYfeZ25b0zYgbNCL81QYDzeiu8EXFeH4lrnsob\nSFfYuqZ4Xp9wVd6fN5mVynXH/Bhn7GtOpdueaE9/Qcqu+9l17/EDQJc1rWuVr8IJPrvyjsRS6dHq\nsSroLXthV2wXfvXss3jh7S3pE23tLwnzizfzy7raVXl/vH0zVu/YjFhSRzCgQJYkyEqu8E5imTWo\nya68ge7DO6QE0pV3RrN5S7wN1b4qyEKFXNmRNWBtV2Q33tuZf33zf9TPx6rG1bjx5b/i9sdzbxfP\n009rn4+GRnNMgG/CJqxu/xDhWApPLKnvVxO6O0dyZUrmMqw276IwvWs2D8ey++u7azbPvIBwH1Pm\nc01Wd8OoEYFuj2mgGT3cklfwfoZQNbsn9HkP2Axr1DuqYt5fHEtqSGoGAlAQTnXhXflvgGwu4iKE\nq9lcl5wFFeRqs0lQSoUQtkfmWkHv+9IGpD4/DFLAXrnMHDls6GZzvH070spdq8ztIxMAtDmVM2Qd\nMFTEpU4oMEeoR42Uq/K2mrqtCwwtZVW2OZrN12zdBXUsMMJf5dzSFtPi6S8J2T52NT0JnSss7TnZ\nK32V8Cne4AvHNE+fubbjAKhjd0Ie0YpVG9OTB9n3w8eMMO54/37ElNEA9nKe19ROrK94BsrY/fBk\nwyLz/RJnOyPOJVflbUSrIFeEkdCTqAgoiCU0p88bAHZEssM77qq884V3OBnBmNAoSMlKdIR2Q0fK\nEzi3vncnAKAmMAL71UxCwJ7Uxz4uawBdomIbNm34ctYxAN5mczc7HKMJLf33ADD3pbVYs6kNmiHw\nvZkH5XxtT/L1ecuSBEOIvCuCefq8ezlgzT1ffa73BrxVW+b+3bPmuS/0hBDOQL5yC7nM0eZ9NZQC\nkc3mNGh8soqAT0VXxPyySfc3Cydw7C/7ZMqAcC2bKVeYVZIwZIStLys7PNW6bZBrmiFb4W3Ezfu8\nDatvWlENGMLA6uZPEMIIvPCKNWGIYc+/bo149pnNqCJe4am81boGyKN2Oc3Qeko2mxFzhLd7xLs7\nvJ2Kxj2TnD0Lnavytu9Dr/RVOLPI2f3F4VjK2b/QVAgtPdGNh7WNhhS2djag0f+x5+k2YxeSUgT+\n/dMLm8STOkKBdDcBACTWHwe9dbzzGYP281blXaFU5q68dbvPO2Teiw/vrWIpQ0Ncj6PaV4VqqRaS\nBBiBzpxNyvd/NBd3rLwPQgi8/uE2ayY2gdZ4m3ksqubchZApka/Z3AooTRee127aaf5/YfSjedId\nJO4+b7v1J6nluaDoplk7U+aXeFeOkfLdNptrmeGt53zOfftavhYDIQSefXOTZxbDgeAZbd6Ppu+B\nWqSmGPaE+7wZ3mXi9Emn4OBRBzr3OvtkFUG/4vzDE3L6S8eerGREyAy8aELzNM/KlWZ4G7qcbtbU\nXaOiJQEpEDOraWsCFfteclk2oBk6UkYKWjQEZ35Su9ncqnxl64vciFeiPZxIr3AGwDfhMwgrZFMp\nCaOqA1AlMzyT1rSpumFO8iKEOUNb0BXedsUl5HSfd2azPwBENKvyViucCxk7+CKxFKCmK3e4lk1N\nnwcjPSFOPkp2c3IslUDQnx5dDwBC8zvvsaO1AxV2ePviEJoPtYFx6Eh2eia+AbwD1pZ+YIb79tb0\ngCd7lrsqfyWqYN72JwKdeQcP7Yo2YmP7Jjz+Sj2WvN+AjmSXZzIcuTJ7MFVcS2Bly
/KsVeeAdEBp\nugHZdZ99NGVdlAT7vmSonmcglWKHd54Q9FbehQ9YE0KgM9q7ZvPsyts1sM89IY/r4iffRcfnu7rw\nzxVbcftj+bs4SsGd17kGBvbEPb/AUFGs1oZyxvAuE//fAWfgJ0dfZt0iBqiy2WxuS0npL2A7qGsq\nrfCOa5B82TN1CUN2+vjcfeKSrEMKRK0mc8nZFgBk1XACUNcl1768fd72hCkiXoH2cDIdrjCb0u3K\nW+gKVEXGiKDZPG/fLhWOpszPofmh6cKpvONaHAnNACCQ8rUDwgxGu9nefTucHUqVvgrzVjtIzoC4\ncCzlDG4TKX+Oyl3Af9BKz2fPJQVr/vdPj4HeYYYnanYh4LcvqlyD6qxz8MTrG8zKXElBCkZhRKsw\n2mfeKpdZfcdc4b16o1khN7anJ5SxZ56r9lVBMsygFJLebRW0rvlT5+ftHebAwUlV5gBJKZQ9Teyz\nG1/EB13L4Nvn06zn7PBKaYZ5+6HFvmjpzz3NIk+zuT1oMpmnP7uvfd66Yc6alykz4LuvvHM3m7tb\nLqJGFz5sXJ31Pok8XROl1t8Ba3Z4r9ncUvKpeYuluwuw7vx10QZnEp5yx/AuM4p137JPVhH0eatl\nN2HIGGmFdyyhQW8dl7Uvs/K2vmxEOoilQBSSqkEkXTNluSpbu8/VHinuft4OTykQMydesSZnCfpV\nnDzCmkFMNiAk3ZqaVIJPlVFTYb5XJGmGVWs4BikQgxGvQDJleJrNkykdck0z9EA7KhP7mHO4Z1T+\nABC1Ku8qfyUkSYJPVhFJxLHgnS1WeNvN5n4AMoQhpcPfl4AywgxLvWW8s09ZT48U3m+vaucWPpEM\nQsTMufn9B6xG20irerIH1bmqe8jmjGxyVbvZzN01GmN85t9nc8fnnr+RHd6bGiLOHPbuPm+7X7/K\nX+lZtz39hZT9ZRxOpPvZd0fN/v0vjzBnN5QrO7NGnO+KmhPb5Ap2O7xSuuFtclfSa8c/99YmvPbB\ntqzX9iTfaPN05e0Nu2ff3ISXln+OsNTsdH/0ps9b13NX3lpGuOVbqx0AYnmazd2tBLvHvYSH1z6O\n19au97z2b1/8H/wHZy+GUyhDCM/FQ8Gv62cVav89GhrD+M2j72PTjg7c98xqRON9v3ArplxjI/py\nwZJM6Xjzox34y8vre964DDC8y4w9baoqKZ7KO4sho6bKbPKOJjSkPj8MX459A0YsPamK0CWn8naW\nEQUgWc3u9qxn9v4As9nZvlXJXXlnjVZXXCPMAVQEVewd2MfaRoMOzemHVhUZFX4zFCNWsOzobIYk\nCYh4BZKajpCSEd6VZr9gZXQ/81hzNJt3psyFEsYEzYrYp/iwszWM55dtMf/B+lyVN2BeaNjH75rn\nXa4I43jpAnNbpB8/cOJIp5lbaH7rIsDU5WswH5fTg+Ls1gF17834rKUBcrV5cWCER2Kczzw3G9q8\nM63t6mqF0GX88dkNCPnM/bv7vO1b9qp9Va7WAyNdfarZYbR5d5vzc7u1GEyNOhpGIgg51JX1ZaZI\n9oWZAXWfDVDqvjB/l1zN5prhad2RrM8djafw0vKt+PuSeuyM7Ma8T5/POV97Lkae+7ztqYTXbmnF\nR5+lBxf+c8VWvLBuGT6vfhnq3uY8+T32eXtmFzN6rLx1XXQ72txTeWu5K2+7p+mJ1z51LpQMYaAj\n1QZlRO5FZwpx3zOr8V93vdVta4emG57lMM337t993plTwP7+yVX46LNmvPnR9l7vq9iefXMTfnTn\nm9jZ4p06ubtBh24bt7Wjrcv8/zVfyLd0xEs6HXFfMbzLjF15GxCe8E6PJbcYCmoqrfCOpwChoEau\n9TRf64bkDFjzfPFat4l5mrqtn9uqV6fvv3Y1J2eFp6x7Xl8RUOFTFXMOckWH4QlvCRX/P3vfGW9H\nVa/9TN/19H5OzknvIR0SEjpEulIFiShYLyI2BEQR9PpD5aJX5d5XQbHAtYAIypULWABpIXRIg5De\nc0pO3XXKej+sMmv2npOQkJAE5vlAOHvKXrNm9jzr356/wfTMWax0xyDt5EUKSdiOhxjTYM+5eRSY\nJjgdgxEYq+w2H3D7YGkmKkxqERuqESB33eR9z/k51OD4GZztI2F5FehIjxDu/mRMxylzRvgxascA\nsf34rgGLndIG8VhnODZGNZaFPvlpkZvgZSphKnG0pVqwrm+DiGl7xENPoQskT5vT8LwDuXXqoBTz\nlhdQPO6rsAXKmMqRmBc/EwCwTYqZ9xeY7CuJg2QroJhF9GT7AxKnQo42MQijeQN0Rt5xU4fteMg5\nebylPwa10idSYXnnfCL58Su348mtS/D0tucwHAghIg8j2DDD30f+/Cf3Bd3PWgO18DU2Fsfx8Pra\nnmFL1uRzOR4JlXYtzXrfXZ33cAlroXFuhYhEtqCG/b4RAReWkRvHlOJbv34Bn//RUwGyGS488Xah\nlogfcC/Du9Uudnd4aMlGAMCK9cFFUVAlL/yaO3uz+O7/vIyb734RQDjJ9/Tn8dWfPov/vPe1sm0H\nGxF5H2Lgcp8e8QJu85OSl+CyKR8RWeeEqEjFDWiqItxXhq4G4rfE8RPWvLxvkYsabznWy+uwY9vx\n5zUPBT4D4Mufqi4AAqjB2vKERevS4eo0EU3xyds0NCTMIHl35ujLl1reXiDmXXRcEVsnpCRhTvXd\nxUNuH+pitejpz+P7v30Zjh20qDXTptYwczcTT2rqwv51drbD3dXC+qYbgOohldBx2xePRW1lDBk7\nA0MxAaIGLG9DYeSt+AI1gfkC/GQ3x4DjemhPt8EhDr551xMAgK5cD4jiwcumEDM1DGR4jbwjXLfC\n8jZTUtzft7x5XH989RjUaC1iO8cga4X6qwfXw8vSmv2/L1+BL972NJ5bSePv/YX+wHGinaylwXE9\nPLVlCfr1jZClCbjl3ZcpAIoL64h/iYXGa10rUIqubA8Gi0P4zSNv4KofP4Wt3ZlhNbdLX7YD2SJW\n71oLrWETVOba91gI47W1PfjRH1/Dd38d7o4W51IdvLTzFQxk/UUst4qD3+1hlf009DYa/y+zvAsS\nebthbnP/M0X1hFUnt9flHqF9xe6M561d9HmRPQHDLYzeLrRS5aJDEKW6GYFF2zBW86ad9J70MC3/\nsORH3tt+1cbesm0HGxF5H2KQyduSyLs53YA5jTNgKiwm66mIWzoqkqZI7DE0FfUVPkm7riLI29ky\nDsUNkwFIwiEy2Uj//0bvWwDoAsHfTv/fHLNMxHJlyzwRM6DrKrW8VQdEcaEpPB6uIcXc5isHX8fD\n6/+B3iJzKRcSKNgutnXmoCoqNnX3omh7Qq5UfAdhMWtOiEYBLhxk+k1c87MleHNzHwaGXMhtTxW9\n6LvMAboA4W5zVodOHJal7hIYGvMU6P6POGNnYaksN0Amb7Yw8VAU4QNSQt6KXqTKdVDgekR0U4Pq\nghCC7Sx5jeTSKBRdZLJ+jTx/6QvL20j6
[base64-encoded PNG notebook output omitted]",
+       "image/png": "[base64-encoded PNG notebook output omitted]",
ii64O7e3tSE+nReUyMjLQ3t4OwF6XC6D909LSclXaeCVpamrC4cOHMXbsWJw9e3ZI\n9oWmaaivr8e0adMwbdo0FBQUIBAIgNertmZnZ7PnPV+9tsHCU089hYceeoiVFDp37hyCweCQ7AuO\n47Bu3TosX74cr776KgBc1m/kmloAgDhTRi5Iov4ZbHW5ent7sXHjRmzbtg0+n++8zzfY+4Lnefzx\nj39ET08P7rnnHhw7dizuGON5Y/uCDKJ6bW+//TbS09NRUVGBvXv3AqDPF/vMQ6EvAOCVV15hSuG2\n225DcXHxZf1GrimFMZDaVEOBtLQ0tLW1IT09HWfOnEFqaioAOkJobm5mxzU3Nw+q/lEUBRs3bsSy\nZcswb948AEO3LwySkpIwceJEfPLJJ+jq6oKmaeB53va8Rl9car22a4G//OUveOutt/DOO+8gHA6j\nt7cXTz31FLq7u4dcXwDUggCA1NRUzJs3DwcOHLis38g15ZIaSG2qwUjsSGDOnDn4t3/7NwDA66+/\nzvpg7ty5+OMf/wgA2L9/PwKBADNFBwPbtm1DWVkZbr31VrZtKPZFe3s7y3QJhULYs2cPysrKMGnS\nJLz55psA7H0xZ86cQVuvbfPmzXj77bexa9cu7NixA5MmTcIzzzwzJPuiv78fvb29AIC+vj689957\nGDFixGX9Rq650iCJalMNZrZs2YK9e/eio6MD6enpuPfeezFv3jzcd999OH36NHJzc/Gzn/2MBcb/\n7u/+Du+++y48Hg+efvppjBo1OBYF2rdvH9auXYsRI0aA4zhwHIdNmzahqqoK999//5Dqi88//xxb\nt26FpmnQNA2LFy/Ghg0b0NjYiM2bN6OrqwsVFRXYvn07JEkaMvXaPvzwQ/zqV7/C888/PyT7orGx\nET/84Q/BcRxUVcXSpUuxfv16dHR0XLZv5JpTGA4ODg4OV4dryiXl4ODg4HD1cBSGg4ODg8OAcBSG\ng4ODg8OAcBSGg4ODg8OAcBSGg4ODg8OAcBSGg4ODg8OAcBSGwzXNqlWr0NDQgOuvvx6jRo1CQ0MD\nGhoasG3btku+1u233z6gctePPPII9u/f/3Wae0kcPHgQ//mf/3nF7+PgMFCceRgOg4KTJ09ixYoV\nF6w+apSKuFZ49dVXsWfPHuzYseNqN8XBAcA1VkvKweFS2LNnD7Zv345x48bh4MGDuOeee9De3o6X\nX36ZLaizdetW1NbWAgBmzpyJl156CcXFxVizZg2qq6vx8ccfo7W1FUuWLMH9998PAFizZg3uvvtu\n1NXV4cEHH0RSUhKOHTuGlpYW1NTU4OmnnwZAa/M89NBDOHfuHAoKCqCqKubMmYMbb7zR1s62tjZs\n2bIF586dAwDU1dXh9ttvx3PPPYe+vj40NDRg0qRJ2Lp1Kz7++GPs2LED/f39AICNGzdixowZOHHi\nBNasWYMlS5Zg3759iEQiePzxx1FTU/Ot9LXDEOGbLNbh4PBdoampiUyePNm27f333yeVlZXk008/\nZdusi8scPXqUzJo1i/09Y8YMcvz4cUIIIatXryZbtmwhhBDS1dVFamtrSVNTE9v37rvvEkIIeeCB\nB8jatWtJNBol4XCYLFy4kOzdu5cQQsiGDRvIL3/5S0IIIY2NjaS6upq88sorcW1/8cUXyWOPPcb+\n7urqIoQQ8q//+q9k8+bNtrbX19eTs2fPEkIIaW5uJjNmzCA9PT3kq6++IuXl5WTnzp3s2WfNmkUU\nRRl4Jzo4XATHwnAY1JSUlGD06NHs7y+//BI///nP0draCkEQ0Nraio6ODiQnJ8edu2jRIgCA3+9H\ncXExTpw4gby8vLjjrrvuOogi/ZQqKytx4sQJ1NbWYu/evXjyyScBAPn5+cySiWXcuHH4/e9/j2ee\neQYTJ05EXV1dwuP27duHpqYmrFu3jhWkFAQBjY2N8Hq98Hg8WLx4MQBgypQpEAQBX375JUpLSwfa\nXQ4OF8RRGA6DGp/PZ/t706ZNePzxxzFz5kxomoaqqiqEw+GE57pcLvZ/nuehquolHTfQdRbGjx+P\n119/He+//z7+8Ic/4MUXX8Tvfve7uOMIIRg1ahReeumluH0nTpyI26Zp2qBa68Hh6nPtRAAdHC4C\nGUD+Rk9PD6tO+sorr5xXCVwOamtrWVnpkydP4sMPP0x4XFNTE5KSkrB48WJs3boVf/3rXwHQtS6M\nMuYAUFNTg6NHj+LPf/4z23bgwAH2//7+frzxxhsA6BKlAFBYWHh5H8phSONYGA6DhoGMprdt24b1\n69cjJycHkyZNgt/vT3h+7LXOt+9Cxz366KN4+OGHsXPnTpSUlKCmpsZ2P4M9e/bgt7/9LQRBACEE\nTzzxBABg2rRp+M1vfoP6+npMnjwZW7duxXPPPYft27eju7sb0WgUBQUFeP755wEA6enpOHLkCFau\nXIlIJIIdO3ZAEISL9omDw0Bx0modHK4Q4XAYkiSB53m0tLRg5cqVePnll1FQUHDZ72VkSb333nuX\n/doODgaOheHgcIU4fvw4HnnkERBCoGkaNm3adEWUhYPDt4VjYTg4ODg4DAgn6O3g4ODgMCAcheHg\n4ODgMCAcheHg4ODgMCAcheHg4ODgMCAcheHg4ODgMCAcheHg4ODgMCD+P4xSKOOE0RxSAAAAAElF\nTkSuQmCC\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x7f72fab5e290>"
+ "\u003cmatplotlib.figure.Figure at 0x7f97f1e98d90\u003e"
]
},
"metadata": {
"tags": []
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAe8AAAFnCAYAAACPasF4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJzsvXe8XVWZ///e5dTba3pCQiAJCSWE\nIJGmoSSgjsg4gmCb4Tf+dCwURUdEQXGs41gYFQvDiIyIiKIIJIAgEBJCgJBKertpt59z76m7fv9Y\nu55zboiQBCL783rllXt2WXvttfden6et55Fs27aJECFChAgRIhw1kF/vDkSIECFChAgR/jZE5B0h\nQoQIESIcZYjIO0KECBEiRDjKEJF3hAgRIkSIcJQhIu8IESJEiBDhKENE3hEiRIgQIcJRhoi8I7yp\nMW3aND796U9Xbf/iF7/ItGnTQsfdcMMNoWOWL1/OBz/4QQB2797NCSec4O3btWsXH/vYx1iwYAEL\nFizgkksu4bHHHgPgpptuYuHChSxcuJCZM2fy9re/3fudy+VC19A0jfvvv/9vvq/Vq1dz1VVXHdSx\nDzzwAF/72tde9bVcvNbz3wi46667+P73v/96dyNChFeE+np3IEKE1xsbN24kl8tRX18PCBJas2ZN\n1XErVqxg/fr1IZIeCZ/97Gd597vfzW233QbAqlWr+PCHP8zDDz/MV77yFe+4+fPn8+1vf5vTTjut\nZjvr16/n/vvv55JLLvmb7umkk07i9ttvP6hjly5dyvnnn/+qr+XitZ7/RsAHPvCB17sLESIcFCLN\nO8KbHm95y1t49NFHvd9LlizhxBNPrDruuuuu4+tf//pBtblp0yZOPvlk7/fJJ5/M4sWLGT169EH3\nq6+vj09+8pO89NJLXHHFFYCwAPz0pz9lwYIFmKbJypUrufTSS1m4cCEXX3wxS5cuBYRV4IILLgDg\n1ltv5atf/Sqf+MQnOO+883jve99LT0+Pd53ly5czffr0qmu98MIL/OM//iMXXHAB73vf++jq6gKg\nu7ubD3/4w1x88cWcf/75fO9736vZ18p7ueqqq1i4cCHz58/njjvu8PatXbuWSy+9lAULFvCBD3zA\nu85I26dNm8b+/fu9893fy5cv5/LLL+fqq6/mM5/5DAD33nsvF110ERdeeCFXXnkle/bsAcC2bb7x\njW8wf/58FixYwC9+8QtvrL74xS8CsH///pD15MknnwTAMAy++MUvsmDBAi644AI++clPVllMIkQ4\n3IjIO8KbHhdddBF//vOfvd8PPvggCxcurHmcbdssWrToFds855xz+PSnP82dd97J1q1bARg1ahSS\nJB10v9rb27nuuus45ZRT+PWvf+1tt22bxYsXoygKX/7yl7nqqqtYtGgRH/3oR7nppptqtrVo0SJu\nuOEGHnvsMdra2rjvvvsA2Lp1Kx0dHYwbNy50rVwux8c//nGuu+46Hn30UT70oQ9x9dVXA/C///u/\nzJ07l4ceeogHHniArq4uLMuq2VcXP/nJTxg/fjyLFi3il7/8Jd/97nfZt28fIISiq6++msWLF3P+\n+edzyy23HHD7gbB+/Xouv/xyvvvd79Lf389Xv/pV7rjjDh555BEmTpzIj3/8YwD+9Kc/sXr1ahYv\nXsx9993HXXfdxerVq0Ntff7zn2f69OksXryYn/3sZ3zuc59jcHCQJUuWsHv3bhYtWsQjjzzC1KlT\nWbly5Sv2LUKEQ4mIvCO86XH66aezefNm+vv7KRaLrFy5knnz5tU89oYbbuA///M/KZfLB2zzO9/5\nDldeeSUPPPAA73znO5k/fz533333Ienv2972Nu/v+++/n4suugiAOXPmeNppJU477TTGjRuHJEnM\nmDHDI85ly5bVvNcXXniBUaNGceaZZwLwzne+k127drF3717a2tpYsmQJzz//PPF4nP/6r/+is7Pz\ngH2+8cYb+dKXvgTAhAkT6OjoYPfu3Wzfvp3BwUHOPfdcQJitb7311hG3vxKSyaR3P21tbbzwwgue\nteO0007zxuepp55iwYIFxGIx6uvreeihh0LWlkKhwPLly/nIRz4CwKRJk5gzZw5PPvkkra2tbN26\nlUcffZRiscg111zD2Wef/Yp9ixDhUCLyeUd400NRFC688EIefvhhWltbOeuss1DV2p/GzJkzmTt3\nLnfccQezZ88esc1EIsFVV13FVVddxdDQEIsWLeLrX/8648ePf80TfXNzs/f3Aw88wJ133kk+n8ey\nLEYqVdDQ0OD9rSgKpmkC8Mwzz3gEFcTQ0BBdXV0hC0Q8HmdgYICPfOQjWJbFV77yFXp6erjyyiv5\n1Kc+dcA+r1mzxtO2ZVmmt7cXy7IYHBwM9U1VVVRVHXH7K6Gpqcn72zRNfvjDH/L4449jmib5fJ7J\nkycDMDg4SGNjo3dsOp0OtTM8PIxt21x++eXetkKhwBlnnMFJJ53EjTfeyK9+9Ss+//nPM3/+fG66\n6aZQexEiHG5E5B0hAnDxxRfzve99j5aWlpo+2yCuvfZaLr30UsaPH19z/8DAAC+//LKntTY2NvK+\n972Pp59+mk2bNh0yLa27u5sbb7yRe++9lxkzZrBjxw4WLFhw0OcbhsGaNWtqCiGdnZ1MmTKF3//+\n9zXP/ehHP8pHP/pRtm/fzr/+678yZ86cA17r+uuv58Mf/jDvf//7kSTJG4OWlhYymQyWZSHLMrqu\n093dPeL28ePHI8uyJ3xks9kRr/nQQw/x+OOPc9ddd9Ha2spvf/tbHnjgAe+6g4OD3rF9fX0kk0nv\nd1tbG4qicN9991FXV1fVtrs6IJPJcMMNN3D77bdz7bXXHnAMIkQ4lIjM5hEiALNnz6anp4fNmzdz\n+umnH/DYzs5OrrzyyhHNuKVSiU9/+tM8/fTT3radO3eyatWqEaPKR4KqquRyuZoa9cDAAOl0milT\npmAYBvfccw8A+Xz+oNpevXo106ZNIx6PV13r5JNPpre3l1WrVgHQ1dXF9ddfj23bfPnLX+aZZ54B\nYOLEibS3tyNJ0gH72t/fz6xZs5AkiT/84Q8Ui0UKhQLHHHMMo0eP5pFHHgHgd7/7HV/+8pdH3A7Q\n0dHBhg0bALjvvvuQ5drTWH9/P+PGjaO1tZXBwUEefvhhb2zmz5/Pgw8+iKZpFAoFrrjiCjZt2hQa\n93PPPZff/OY3ABSLRb7whS+wb98+7rvvPn70ox8BwgoyZcqUgxrvCBEOJSLyjhABkCSJCy64gLe+\n9a0jkkEQ//Iv/4Ku6zX3jR07lp/85CdeVPiFF17Itddeyxe+8IVQBPrBYM6cOfT09HD22Wd72qaL\n6dOnc84557BgwQIuu+wy5s+fzymnnOKtPX8lLF26NOTvDl4rFovxwx/+kFtuuYWLLrqIT3ziEyxc\nuBBJkrj88sv53ve+50W4z549m3nz5
h2wr1dffTWf+MQneNe73kWhUOCyyy7jS1/6El1dXfzgBz/g\ntttu48ILL+TPf/4zN998M5Ik1dwOwvJx88038+53v5tUKuUt8avEO9/5TjKZDBdccAGf+cxnuOaa\na9i/fz/f/OY3ufjiiznrrLO48MILec973sN73/teTj311ND5N998MytWrGDhwoW85z3vYcKECYwZ\nM4bzzjuPdevWceGFF3LRRRexZcsW/vmf//mgxjxChEMFKarnHSFChAgRIhxdiDTvCBEiRIgQ4ShD\nRN4RIkSIECHCUYaIvCNEiBAhQoSjDBF5R4gQIUKECEcZIvKOECFChAgRjjIcNUlaenuHD2l7LS1p\nBgcLh7TNNyOicXztiMbwtSMaw0ODaBxfOw71GHZ0NNTc/qbVvFVVeb278HeBaBxfO6IxfO2IxvDQ\nIBrH144jNYZvWvKOECFChAgRjlZE5B0hQoQIESIcZYjIO0KECBEiRDjKEJF3hAgRIkSIcJQhIu8I\nESJEiBDhKENE3hEiRIgQIcJRhoi8I0SIECFChKMMEXlHiBAhQoQIRxkOK3lv2rSJ888/n7vuuqtq\n39KlS3nve9/LZZddxo9+9KPD2Y0IESJEiBDh7wqHjbwLhQK33HIL8+bNq7n/a1/7Grfeeit33303\nzzzzDFu2bDlcXYkQIUKECBH+rnDYyDsej/Pzn/+czs7Oqn1dXV00NTUxZswYZFnm3HPPZdmyZYer\nKxEivGmhGxZL1+6jWDZe76542NuXZ822/te7G0cNXtjYy879wyxduw/Lsl/v7rxq9GWKrN8x8Hp3\nA4D9AwVWbekDoKyZPPdyN7Y98tjmSzovbOw54DFHGoetMImqqqhq7eZ7e3tpbW31fre2ttLV1XXA\n9lpa0oc8Z+xICd8j/G2IxvG143CN4d2PbOTXizdw3twc11x+6mG5xt+Kf/nm4wDc/+13oSiHTn/4\ne3wP9/Tm+NEf1ni/48k4F8075rBe83CNo/vcf3XzQpobEoflGn9rX+79+jv4+d0vsmzNPmRV4aK3\nTq55/I9/8SzPv9zNdVecytvnTHjF9o/Eu3jUVBU71JVuOjoaDnmlsjcjonF87TicY7hhu9BwN+wY\neMM9p737syTjh2YK+nt9D7dWaKobt/dz2tS2w3a9IzGOXXsz6K3pw3qNg0V3zzArN/YAsGnnAKcd\n117zuA3Oc3hh/X5mTWw+YJuHegzfUFXFOjs76evr8353d3fXNK9HiBDhtcE180lIr3NPqqEZ1uvd\nhTc8SroZ+m2aR/+YvZFcOJZtY5jiG1EPYAVqrheWgsHh8hHp18HgdSHv8ePHk8vl2L17N4Zh8MQT\nT3DmmWe+Hl2JEOHvGq6LTnrjcTdGRN6viHIFeRtHsc/bRb6kv95d8GBaticQqcrIH0mLY+bP5N44\n5H3YzOZr167lW9/6Fnv27EFVVRYvXsz8+fMZP348F1xwATfffDOf+cxnALj44ouZPLm2ryFChAiv\nHW9E8tYj8n5FaHp4jEzz6CfvQun11byDQWeWZeP+UuSRddn6VAyAzAE072x5iKZE4yHp48HgsJH3\nrFmz+NWvfjXi/rlz53LPPfccrstHiPCGwf6BAo3pOOmk+Nx6MkXSCdWbEGqhe6BAQzpGOukf0z1Y\noLk+QSJWHbiZzZUxLZvWxmRou+Wazd+A7H0kzOYDQyUUWaKp/rUHSFm2TVd3jgmj6pEliZ7BAk11\nCRLx8PMoayZ9QyXGtde9pusVSjq7e3OhbYPDJbJ5jaa6uLetN1MkGVdoSMcrm6BYNtiyJ8u49rqq\ndwOEANWXLTKmrbqvA0Ml4jGF/mzJu+dK2LZNV0+Ose11ntnZtm329OUZ116H5IxTXeBdz5cM9vbl\n6WxJeedYts2W3VniMZljRjfSkynSmI6FYiJ2dQ8zpq2OmFqbZGudUwslzbdmmJZV9bdl2WzenSGV\nUEknVOJxxfuOhgo6fZkidakYqYR/naV7V/B/G+7lIye8n4s7zjng9Q8VjpqAtQgRjkaUNZMbfvYs\njXVxvv+pswD499uWIQG3//v8mufohsXNd6xg9nHtfPQfZgLQny1x48+X8455k7jk7ClV51z7388A\n8D8VbbpKhvw6cLdpmWzL7mBq85SawsOR0Lw/++OlQPW4vBosfm4X9z6xlcvPO45Tj2vn33/6LBOm\nZ7A7N3LdnH+jOdEEwDf/70V2dg/z7Y/No7059aqvd/MdK+jLlkLbNuzKcO2tS0L3c8Mf7sYup/nF\nxy6vauOexzezZNdKmuoVvnvlZVX7n1i5h3v+spmvXHU64zvqve2mZXljB3DlBcdz3pzxVeev2TbA\n9+9dxZknjuaqd5wAwOMv7uH/Ht3E+88/jtOmdfLvP32W9iZfcFi5uZdfLd7I208dxwcvnAbAqi19\n3HqfiKq//v2z+c7dK5k+sZnPXSFWSGzcNci3fr2SudM7+fgls6r6kc2V+ffbljF1XBM3fHBOjdH0\nEdT8g0vvXFJ/YVMvP7l/rbddSuZomLUSuXkaVqaTz922jHHtddzy/73FO+avu5cAsKJ7JRefeGTI\nO0qPGiHCYYRmiAlhKK8BYDj+tQMZP4tlg7Juhvxre/pymJb9N/vcfBPhkWfvezf/ie+v/CkrulfW\n3K8bZs3tbyT8cevDfGHJLWimxsrNIsh21ZY+9vYXIFair/FZ+kuDdA3v8c7Z2S0ijQcOMrhJt2qb\nkSuJuxZ2De0hPmkDieNfrLm/qzdH4riXKI15ofY1MkVsoKsnrOFXmrbXjrAuf9veLADPrNnvbXtx\nUy8AK17uYSivIaWHyE+7H7mpx2lLRG4/8aI/Zv2Be3VzAGzYlfHb3LsRKV5gxYaemv3oHiwCsGVP\ntuZ+0zIxLfG+BX3uZoC8NSe+oC9bDJ0rN/Wjy3kxxoo4d09fPnRM2RDPOqkcuSVwEXlHiHAYURlf\ndDDapghSssnGtzGsiUm1NyMmt6DPc1XvWnoLB0524pL366F5P71HJF7al+/2tlkBf+PR4PN+ZOcT\nDGnD9BbD45zJlVEa/WVcRaOaaJUDBEC52DW8m2v+egNL9jxb+wDJIjZlNXJzd2iz+1yf6FpywPZ7\nC/6qHsuuHm83IK43EyasSvIeyVRdy6Li3rdhWpiWTWzsVtHGpA0j9jN4vf4KoaW/OMCSwu+Jz3hu\nxPMrz6nEf734E7723HeBcLR7Tisi1WWIH/8Cw8ZwVV8AJFXz/pbragsHZVMck1CqXReHCxF5R4hw\nGFG5tEc/iKU+Zd1Ead9LpvU5/mfdrwF/cnU1hf35Hn625k7+47n/8jQGoCoDl6d315hkNw5sYU3f\n+oO+l1eLxri/TjVI2G/0pWLuhAygW+EI6d5MESnuE0ZBD5MfgHIQEtOjO/8KwIPbH625X2ndj9q+\nl8TxYeuFu7xpW3YnALZR7QEtaQZFxSfvWn0cibzzAQKT6rIMpNbXJH+5xj2qTuCXYdqifcVpq0
Yf\na12vp6Ivq3qFCVtOjEzQwf6XtDD5dg3vYcfQLnoKfWim7l9L0fn+y98mOfNZlOZetiUfreoLgBQr\nB/7WqIWyeeSj0CPyjvC64I2UZvBwwqwgU10/OPKWG4RWtze3D/AnJ3epUG9RTMq6pYcmm0rh4EBW\n8x++9DNuW/2/r9ifkfBiz2q+8dz3KRrVpBCc6IPEFyTv16p5W7bFrSt/7hFg9X4bpW0vcsv+mvtf\nCbuGdnt/l4zw5NybKSLFAuRtVCeRMi2bIW2Y32/+MzktX7U/eI2JDeOq+g4gN4nnbNvhB1jWTWzb\nJlN2TMuKUUWufZkScr2vKeZr9NGNZu/LhImx4JiWpbosyZnL2Bd/gd3De6vOryWfuEuuTMuirJtI\nqmjLNqsDNF3BsxAwZe9zTNKphAgEfMkhb9saWRjakFvtPefKe1m+z3cZ5PScZzavJGJNyZLT86G+\nACEhDTV8zu83/5lfrL0LzXnHa1lgDhci8n6TwPUvWrZdMYGaNY97pW2vBat61/LJJz7PtuyOmvst\n2+Kl3rUU9dIBr23VEAAOVV9/uf433LT0m6/6fLcfQfI2TCtEriMJMJpmIsUFIbYmW4Cg2VycP1jy\n/YHByaaSED2zeeC3bdvkdJ9MKv3ouiGIwbKtmtqWi9vX3sXu3F5W9673zgHxXILm/JAGG+hfppzh\nzvX30F3oxbQsBofL2LZd9QxFX6rHqrfYz4bBzdy/9aGq4yzbxjBMYsesIzaxtrm2ss18SQ+ZTHcN\n++RdOSn3ZkpIcX/cCjUEGNO0+c7z/81fup7imb3Lvevphskftz7MZ578Mn0lIaTJkh+xXiwb7O4f\nRG7sQ2l0yLscDnzTdJMhLYdhi/5KEhR1v49lzWT7/iGkhE/Y+RoCxLDdR+yYtfQMD4W254o6iZlL\nSc70a04MlAZDxzyzdzm7zZeRm3tInb6IPbl92LbtRZAXk7tZMfCMr3lXQB2zleuXfImCXgwJoK5F\nJp2IYds2e/Ou8CXh2pJ0w2JgqESuqDNYyrIz/gyJ414C/JgDwzK4e+PveaFntX9fWt5/xnJ1v7Kl\noWqzeby25m1ZNn/peoqVgfaPJHlH0eZvAnQPFvjCT5/l4jMm8fLOQbbvG+J//n0+Dy7bwX1PbuPm\nf57LxFEN/PWlPdy5aCPXv382MyYJ0vjNXzbzyIouvvWxeXS8hsjZIO7fIibbv3Y9w5SmY6r2L937\nHHdv/D2N+jF0r5zOj649J7QsA2BTV4Zv/t+LfPySWcydLrLzPfp8F3c/tpkbPjgHPdVNS6KZ0XWv\nLnPfc/tFAJBhGajy3/aZvLxzkO/cvZIPLZzGceP9VIr5khEycRumTUyt1iZKuomUEGSwfVeZNW39\nXhCNaxbvCfgyQ5p3gBwf2LaY4eQQ0IYkSZR1k49/90nOOGEU557lB9Zc96On+MAFM5h/6niG8hrX\n3LqEc04eiz3xRV7u38R/nHUjsQOMgaZb/P//+SRnnTiGf3nHDD73k6VkpN0kRCBxyKToE7PFnwZv\nB6A50cSG5Z1s2JUhEVcoayafuewUZk5u5cWe1WzdrPDw091V0dvd+XDw0n/d8xLb9g3xb+85ke/+\n5iWuuGgikmKCbGFaJorsE+SqLX384Herue59JzNrShvb9w1xyy+fB+CbH5tHZ3OKVV27vON/8dBq\nxqnT/WsnXkJp9f3QtUzSmfKgR3j3P72de34Dn79iNt/69UpSpz8ROvalbfvZ2TlMc32cz922DOnY\n5SSm+wKQVKHxLVu3n98/v5LkTH9btpynLp6mpBlc/+Ol5EsGiVk+mQxr1Zp3X/ol1NR+8oqBbvhR\n0kPlAnJdmNAHy2F/76833AdAfLLQqH+y5EFSPafQ0SKeUXncc7yUAzlZ+x5iEzZj2LBpcAu5cnXf\nDNPi9kWrKDrmckm2QBZC4pduX06PE6TWcEwXeJ+5ze0PvsykUQ3stzZXxRIM6znyJdFfqYZQ8Z3f\nPUeb7FtB5MZ+5PQw2BJIdugeilr1+bWEuMOFSPN+E2CjE7X50LM72b5PfJCWbXPfk9sAPzr0waXC\nf7Z07T7v3EdWiIIxm7p8Te+1wtXmRlp77EbuZhFmuv6hamn2ryvFMfc+4ZeSve+vIjBm+cbd/PdL\nv+CW5f/5mvsa1BoPFktWi34/tGxnyOddKOkhzbsye5aLkmYguf492eLhZ3d6y1hcTb7HMZvHlXhI\nU3DJ0bAMFu34CwPNKwDxvIediPdn13fTlfMjfVEML3p2l6O1PLVqN893v0TeKJAp1Q7ScbEvI/Yv\nWbMPy7YZGCqHTI1lI0jezrNP+pO1bulelHDZuc+la/ezYWAzt6+9i8cHBUm8vCus+e2vIO91OwYp\nlk1+/dgG1NHbWbRGmFslSZivg1i0XBDzn5buAMS6eq/d/gLL973Attxmb5tml0NLBOzOTeJ/UwgE\ntczmPaVe729TEtaRxc+NUIBJ0dm0O0NXTw7dsFCawgFykmqA5L87f1ixKqQVA2RLeedehCY7arRN\nLOW/vzm9uo+u8UFp2093xo84HypWa+lBzTtoNZJi4t4yWYOd3TnvGVYimbb454unM2/mKN4+2yfI\nn6/9FXvG/L7KvVHWTZZt3hHaJsU0hgs6PYNFL9+BlvYtJA2NYoy27xtClqvzIeS0PANDzvtYg7yF\nZu5bshLTxfcjmXHn+v67nCtWZ4orRWbzCIcSlZGiUrzAQCHrmbfcCdUNsKn000LtwJRXCzenkSzV\nfv08U63j56t15ZST8CS47MM1t9nqa5N+g6biVxOIogdyJQfHslAyQj5vbQTyHtZySJK7QNsMCS8e\neRd8Yqg1BtlymKzcyF8Xe3P+RCkpJprmulWcyzb4wlqmXE3ewTEKEpebgSpE3gEBSDMsUDXkQKT2\nYDHnBWC5SMQVT4iT04JUKpPT7A1EsQ+X/D70JlcTm7iR4lh/nfJAKSx8uglz+lIr+dnqX4aEqoHC\nEHe+fA92zH+PJMW3mtiYge2Oz7aW5q0NBI4TRDE40lI/xaA3U/RiG1yhIISA1hcb5QsBVkEEBA47\nhNubKSI39DM0cTGmFCDvcjUhm5Lfn64B35ozXK6+n6DmXWt5m6aJ96w0AnnbisbZJ43lX981k7NO\nGlO1X2kJC2NlKUd8imOSdueCWJk9TuKaGcc0ITf1hPz6l54nhILebBHd9L8LN2ZgWM95Yyyp1fcg\nqToZR8gNCktSsQXbkpDrhjyXVqYQHs/GeEOkeUc4tIhVJNxPnvIUNy3/ukfq7oTvEnStmsG1siu9\nWnjBOCO8fqZDDF6QTsW1C3qRVervUNp3UyxXTxSGUjs46GAR/AC1V6F5uzm7K8k7XzIOSvMeChCv\npBj0Z/0J1jQtbNv2JlLN1MgXq33Kg+UwWelGONZhMKhNK4bXF89H3uATT7YGebtL2AAKZoA43Ykx\noKGEzeYWyROXED/Gj3LPFKsrMCVisqfpuYFKlQUt9
hd88t7W7U/87uQaxEDRHw/TMulveB65foBy\n0xZW9a1DM/yJfqhUYwJWDIYdTasQ8C3bloRqJ8g774zhPV+bQS2gPTvrg0dapy8pOn2ZkhfbYBuB\n4C6XuFS/j5Ll77cKIrmKaxbvzRRDhKZaooJXvkLz3jS4FSvhH7c34z/zXMDEbmbbkGyZTDDOooal\nwRVkhgvV38yY5Hh0S/e+p2DSFu/8eFhrjU3Y4AluFB33k6qxs1tsax6TJTFNuLdkW4yHkhTj25sp\nUTQDz0lLOPeV9yPTlWrNWVI133IQ0MylvTPBjCHFyyROehqAgYL/DZzVeiGtyRaKRumIBeNG5P1m\nQ0CajKsVmrdyZDXvkczmXiCRM2lVLrfaNbybItmQ9haEJudqbg9ix9CuUDRxELkAMb0as7k7gcdU\nudpsHiDQkTSUIT3Qf8UMBVaZlk1eL2AENJ+hgJbktl+pLZtWOFguUwoICLJBWQ8njwlOpBkt7PuE\nsHBQNH1hyU0sEgzyqQxYq4zyHa6hESZiCrudSHsMYbKsDCQKmnF39PmWCKxqrTWoea8f2EivuoHE\nCc+BLO47GImdr0HekmJ4VoVcYLy1DXNRiFN0iNHVzmPHrmJDYZV/vqPlDeVqvE+2BIpBT7ZAr5sg\nJBCZHdNF/Elw3EzE3+ZQC9aQKBE6rPmad5D8k5YgviB59xT6+MHKn3r3D9CdC0SmOwKKOdSKtuUU\nVCsVGsOagVmOcDGU10LzjLZsY2voAAAgAElEQVRtFi0J0Qc3ULJWamC5gryDEfbGUKM3Bjv2i/dR\nSfnHj5VEPIIuFVBkib5MMWzCdsZzqJwj4zyDWj5vggKSs9/oHYdeSHrjLzlj1p0V42V0T6TdmEZK\nTWLaZkjjP5yIyPvvEAOlQTQzaEoNkETg5Yx55C32u2ZzzSpXSemHMsmHa3IdSZu3bLe/Yn/l8ifX\nZOx+XJU+tqLtE9NI0dI/X/Mrfvly7dz6w4Go3FdjNvfIW5FCVox8yXCehY0yagd7ctVLbwAKhk/e\nUkVErGnZVcQ8FCC/2uQttO6gmX446ANWzCrNO6g51zKbBzX3vOFf39e8S9imgm0qlMxqn3cQtZaa\nxWOyPz6KDtgVS+L00Du6dzAgyFnV01qwv7XKoxYD1oOc5vfX1WpRDE+wcTXvuvxUrFwrshX3rDWu\nEKS2+W4J28bT8mwIERtA0m5Ckm36snl6B4uOUO2/N0nTIe9gwJfzHWtbT8Z2hJu8JvrQmy2FiClh\nC7N60KKU06sF3IGCP0Yll7wHRoMZQzHTDGnDXpayWm4C1zIwVNA9rdUcGIXZN576mMidviWzXRxb\n49sXAl8wsMA/xsoJ8pcSBU/zjiXEOOp7jmVS8ngAslqWtqYkvZli2IK2XUT2DRQDgmhgjLRts5x7\nEGOcSqj+flMV30dIKLTodSL0bSNGb6ZIWhWBevkaY3M4EEWbH4XY1T3ML/68nk9eeiKdLeGi9nm9\nwJeWfoPx9WP5wunX8H+PbOIvL/oaZnACiFVq3g5Db2m+l889bdO89VLv2B/9YS3nzxnPFRccf1B9\nfOCZ7by8c5Dr3z/b+1DveXwz2ZxGLqWBAiDxg3tXsdkpSPCpfzyJyWMasQhr3pWlI7tdf6/zcXUP\nFkKBa3nTn4SeXtPFI8v3ceOHTvMi1otGkUw5S4MzET350h4ef3EPLQ0Jpk9soXNyteb98PKdrHi5\nhxs/dNorWiFcYUOpMJs//uJu9vUXkFI54pM2cHfXBo4ZfQ0dHdNC5xfMvC9WKz7hqopMMbGXb6y4\nN3R8vkLzfu7lbh7ZvAncVNWySV+2xH/+5iVQyySmr6Bo+WSlxk1PAHIzuAXJ+4k1W2nMdHHh3An8\n/qmtDA6VGTfL13SDxPenZ3aI8+MlbC2JpOrsGxxC003iMSUsSOL4CZ3+j++oZ3dvDqV1L08Ul1G2\nXQ1JRBkPFzRu+eXzDAyVuPyiseLWJBnLtugeGgScSHTJH3NbjyPFNJ7esI1ZiX5mTWmrGVQUFCCW\nb9hLYgZItoK2+VSSJz8VIkPN0kgACScVpmzF0S2DsqlVuUJsSwJLDWt5FRpfPpNAaRVBcXv6ZEa1\npukPHJM0WxlmK/Gpqyi91Iytpfz2jJj4B+zoHeCWX66gN1MiMcrC/WpiOLWoy4M8u24/v35sM23j\nstDqdlJEUvfkMlzzvb/S2ZRkz2CWeDNgim9GNtPY2GTKQ7SlWnjmZT8S37+voNbqru0W53em2wG4\nc/09nNwxq3a8i2yKNtzgMMdaUVpzJraewDZU1NE76Fk3FmjwrmFmOmlKNEFJBLt2NI9l3fYBVm7d\nBzEorT4Lu1SHjEJ/UVhrmuvj5J0xHNf/DrYMlGHKWi/4rqkuTlmvuIf++fR0OMl0VIP+vAZpMUZ9\nmRId44UrIK8XSODniT9ciDTvoxD//fs17O7Nc/+S7VX7smUhDe52tJYgcYP/UUE1eXuk5Ex++/rD\n2vdjL4i2Hti6iJuWfjNkuq3EH57ezoZdmdBktvi5Lp5d3+1V7zEsg1Vb+ymUDTI5jXXbhfZkOaTq\nkXeFGd9dJuVOYNv2DbFuh29CzZm+VnnnY2vZ11/w2ga8NchlS5DDLxdtpKsnx+qt/fz2iS0hs7mr\nzdz7xFZ27B9mYFhM/I/tepIvLf1GTbO6YYj+xlQ51Hd3PIPEuLXGWveiJTRZ24gJE51kkUooJOMK\nQx3LveMkXZBVIUBGmmFy2x/XMRQ0dct+pLrascf3IzqIxy3vOXmacUwTpk5bwlSKvLBR+JT/vHQn\nz6zd7yWPUWWVklXh/5QNpJiOrSWxTRXd0ti0W5hcS3p4vNpTbRhogM2oVnE/8amrKdhhbV+KldnX\nX2D7viGyeY2N+4VmO8FJbtJfEM9/zrQOkulAycfhZmxLwlIK/OB3Ivgp6Au1ikIjdCO1g+M1Sj8Z\nu5wS5tsa5OvmsVY1oRWu69/gCUFuxrPyunnYhhr2V1eQt2vilhQD07LpaEqScuSQs8fOo8mY6B3b\n0C76Kak6tiWDrXiad1e+i+37hsgVdU8rbUk0M8Y6EXO4ha58F89u3k6uqLN70DeB1xtCECrbBbbu\nzrJsXbfXx6ljBOm675rrLtm63/fne/0P3CMO8br7zh53BhMbxmNjM1QerhKgZEMoIak5j3uBeZKq\nYVsSdrEejDj67uORZBu5LksqoVK2xHc0a2In582ayrFNk9kwuJlp08XzH8yL91wkh5EYk5jAgN6L\nlMgzujXtPceGRJpPvWc2tiV5yk1bU9IXnB3yTlvtTFBO8PqWLfrfaaGkM7vzJE5sn0FnXTtHAhF5\nH4UYcgJC6pPVfiP7gCUvCJnN3UxIru9VqTRlSbVNzot2Pk5faaAqorkWatbudYSDsiHuo9NZu+ul\nAK2INh9Z8xb34i6BclG2AxOxc7/BW3Ozk2mmVm1Wl02yAZPycCk8ybhc/IctDzJQGqyZdcowLZAN\nCsmuqr7H
j3+exPTnvd+1AuLKtiBDu5T2+pSIKSiyhGwEAn1KQrovB8hINyykZA65MbBGOKC9h9Jo\n6qItJRYgb9MCbKRYmeZEI7aeQIqXqtJn7sntJ6HEmdQwAc0uhd6Vjsni2tZwC5gKyCb5ongPKrN8\n1cXS4n1QjFCZSxcJSbwbUkwjmw8kRTHFxDyrbTqqpNDPDuIxiX+7ZBbHT/K1HqtUJywA8ZJnBXH9\ntbGhiRyfOA2A/ny1sCNZCiCBWaE5O/uTagJFlogPC3J9dt/zvrAqmyT0Nuxio/C31iB/M9sqtErX\nv+1sb29OolllpjQdw+XT30NCrqO8+RQAzjhFVC5D1T2N2y6lMTPtKI0DyM3i25Ad8rzm1I+RUJKY\nvULI2Ws6Firn2zH6RzPJnOe0GXgXnb5ceubxpBMqaOJdcZMDlZx3Ttt6EqUX345VrAuR97hRTh4B\nh/iS8RjHNYtqeHkjXxWVPaV9lD+8DQNCaFUMMGOeddF2+iCpGsm44rXxrxefQjKhcsGkc0UDDb1M\n7KzHkp3+OO/8xLiwcClt+xjVmvb6m5QTzD6uQ5i9nW1j2tJV1gNVkZkxfpTTB92L1bDNGLppM731\nOD520j8TV0Yu9XsoEZH3UQg3pWFDuka6wVcIsAp+YO5yD9eXqCgyECAb+cDZygyrOjBDMzVRLEEO\ntx3KmuWQd8kh77FO3WOXICqXihkBn7duGV6gkrf8JlS9yaZsB5f4iD4G/es9gexfwSUvUqJA6rRH\nWbTjL962XLmCvKtyh9fI8mZaxI9byZ66p9haeDm0T2nuC/2u5VPXKGDbYDlZtSTFEOStSEiaX3fZ\ndLRGzfKfeX+5j8TMZUiq4S83CvrNAwFKdllMikrM94drugmqjiTbNMQbsEsppHiJTD6Q7U6y6Cn2\nMrZuNC1JQSZBa4LWuB3bkjB6JmBbigjGGhSknXeimG1TYczwOYK8Ee9lMmVWvXMtsrOkKFYmGxDS\nipYg77H1Yzix/QSM2BAtHWUkSfKIxb1HW0tCrOwJGG4msobisXTUif5nQwF8jvbs+DhtUw2Rr/ve\nJeQE8ZiCVaynM93OjuwuQd6ShSTbWIZ/vhCgrND5Vq4Fu9jgkYvSIN7rlqYYNjZJ1dHsFckjLtcq\nIyl6IChNwth/DBAonOFcI6UmUWQJa0jYyPP0e+MNYPaOp0FtAFsKPUM1bnrnx2Iylkve5QyWbVGS\nRTu2HgdkQdJObAKAGjP8sUMQn/usl+xZ7uVkd3FSu59tJnHcS6RmPYuk6khmjNaGROBaQEwnHlMo\nODEPKUX0rTMlNN5sOYuqytiyLtwWtqC5MbFjkWwFdew28vWbkRQD25KIqWIcG2PNSIkCclOvqG8e\n8Hm7z8H1a8tNfdhjnRUThloVVHskEJH3UYxaUeFBM26tJQth8hbHFsoOwcmSZ+6CEaIxAyjVIJ4l\ne5fzu81/Iu6UKHQTHlQm+we8oLr6VIzm+rgXqeznwq4OWOst9PmEqRiA7ZVelOoyJE56GjsogDj3\nIAX81K7mDWHylOurE9G4ZnMXlZp0Lf+pYfpJNoaMAye3qWV216WiiLB2J+eA5m1LgQxteYe8bf8e\n9pZ3ICkmetfxGN2TgLDmHXz+linGRFZNz/pSMjSSJz0FQL1aj1VOIUli+ZUXSZ7MY9kWY+pGe+lb\ng9HphprHLtWBkRBaqwQ9Q4Jsi6azpGr/MSQK47wJXU6UeEL/XxLTnwtZB1plx7edyntCK0DRsa40\nJxqZ2ijiMFIteeceAuRddDRvyRcw3ICidCzFqAZB3iUr8Jwd8rZ0550xYk6kcfC9EwlyknGFsm7S\nnmwjbxTIlYre+YYhuwMi/ne/rQpSsDVBCLGJGyFWoqlRXNclJUWWQBcEVrByoh+q7hEj+OlTvWVy\nAdO+KsvYWgoZGSvmm91BmHx1A5JyGjk9jNK2l8TMZ5AdQSKlJokpMmbJ1byz/HHrwxjNwuftWg2E\ni8dGaXfW5scCPnkH7rNetm8Fd738W4KYN2Yu7516iX8/ySFQdFRJCEiiLdcXrpGIyRSMIkkl4WXO\na3LqqWfKQyKHhaO5e5kiTJVYYTSSbLFOe1ospzNVYoo4/8KxFyFJoI7ewdi2tDf/ueMcU2XqnMC7\n2Litfl/N2EEVHDrUiMj7KEN/IUPylCeQW/bXXCccJCOj1gtVg7xdYrVtO+QTr6V5u/5qqC7WAHjL\nJJTGAZANr22fvG1PA3LN5om4Qkdziv6hEoZp+ZHyjoYeLIPZEyBeSRZtuZp3/NhVyE7mLjcgxtMw\nAm0EyTtoqQhOhi4KevgeNcMK+fprJWUIErxsB9s8sLAFIg7AUHJYpTS25ZyrGCTiCrIsYztadHnT\nqZ42plt+H12t08o3CpO1c76LEHk7yT0kxcS0bAzTIqsNeoFCti152rmUKLK/wmffnGyixZkw3XSu\nSBaWpHt+WDdCtzebc8bL0byNGLph0eFoS7HxIpuZXJ8N9bfDnoptg9wUWAoGDNvid0uiBVsT10qk\nxHlFowSWjLZtltBuXXOrI2C4qTjr4knGNAvhQx29E6VjlzceAGXNyXtQSiPJlne+uz+pCGIp6xYt\nSeH3HigP+uTtnO8SnEsGleZYs38Mck6YY+Vkgfp6cZ6vecvYegJsWJ9ZizJqJ5IEiuW7GWwthW0H\nnoOsE1fiKLLiLAGVSMuNSIkCx09o8oPLzBjDeY3ZTWcgqQbxY1cj1w1jJ7NOH5IidqMk+rJjaCeP\n7XrSfxCGK4CIMY5PWRsao+A35Uac10JKTXJK58zQNkm2ScpJLzmPa2lQO/YwNGoJBb1ISvXT5SbV\nBEklSaacJaZIQrM2VRJxcb5hWpT2jQ9dwzZjnvvw2JaJIj4hVqalIeG9h+51Fdm3HoTa0OO159rD\njIi8jzI8vnOZSBRw3Es10xAGyeBHf1hbtT84eWu2+LusmTy8fKfIchUMOlGq28+X/PaD5kkXwQpS\nUsqv4OOlHJRsQbrgVeJJxAR52zbc9sd1dGcdE6ZD8oWSwc/+tI7t+4ZYvlVIvHaAmAbcDGSBicI1\nobkfYFk3uOuRDdz57OPszPjpX3/1WKAkZg0ff7DYA8AdD7/MMxt2+PsDWt7zG3q4/+ltXoY1gBUv\nB7JG1bBkDBULWJbN/zz0Mt/9zUrW7u0CyRZBOs49SorQvFVZwpZ1UnIdVqbTm1SCfmQ3iMc2Yz75\nB4Uwl0D2noDpaeZim6abFAMa6JT6qb5Glyiw28ls5b5DxbzM48sdM6yreTt+U9fE6fZxy0AXS9fu\n8zVcI0ZXT47lS2JYxXqkej/gUJIgZTfz2TmfADOOlWsRVhGnbbmhnyFpPzNaj6cp0YBWEtey4jl+\nt/lPDJYzKHoDZt94QAqR992PbWbzfiG8NSTqGN/a4l03Ptl5FxzXgusxsYvChy4lnVgKR4BKqkkS\nMZmybnoWiKyWDWhsYdLxfMoBUhjVmgYkFIe8pXiJtMNHSVX0W5Yc0
6/zPONOXeykEqg1YMvCwuCQ\ntyXrYc0dyA6oSDGd3WN+i5zyg62GCjpnj52HbVbTQVJJEFcV8jlQibNreE9ovyuY6Hum+hslv5JY\ncL16LeKb0Xo8lxx7MZIk0ZRoqNqfjqX9zHqm6iXsKSf3M1jOkI6F6y00JxrZm9/PQMOLYi4zVS+W\n4p7Ht1AeaGFy9p3e8dZwi5dpsi6pYhtxJFVHVWTf8uhp3lLoHqxcE+WNp4KexDBtNnVl+PkD6w95\nIaeREJH3UQYvwtGSR9C8fXJdvbWvan9wOYcZINpHV3RhmFY4KrZm1R0/GKyW5h3URCXVr9Lk19AN\nrNX1yFtm1mThk3txU6+fwcohnadW7eXZ9d3c8svneXHnDsDRLBHLSTzLgeFrIp3pDq8PAC9s7OXJ\nrmdZXlgUShm5bmcgM1egb66/2fXLu9jTm+dXj/vJN4JLjH58/1r+9MyOUCYwSxJ/nzdnfDga10Hf\ncJ6+TJElq/exbscgK7YJ4cQq1vtai2z4ZnPZICY596m7+Zb9PnqWF1MNkH+15l3YO9Zr31svr1ue\nyVnfNY2J6SkhzXt3r/PsHRLq2qvRtdsJ7nK1UvcenWdh9ApNRx23lb+8sMeL5G9K1lPWTdZvz2L2\nj64al3pzFJObJqEbFtZwC5IEckoIdW5ynrdPOFvcS05MY7uNjSLeApDxScMn7yKPPt/FcKmIbcP0\n8e00pfzJ2DYVLjhtgvfeubGK9YrQqpV0nqb6uDdeKVVohZpmehaIgVLGF5Zcn7kTeCincsSOXYXa\nIVZttNbVMXl0g9NH8b6NHS15edBd8vVQIfx1NjaGfttaCjlRIjZlFSYaKZf8HfK2HdO7jS0sHDbU\nxVNccf5xjG2vJ2ZWL29SZMVZlSJR6vOjqMubT0HvOs57zu3pZibEnWWkqk5RFXPP5I4OLxVqXQ3N\n+x+mLOSCSW8T/ayxfGx0U6OnOYPvv3aRVivJ2zGdpzYiyRa2odJQkRBmzqQp3t9mpsPLQJlMqCTk\nJHJcFwKPa4Gq4bcHMHomYGU7aUzHMEyLp1ftZdm6/fRnj0x+84i8jzJ4ZGHEvIQQQYQCoCo0ye99\n8kzGjvJfZMM2mDymkUmjGiiUDQzDqliPWi0cZEv+MqNaPu9g6khJMTzy9uoDy7XIW+GMmaOZM80h\nXOe6biajYPUeOT0szLmOyTc4oU0d3eH9PaqCvLsHi6GsX36DZu2/HXPgLv1lnt+/MnxOILCndi7j\ngHnc6d+0Cc186rJpVUdqZjkkhA3oTgnIYr0n8UuqCNBRFAkUHQXXzxj0AYoJzrUE2IbqTToN9YHP\nXNHF0idLEe3bEpYsyLikGb7mbsQo66YnxMjJgle8xBUWTC0WIka3L+75AP/90X9gUuMElPpBhu0e\n9sbEWH78nbO5/v2zAbDyTVXjIluOS8AwPdLxVg441291TNXZrF1V79pSfWuEbz1wBQwDhRjzZo5B\nkiTU3aeKZUKKiT12LRPGiOuVyqKm9LX/cBYA8+c1M3/2uEAwWIJETMEGGmMueQ/6AW+u5u1o7kpr\nN2rbPuQ6IYTc8E9v83IPWI5PefrxKe+7cjVvL6eMHo7GnzVugvf3tz8+jwmdghzV9n0YUtkjb1fz\ndp+Vi3Qsxa1Xn8Ox45qIqTIzx4r2bEOl05jBW8ecDvhLSs0+EX9QrzZgDY5G6fdzPnz742+lNS2+\nydiY7QzYuzm+ZSpffN+5/MvFM4BqzbshVs/ExrAZ28qFBZJxLS2hnPZSxZyUrqHNB2FrqVBFwpnH\ntPD2U8czPjlZXG+oDdW5P1mSOG5MBzYW31/zQz/4L0DeDTFfwDH7x5KMKzTVJzBMS9R4l6Ct6dBU\nX3wlROR9lMElC3dyrUTIh1rxosdUORSJbdg6MVUmnVTRdKeggHJgzXsoQN7lGpp3KFtWgLx9zdvv\nk0fejmTtfaTudR3ydgPzpGQOuW4IK9vmE1egv2ogAdKYOmfpiUMm/dlSyKzuJVEIEHZQcLED2ZTu\nWH936B6DwVmVZnWlfTdKu798zG1TUYTJOwjbktCscMrUYVMEuNmlOo+0pFiZZFxBloVA4+ZxxlKw\nLRkppnnpJstWwIXg3INhB56pE8ErGEEiIdWhSeKZarqFZrmFMWLifdATyCjIyYDP2yFRvaSCGcM2\nFc9c6xKrazaPqTJtyRaQID9KVGhS+o9lcvNEjxRcK0pojB0TsW5YHmkpDYNI8aInILgTaX8mXPEL\nwFQC5K255F1EqsuKwCzbJ8J0cRJG9zEAPLN/GT3SJm98VUVmVLodWZJZ2buGPnmrFxOQiiW9dzet\nOMVB9Kz/jjvjbzlL+jwyQBBZc6LJS0lslcWzzpQy3mqKSh+xvP2tnNJxovd7Zsdx3t8xVWFWy6zQ\n8UmPvMU4G/smc0LsLG99eqXw3ZRwnoNkc4w1jytnvFcc5xatGWrj7JaFvHvM+wHoqCCphrgjPIze\nSVxOcOnUd4b2B8n7golv4wunX0MlyhtOp7R2nve7M91BIjYyTbkCigvTDs95drE+RN5u8NtFoy6l\n+OLbwYyhBoJZ61RxDz3FXuRkwUmyI85RFZn6eB0fPuFyJmbeAbZMQzqGqsjohk1vtkRrQ7KqENTh\nQkTebzDolsEL3atGXPJVMv3JtbbPO1A4voJ8Y6rirSEGQLZEBKVTYSlb0MKm3Rqad1+gwENNzTto\nNlcM8k4ke9Eh76CJ17CdJTfOB5WIK0h1Wc8n7loO3OVZSpvwVZt943yTslJtogY/o5N7P2U9LJgk\nqHP6GPQHB9ZDl/2JqXISDd5DZWrP+JS1xKesCbTpkLcsoVNhTrNUdFsLZR3TLFdzjnvFFKR4mURM\n8QOeLH+JkK3HQfXJW7c1Z3mM4pnNTcIJQkLEJTWiSQWQRIpUL3LdUB3BSyItNSKlh4jPWYSUzHkC\nUbkozKlWoQEplUNp24OccM+Pe/fdFHdIIZHHzHQwpnwasiT7BXMMv7b4jBZhnVBNJ5LesDxBQB29\nk+QpTwrLhy15/s7eTMl7Z+aOOhWAMaXT/HE2Y9iGitLc65fRDFilEjEFK+dr/xa+5qwqMnElzsJj\nzmNYy/F88REUZy11OuYHU6WoR5UUitKQJxDalkIqoYAR9zK9uVAlX5sDMHSFhBJnsJxlq5NCdErT\nJIKQyg28a8qF3u+JjX5lrrgqc+boed56cPDJ0hUQsFSmpWbzDqeNyY1+8hcARXIEViksCfnr6yVa\n9anETfE8O5rDxOmSN8C5o89hQsPY0H41UBP+H45d6AsLQVgqdqGJy6dcwTWzP8bcUbOrqskFMXfU\n7NDv9x1/ifftg1jn71aQA0g6wlZSjXvvnRog2/p4WJMXFj4xfm5g2+mjT0XVxfuSTsaIKRKGKQJn\nK8fkcCIi7zcYFu94nP9Z93/cv/Xhmvu9IDFbqql5B3OaV5KvLNuUrTC5xlWZdDIG2BhNO/xoVaiK\nNpcb+1jc/cfqvgQQIrMK
n7c6ertXHxfAQiz18j5OtRSuUSyHydsNGDKHW3yTskNoqiJj4pN3a7KV\nhJIIpYN1NSZzqIVOyzFhBwQc19xp5ZrQd87wtlea+4Jthgs01Fiap7h542VPsDGHm/nQ8R/ANhVM\nWw/lHNesssiFbckhzTsekz1BwF0/DIARR1I16p01/yaaFyTkBqwFyRtVJyb5ZFkvi0lIbhhk7eAa\nj7xtM0beqaLVoDoR5bKN2tnlkVCx4JiF841IEsSPXYM6QQRTeZYRSQpN0ma2jQ4nKU88oFG17lnI\nl97yWT4y/QOUN84hVRJJRXTDCsUyiPHQUOwEsiRjWlaoZOqxzZP477d/i3GcGDonKIyBsxzPQTyu\nYA2OQt80h45Um3+Q5QsYFx9zPh+Y8T5/V66JZEz1a0obNu2pdqz4MI0Nzn2ZKumEeBZuJjcXHzxB\ntOUSgmHatCRb6C8OsGlwK82JJi8ILvhadaTamdQwgYXHnIcs++MXU2WSCVUkxnFwaufJ4hoBzTKm\nysyfcDafnfMJPjDjn0J9mtYqgs7M3rApOzPsv++9mZIXhNpWURmsMemblMc3d3IgjFQO2MVJ7TM4\nrmUKkiQRj/vvu7Z9JlY5yWzlHXz5LZ9leutxofPG1o/m+jmf8n7bxbqQ5u0+r2CKYzVQddHVvF2Y\nw63e30GN2nUDphNqiPzbm4+MyRwi8j6ieOz5Lrp6wqkphwoaDyz1g5y2O8kLdgyJZSuPrOjinsc3\ne8uhvCQNstCUlq3bz12PbOS3T2xhqKCFfd4V5Js3CuGkIgHNW27qJT55HWpnIA96KEDGJhYo4wjV\nAWuWbYfK5EmKEYo2V8eJ7E5mthUz60ySkuV9nHvl1aH2RE1rvw61p7kYcZ/YHD92XVL1lr5JG9/G\njq4yacXPmAR45KdvO9ExHVdq3k7U9daTwIxTeukcZDPBkBZ+ZkENasPgZm596o/c+9ctxBM1stsF\nNG+3kIax5zjmjj0RLAXN0vnLCr82s47uCCaSuE9bglhZWCVc4UMPrO/V40iK5UUoIxu+VcJdR+ya\n6yUTSbZIyP6k26AKv3HsmHX8pe8BhmNOX4yY9+wanWPASTiiashWjGLJoqUhUdNnbet+bEWQvO1S\n2iPvYKnaJI2MruskEVOxsh1YTlSxHtC8XcjJAoolnv/9T2/HtGxithCwOlLtSJJUpa25goxVrMPM\ntnGccoa3TxwrYWY7mBYkA0vxtFZJkpg35jRa4+K9NbonElMV7zovbupF1uqRFJN0Y9k737VqeX57\nQNtyMjNahb/YNWnbNqT0jnwAACAASURBVExrOZaSWaZgFJnaPLlm8Q5FVvjc3E/xrikLKrZLwrxs\nJLC1BAoqJ7YLAVRRwiQPMLlpkhfU6eLE9hOIbTsXfdf00PZgVbvebJGCM1e11CdCxzUHyLsj3Uot\n/MeZX+Rrb72h5r4g4oHnlwz8bfZOoLzqbYxPTmZUXW0BIRiBbpdTNc3mSlCgCZJ3haBuF/0I+CDJ\nu0pJXSoW2t4RkfffH/b05vj1Y5u56X+eC23/5cMb+MNT2/ijk6fc9dmokkJfpshv/rKZxc+JZTa6\nqfumV4e871y0kcdf3MOi5bt4fkNPyOddGdzh1om2yk6QkWx6Pu9ahelD/uBE0VtD7aLSbL5++wAl\no+RPtorOcN5J0lLWQRITsbb5VH8Nsmx6H+cwvdiWRGnVOZhZ5+OXA+StaiKBhy37EbxOn+rTMXRL\nx9bjFLJJfvC71aTUdCi5hr+EJ4ahy1X36Js7nWIMRh2y1iisCZIFWMSnP4fS0uMtWQHYYDzDw8/u\n8pa+ubBtKeTzdjNC2UYMWZbEGnDZ4PHnffK2EMk3Jo6qByTQ48LnHTCbG5r/2bpaaSJtihSwiu6T\ntvMcOjqcNe+uoBPQLprjzc44OlYBxe+jm9K0PuaTr6Tqjt88TqFk0NaYrE3eAW3ZM5sjfPluLedY\nYFJWHRLz/LPOulndsJDNMEkAyGaSkmbw4DIh7F7UcQVXTv8nprUI7TFoKhX37rgjSmm0jXOZnvTN\n6u77Z9swJu2n6cSWQxM7wPunXIm2YwZm/1hiqkzKuc79T29n5y4np3Z6nTjdUkgnVc49ZayXZAXC\na59PmSpMvP9w5jGcMdrv07njzwx0vur2PbQ42cckSfJIpLT2TK4c93FPuw1mF4yrI5ugAS47ay7Y\nMmec4I/DhXP9wLjeTNEjrvGdgqynTxTvUFOAvNuStcm7OdHkrYmvhfEd4t0MCl+1zOYH8oMDTJFP\nQ983GZBFeteKtoKat+dWIOxDNzPtmAP+OAQ17wWnC5fD204ZGyJvNxvckUBUVewIoViuvfZv/4CY\nLF3Tn0veiqzSE8gnPVzQ6Q/UL0a2KGtmyHReLBuUpaDmHSbkYUeDtMtpSJRANompCnXJcO7lifHj\n2aVtCpO/G6S07xhGG7Pon/DnqoC1fFlDUkyxbjemISkG/UMlLMumZJSRZJvpbZP5+GfO56tPbKef\nHpAt74PSEZnF7HLaXx8qW77ZPKb564fLKSRkSAhLREdTim5LCwWaqXZSRKzLplgj6yWmUCmXbVER\nyCHsKWMbSU2qZ1sOT7CoS6pCY0oBqoYkWSL5DAifbkX0uhtjEJfjDL94BvETlgc0b9kb/1s+IqKX\nZTuGpYhc4u4MLSkGthHnuHHNfPby2Vz/2FKkZE5MHE77Wjkwm7sJJGI6LQ0xioqF5Wb0slQScoJU\nyuCmj8zllj8+AAjTopsfqjXRChUp6t1o9JyjeadUn4ileAlUDb2QwrJt0kmVn3ziHfz48TgbSi+i\nNGRoUBspBsgqpHlrqZqatxfxK0tIkh+kqBsWsRqEI5kJT7iYPKaBK+efSl+fbyFprwim0ndNI3Hc\nS+h7BbknAqbYoJY3OqTNSSGTKMC4pk7MHuGLjqkyHQHTsV1hGscU39aHFkyjdetuFu0Sgkaw1vak\n0Q386NpzhGUFeNv4M2lPtYX93QcoV/Ctj83zAh49Td2Ih4g0SE6V91OJd501hVMmt4a01ffNn8q7\nz5rMt3+9kr39ec+d0tqQ4NZrziYVF8cGY0Nqrek+GHz5I3PRdCtErkGzubftAH5wgOPUuazrEgpR\nkLxdn/dImncwT4W2KRA3QVggPPeUsZw+YxTppMpTq/wA1WT8yFFqpHkfIVg1UpUCVaYxw3KLhMih\nYhCFkkFf0c/JLSt+SktX8ivr1oE1b6fghuf/U0zH5+1XParPT+WczvnO/mCqVD/pQn3MCc6p8Hkv\nyQo/va0lhd9WFVWSBoZLXtnIpkQ9qiLTXu8kvohp3sdZtovexGa7NZklV/O2QdWwveAmmTq5Ednx\ng7c3J0WQn+l/1JYernYkMi4JE2nZ4V3h47dJxhUM1zfsCACphIrlraUuh0zwthFnsiYKIXjJLZzx\nPnPs6SiWWKftEroiS2TKQ0hIjKpvce6gVhIVE0yVeFymPhUThUEUC1s2sJVgoJjTD
yeoTZcLtLW4\n5nKfHBpiDWS1Idqakshp8fyntvqaVGstDckQZnt3kp7deiqzW+eIcUgNI8m2l9WsLqkSjym0SZPR\nd8yEgQl8aMpVBNXFUGCSLdf0eYeIXJFFwiBElbRa0buSmfDM+lPGNFV9R5WBQ9bgaN7ffg22YyUI\nam5BIvdWKQT6EkRdYAKPq3LIxxn0N4MTsJZUkSSJ9nTAOmGE1x2nEiqyJCFJEv90/Lt5+4Szqu53\nJKiKHCLaWvcUJKr4K5C3JElV7cnOto7mJLphsddZMphOxqhLxjyiDa7jrmXyPxioilxlNamteR+Y\nvINCyiuZzYPHnth+AnVqmium/2NVm3WBQlCSJHn9DL67ifiRo9SIvI8QauUZr7Xd07wlhd6MT475\nUrXm7aIuJV4iTTdDRSrCmrfN5sw28Zdjcg6bzZ3lN6UpXuIKKWg293Ihq6QSKkk1GdK8dctgW2GD\nc7AFZswj/L5Myat85Urnk5pEQJJclyURU9AtQ0RKuyZ3JxmDu9YbVRdm4YD/s0FpEfV3FZ2OppQg\n74DmrZXcQLhAZivHZFly5CK1bT9Kx26x3MPSPHJXFYlEXMEoOwJATAv5usHGGhjDhORkp9604Res\nUBNiQgsUtFBkiaw2RH28zsvFbLqme1dIkiyRWMJUfFOuQ85lCuiKIF+9GCAMxyeXNftobnL8pwGz\nbGO8kbxeYG+xy8vHPGOUr9U1JeqIyeIeZdstpOFkbnPMo+lEgg/NfC9WOemZ122nIlk66QpbNnax\nAXXvKbSkwlHESSXBuPix6LunoioyTfV+JLoLNaAdKrLkFXoQmncN8jYSnvm2crKH2r7HukCyjrBZ\n1m+/MR7O8hXsl/gd9h8Hr2NraYrPBXzRAZ93Q9zXhG0zTN6viFfBg0HNVKkIbHu1cO91pxO3U1dJ\nskqck9pnVvnjXytqEXWlUHWg/alEtQl+pIC1hng93z7nZs4c+5aqNmu9ZxAm/1cSKg4lIvI+QhiB\nu6vglsNUZYW+rK9590gb+O2m+/0DA8TqlgYtaWaIUIOat9zcy7J9ItLbKjnLpGJlYorsmM2dNddy\niua0Y/JSqoO9MEVO6sZ4AwOlQTQnA1le9zOvGb0TsE0VJSau35sp8v/Yu/P4qMqzf/yfs81MJpls\nkAAJ+yabICgo4i5Qt69WWxUXcKlaRVu1daFUpbUPuFT9Wbva1trqQ12hllddeLpp1YLWlcUVtAjI\nkkD2zHaW3x9nmXMmM5mQZCYZ5vP+h8xkZnLmJMx1rvu+7uuOWevL7eA9uXqMeVyl+/CrDx7Gqzv+\nbZ6npJ7Y/knrzKBmNUZxFy/ZhVRCoB0DyvxQDc0zbG533rKDrjkkbZ6rcLsrWJTvRWNwM3a173Iy\nd0kU4VckaBFX8PZUrsdR3xSBz96yUo45vxO/5IMkCOb6Z8mcKxdFc7cjuwMUAGiqfYFiPq+4OLGk\nx+nnbL3fiNaGmGiNnESCieYeVrOaFr0egZB1fK7gXR4wA+nKj58xHx8NYGBxYs4x4JfNoXMAJdoQ\nCLGg01TEzmz9PslsEqO5A5V5UWF/gNt75IiC0GGeWBAEnFJ9DtQvx6KqPODMwbqzMzkp8/YOm4uY\nV3YhYlumIbLpaMS3j4PSNNK5uEgOIgBQXtJx7tGdOaWbUxUEAQtGXYzohzM7HFcyRRZR2mFnP8HZ\nIcuI+Z2LG89FQYoe+r3NfUHiHjbvjeAdjWnmErqkQCUIAr459RKcMvLkbv+MVFLOb2e4oHFfdLmH\nsv0phs2TL9DSCaYY4QAS9RoAg/dByT1s/t6n9dANsxfuftd2llu/bEJMtTJcw0BdY9jJAPeXJ/aA\n1ttLrAIq8zXNDy8De/WtqI/sT/xQV/C1h5dlQYbeWG0uMSpuhqJ4h819QgClwSLo4SDEUKPzGu7M\ne2d9G0q1oYjpcTyx/jWs/2C3s7etumeY9fqJIri6pjBiVqFdsbWOcnRlrbmOdsBufNGyHau2/MU8\nUCt428PmghL3NOZwF0KVyFaLVCWC0lJ7eU7iP09Ts13oZm1VKCUqsdvaBGCH+SErVdShrug980nW\n/2NZMiuW7QsdqWq7N/OW4tjXHIEMa3jWmuMHzEzTzLytD3YljrgRQVxXUe4aQraXfCnDzKYgpSF7\nIwvZmUqwq5TbjVZExCYYutnD2mn5GPfDiPuwH19ik/oP8xQ0JqqI7S077SmX2JbEOmDAXPs/sMgM\n3pE2H9o3HO08xh42d9bhC4lhUfu47OBk/32bc9YdPwztAJuuGtcdJCVR8BSs+WQRgwODoe0fAqO9\nFOquMdBVn7MbXjDFvvbuzMo5Blfm7Q48/qQ51YmV46C3mFXlyRciboospXyvV0y5GPH35gGaL2Xm\nndziMxv8nmJAd/DufnAZ6JqKSHXBlC2ZsuxMz3FPz9gXAuky784Up/g7AwBZTrwWg/dByJ15P7Rq\nA155dyfuXvmOp9HK8sfeRn2zOTcc0+PY1xxFZWkAJQEFQsRd9GP9J7IztiIFUtUObCsyd/s5acDp\nAOBtB2oF4UvGLwIMEXpbGUR/GIZoNfiQzbaZPsmHoF+GVl8LQdQhVe72PB+agoaWKN57y/xD/vN7\nr+PXaz7Axm3m4+zgamgydMEMmvubo4gLVvC2Mm9REJ0Mz3OeUvTrdg9ZuzPvErnEeZ8lQSvwuTJv\nLe7q2Caaeyy7s57wl0M9VePun2suvZGgt1ZgQukkSKFGSFWJZXSxz6bCMIC4nZl7Mm+/uYey9f7E\nYDPaNHsLy0TWO7bYWspTuQcQVZSUJC5A7A+BgUHz8a/sfx5hcb815SGgusIOgmaTFA0xRIw2xHeM\nxfTBiTXqdvAGzPXtRpv5evaccNAvozpoBqq2Fsks7LOCS0u7+Tu3i3wG+BJroO3gbVfXjq01f85h\nYwc6owLuoFhZGoAAYGhVx9854B16lCUhkXlrZuadnDFqmp5YrpNuODPpQzlV4RLQ8QPXPUfaWYGX\nnbFVliay/AGl5haVMsy/02CK4J343XXN6CHm//3Dxg3M8MhERul+T6LYu5k3kH4IORvsn+WTRQyz\nKtyryjpvhuK+6PLMSSuJkbVU3+/KcSSTPXPeuQverDbPESczKWmAPPQTfLjTu7ymTdgLsbzOmeON\najFEYqq5jlY30KaJEACE6meiWbaWFllV1MGA7GzacP74ryKyx9zooXaIjMtPnAXdMPBKXQPW7/3M\nyXy11lKIZXUIi/sQ9I81s1PV3NtWFAV8e958/PKjTzF1qoh3/55ocFLiC6IZifWPdr/o3U1WW0+7\nGMfOOiUVMVWHjihEeCtSJ9YOxsdNiZaR5vPNDz17pACAOd/tt+daXQ1GrOB95IwifN76mfVzXWug\nnUYuGgTr/BiajOKAbA25CmZBmL9jsxnJNSw4o2w2Pmr+wNmJKbLhWHO/agDtLQJQYgV915y3JEWc\nJVRicTNaVfPnuzPvb596PJa/vBX75E8h
KDFUlgWxwzpGe8574UmH4uebXI1rrO5XQwYU47yTxiIU\n9OGdLwdgXcM/UOoL4ZRDvo5h1SFcEDYb5OwT/us89fCRI3HWCWbryTsunYn9zebWh9VNZlCwh8JP\nnjEUf39nBzTdQHFAdoYd506agj98bG7KcsnJhyEkDMCU0WbWfszUIRhUUYTRNWaf7GWXzkSFK6hV\nlRfh9kuPwODK1FXInjlvSUQsrsEwDKfaPDljVDU9MSef5kP1vmuPxusbduGZl825fk+Rmiu4JQc0\nd5CXU2Tw9187B63huJN1L7t0JprazIs+ewcrO4ja2Zq7u9hti7xVzJnMnjIYA8sCGF2ToiNZkvsW\nH43G1ljSnHfXC9Y6M6DU3BfdMHIbvAM+GcsunYnykB+KJODLfe2oTXMRaHNfdCkpRlnSFay53X/t\nHLz18V488Tdzu9p0GXqqi4NcYPDOEbswTSzdB6m0Ac2tewAkrh63la6Fuyg3psUQi5vLqEQB2Cuo\nKJaLIDQMhVi1y1xcJOowAAQUGYJsZn2TB0zA3z7ZZ3bv8oedtZhavVWQ5jOvnu250jZhPwRBgCDH\nYaiJhgMTBtdC/FhE3JpntTPvimAJmvfHzbXWmuQUpe1vbzH/muxqcSdwxs0GNIpVze5aQlJRFAK8\nsdvJrGPbJiIweb35GnIMorVlpN6ayFxDivke3t3/Nt7d/7Z5pyvzdobQ5Rj8483vG9EihII+TB0z\nEOs273aCfak+BOMGV2P9f8zzJEuCk50FjUrobaUQi5s9xwgATc0wg7ccd4oI/ZIPoiA4PbvF4ia0\nqOZzy1xz3n6fhOpQGfaFzfqD8jLRXLalJ4bNq0PeavD4DrOJSHFAdrLYE8dNw4mY5nmcX5FQWQpU\nxkc79w0vH4RqK3sqtiqFAWDW4MPx7KsfQ9s/BJNHVngyQ3e2NW5AotBt+qihngsxURBwyPBEtfWI\nwR23dxw5OH3w6ThsbjhD58mZt98nQdONRJerNMOZpUGf50PeHdB8nmHljnP0smQeQ6oP9oqQ31lf\nDQChoA+hoLeRjP1+3BcCK+bcDkkUUaIcWMFa8rntTFmJH2VJ8/2pmrR0hyyJqAz5sa85mnYIOVvc\nf0/2KE9n5DRLwVIWrKW4QAPM3/PIFH/Hydw1BRw2Pwg5w+ZWT+WInmKHKxd7yZdfkcwPJ1GDIvoQ\njWuQkpYYybIASUlsU1jXGIERC6BdS6x7tVtzhvzmB669WUMM7TAMwwrePkSsHbxkUUaFvxx14X0Q\ny/dCHmAPi7u2WlQVZ/mUvduYMyft7GGsojUchVhsZuYhV+FOyrWg9rB7WzmiH1vLk5QoxFAj9EgR\nEHd1B/OluPp27fhlX0BIFXsgKHFIrYOh7jSDn7OUyPp9+EQ/Lp9yEcT95m5DdsEaYFZdq9aOSoYu\neLL7BmsBgFi6z5mXtzd+QDwAI+aHEGzGjlZzyL0maSlSib1LkRxDaYld7Z0YNncXOk0UToTeYI6q\ndDXzce+6lG7tbUD2I/blKECXMLC8KG27R3exXbHcvXW86aQqWItZ65cVyRu8Az4JqmZkHDYHktY4\np8mQUs2P2wVv7u1dD4Q9kuD+PZX5Qx365OdCb2XeQOJiLpeZd3d4Mu8U1eBdybzNx2U+X+6Lg1R/\nS9nC4J0jTsGatYGCs/uTLWlLQ7tHud9ndmkSJA2KYG4Dam9q4ARvSXSGtQNyAPWNYQhqAO1qO1Td\nvD+shiEKIoKKGbTsIdKI0WbuYiQYgKZ4CuiqigagOdYC//h3nPvawq41yZriFGm1WNXmRofMW0Wj\n8hnE4hZUxMd4AkiqYOLeYcoZQg81QJDjHdbRFrn28lWsYUnPPLp1DGKRtca8bZIzn+tklFa2LMIq\nHrP+I8qS4BS6tEfi0PbVmIFb9cFd6mq0l0JvLYNUXg+p2mxp65f8zsWa3h6C6I9g475NKJKLMCxU\n63kPIcVV+e/XneO2P2R8kmvNtpgYdTiQzOeiCeeiSC7C5AET0j4mZm0vW1bs82Qi7vXSgiDg4gnn\n4uyxp3d7HW86SoqlYnbzEZ8ieoJOQJGg6ZmHzYH0WZV7Pa6U4jF2Zt3Y0vlFdjr2h36uM9RUpG4U\nZ6VjX8wV+/v+fXXGezHoyox9nS8VS9aF2J2x8U22MHhnQTiqoqXduyuY0yXMyvTiRlJ3LtVbgOEE\nb8Xa9UtUAV1GLK5Bttbl2vPjih28NRkCBNQ1heEXzMBoN2Zpj4dRJAcgSaK5VCfuh2EIaFWb8ceP\nVgEAtP2DPB9WA4OuTRosEVenOMOqKPcrIiL2Bh3OnLcVOBUVMdnMumvh3bIwOXifNOxYs2DKZjVZ\nEcvMPa6T23C650EXTVqAG2dcA3XXqMTx6e75bxHlYiLrtT+c7WAfMMzXtocYJUl0/qO3RVRA9SH+\n3ymIb0/sya3IImCIiG01h6ztna38kh/2SgB7HXZYi2B8+egOGzKU+q3aASXmbCBiaHLK5TElUuLi\npegAMp+ja2bivuN+2GlbSltxkeL5MEquDp9dMxNzhx/f5Z/dZUnLxlQtfebt90nQNHPY3C4sTCdd\n5uS+v7Pg3dDazeCdIvPuK+5h855edOVL5q2kec+pMu/OCtbc2/Wm09MLou5i8M6C6x96Ddc/9Jrn\nPrt6Fk7w9gZ3Q9CgR4LmGlZRcTbZ8CsSSopkCJKOPfti5iYMgt061GroIgnOhhRtERXhqIZia3/h\npqg51xpWwwhamar5QWj2zd7eth0fNXyKQfIIaPW1GDwgEVA9OyxZhg8yX3dAqd8pShs62Oc0QnEy\nb6dtpwpVtIbsFe/8kXsI8VuHXdlh/99xQ8xWlfb/PfcmAYD3P+DQkhqMLR8FGCnmvGF1RLOqdodV\nlzgZROyzqYhvH4ehhrk8ys4AZVFwisbs4VmtvhbaPnP4fNSQUqdHtxEtcvrFA4Bf9jkdLY32xEhA\nqsy3LJAI3s7fhC55AtL/G30Kjhx8OAJS9pbq2PPcQyqDnkrsQTnaaMHdrEiWBOiG4azEUBTJO2yu\nmHPebRHVHJXqJCBJXVjDW2o1jXFn92NqzIu5IQO6N8ztVyQU+eU++2B3S3Vx0l2DrL+T0mJfhkf2\nrXS/d/v3ka63eTK7F7zYyd9YV9eJ97b+ffmUp+xCG8MwnA+WRPA2/9XQMXhDC0DdNQbV46LYGfkC\nsLbLnDV5IF54C04wcipXreCtSCIgxWFEfE5L1XJ/KRoANESbMArmnLddLFUe8mPP/nZz/tgXRZEc\nwHdnX463yxow3bUcZXz5mMTxqTK+MuJknHTUZLz7aT3iqo6nt5hrzysrJWyPxc1kU1NwzNQhEMsF\n/CeyCZKswfBFYBhCh0zbfXt8xRgIgoAbzp0Gnyxi9/52zJo4CDe/9ifnfert3jluWRKwaOL52NL4\neYcLjbISH5paDRiGGfyrS8px+lEjURr0YfaUwSgOKLj27EPx8z9thLprDIQae7g8kXnbRU32nuTj\
nhpbhzGNGYV9TBDPGV+FnqzbA3GFcgN40EKK1I5tn2Nx1wTFj0FQkq7CaqMiDvsCGfebPOe+YyZ6i\no1NGmu1qX3rjC+e+AaW9u2/wLRdMx0dfNGDK6AGIxTV8/YQxB1Qo1VPupZR2sLH/litCfs8oix3I\nW9oT+5ink/yhe+uF0xGNe7OpMTVluPTUCc4GGwBw8hFD4fdJmDHeu/NWV10wd5wzrN/XejN4H35I\nFRbOH4+jJg/utdfMhuRs+vuLDkdTa+Iz1/130dn5GTE4hEtPnYBDhqcfteqrCzQG7yxStcSmCppm\nz3mbHxya4A3eEPREYxIkdtzy+yRnq0l7DbOdeTt7RUsCdCEOQwtiZ521UUdxJT4PA/sjDeZuZLrq\nZN5V5QEzeFsXEjXFQ1CsFOG4ad4sa3jpUAwOVmN3+15EP5yFY4+ag1DQh+Om1WD9B4lK7WDQgICw\ntbm9gLOPHY09cRn/ec8cNheUKBD3IVDi/aAt9lQrm+996hgzCE8YYfX/1v3QxXarUKxjRe+RVYfj\nyCGHdzj35SV+8z+rIQCCgepQGfyKhLlHJPp6H35IFYJ+Ge1R1cmU7Yst93CsnXkfOnoAJo9MVH+7\nq5zj28cDhoihlRVQRNmpcTDCJdAjRThhzHTPHL1znEWJC5KdrealwNETRnV4HODNEFJ1EOuJytIA\njp4yBIBZiX3aUSMyPKN3uTNve5jX3rSnqjzgyYrt77dFVFRXdF44l5xVpbsYOW5ajee2KAgd7jsQ\n44ZmnqLIld4M3pIo4sQZQzM/sI8lz0PbIympZJpKyPR30JWitmzo+zGdg5j7Cl/Tra+tYXNnj2Xz\nljlfavfztpc7WTtuOXt0W8HSZ+/HbDdOkTSn4GxHnVn1XVtqZtANkUa0W/PRRdY+t1X2jkuy+bo1\nJemvom8+4jpEP5wJI1zq3bQ+oCSGyJUwRH8EmpUZK7LobK0nyCoEXxRGzO/Zlxfo2s5DJa3mHLPe\n2DED6uxDKRS06wLMoFDiSz386QzJG4bntuyZ844797l55v00H+LbJmGYPsN6Qet+Q0R0w3E4b/xZ\nKX9+kc/nFA8CZuFdukpud/FVLqtac8Gdedvnede+xI5x7mFz9+890/RBbwaufCX1g6H7XMvlUHa6\nTaeyrfB+qzlkL7sCEsPmguBu2WmxN9+wMm87wxZE1QreiblQAAiIAc9rGFYWb2gydlrBe0SlOV+8\nP9Jo7kcNIGgFVLvNYeyzQzEiNAynjpyb9j0E5IDTKtL9HyIYkJ3g/Z+ItZtYuGPwhq/dXI8eD3To\nhdyV4F0ePgSRjXMQ+++UDt/r7EMped/idEt07Kvu5P9/dntUIJF5JweCVEU79lW49+VStwwFzGVP\n0Q+OcgJ4mb/jDlm2rhTP5Bv7nRquM2af50TmXeQ59+7fe6bCqT76XO1XCvECJpdD2U5ilmMcNu9F\numF45lI8mbfmLVhzb7cJwargtoKz0SHztrqLWXPefsneDMMM3ppgXQioMnZY2/UNG1AJn6hgQ/1m\nJxjYa4ZDRebws948ELfMPK/L7y8580bS7kh24xdFFlFkWM1gfFZns5i/Q1WwLMo4YegcDAqmn1eU\nZbFDoZqts0KT5Cvv9MHb/Dd52FwUEsHbnitLfs1Uy4DsD8p0u8h1PE4RRqzIXG5WuRdiJzsu2Mv4\netJoo78RBAGGYSRl3lbw3tcOvyIhFFSSNjFxZ96dz3l39fdwMGPwzi7nsz3HDp5PgT62e387rrjn\nn/j7267+1/HEsqrkgjVB6ph5G9awuW7vNuWLwO9zZ97mtZZTdWy9hu7KvJvaYigt9iHgk+GTzCD9\nft0mAMCospEA9OeBHQAAIABJREFUgPJQ9ypFk5tcOHtuW/RwCLIkmPv/Wpm37rOat8T9Kfv+njv+\nLBw39Oi0P7OzZRzJnbHcyoq9c8IBOXWBlz13XGQdWyITTPS/brcadSRn+ikzbyl1Jp+JfcEW19MX\nOdmvOXxQ560h84m9JMtd4S675rQHlgc6jES4f++ZMu+DbXqhOwrxHOTygqWvLqaZefeStz7aCwBY\n+ddPnPvcm444QyuiO/M2AAiJPautYFgSrwX8m6AM+wQ++WRnztvOvINyEIh3zLxrKspR7h/gVMi2\nurbpBIDRZWYR0qSRlTh99ghMH9e1Stprzz4UX9a3ej4EKkJ+nHroNLypbcWYkrF4+8P9MNpKofgT\nFfHmkjd7NzJft1oHdjZ3lep7t1wwHe9vrcfXjh8DUQT+ZT9WTP2zrz17Cl5Y/wVOn20VaLlesqqi\nCKOGhPD5LnP0IPkDIdV8qzNsbkVaWRJxwdxxad8DAFx51hSsa9iBrZFdnuHjZGcdMwqqpuOsY1IX\ntOWj75w/Df/3n+04+fBEEdSx02rQ0h6Hbhg4ekqiHuPCueMQ8MnY+mWip26m4D24MohTjhyOKaMq\nO33cwcyvSDhzzshO29MebIr8svmeh6R/zxfPH9/lTUk6M2N8FU6cXotjpw3p8WsdCAbvA7S18b+o\nC9fjqCHezQXswCYEWiGW1UPbMwLRlJm3GagF0XA2FnGG0q3g7YsOQoV/KBqKdwBSvMOcd5EcgBAX\nnNakLaq5DehXpo/F7JpEj+sLJ3wNL3z+NzRGm5znAeaQ8NeOTywDy+TwQ6pw+CEdA/3Xjp6Mq6uO\nwsdb67B+rbkft/sqNCgH0BSzh/SVbgZvMem22XMaSD1sPmFEhVOpfv5J4/Avc5dMKGLq4dXqiiAu\nPTWx/lpAYthbFARcd85UfPfnr6c8lmCKLlPJAf74aTU4cXpth8e5nXncGEzYfiZ+uWEfFhxydtrH\nBQMyFn7lkLTfz0dDBhTjklO869/H1pbh21/vuKzOXimwbXeLc1+mYXNBEHDeiWN74Ujz21ePHZ35\nQQeZTO/5pF6qmpclsU/+XzJ4H6AH3vkFAGDW4Bmebln2XHdgqtmcJdJa4Q3emrdgDYCZfetyIhu3\nhs1jcQ2KHgREQBOirszbqjZXJBTFi9BqBe9P2z+CKIiYPND7ITin5kjMqTkSb+95DwNTNFzpLe4P\nUPeSnqASRFMssZtXd7bLSw6YPlmCqplDy501TrAdWzsbr+5ch9HWlEEmiepz89+yksQUQ3Kmn7pg\nzTts3tWGVhWBciyddWPXHlzg3Bdt7o0/iAoJ//K7SdVVZ04ZAJKnWARRSxo2TypYgznsbcQDEKwm\nJPYcciSmQdB9ZvAWI4iq1lIxe523LCIoF6FNaoXgb8OeyJeYWDnes4mF2+GDDuvRe83Ep4hmP2rd\n8GTe7mpyo7uZd9J8kt8nOXPQXWn1eN74s/DVMachIHdvXbS3mYP3WFIOm9tz3vbwd+FNN2ad5ClY\n40cYFSYWrHWTmlRY1CGQGELSsLm9zjuRedubeiSGzc3gFo1rEKyGJHEjirC1TttemuWTRQSVIkCO\nQRpgNvaYOWh6z99UNwmC4HyIuueQPOuVXZttHIjkbPdA
d0USBfGAAnfyum+3mKp5bqfaijIx523/\nfEbv3uYtWOvfG2QQZQuDdzfFda3zBwg6/vi3T7Hps30AXJm36FoTaFecJw2bb9vdgi92mvPcUSOM\ntri53tVu0qLIEkqUIATRgFS1A7IgY2rV5J6/qR6wP0QVxTtsbjNUxbOTU1clD5tne79cZ847xfda\n2uOe26kL1rpXbU5dx8ybiMG725Izb7ufucMKyA88/T6AdMPmKgZVFGFghbWHtWvplb0dZtQIO3tx\n25m3IotOYBT9EQwtHppoitJHjpo0CANKAzh8fLVzX1BJtAOVoXSrjaA7eI8cHMKF88b37EAzSZEo\nf+/iGZgwvByzJ3v34lZkEbMmVnsKopKHzZl4976JIyowqDKISSMrUFHau21iifIFL1u7SdW9WVgs\nufuVNY9tf3inK1i7Y9FMPP3uK3izHYAuosgvIRzVnK0129VE8LaboiiyiGI90XQk5Ov7db9nHjMK\nZyYtYXIPm/vl7q0td+/zfPslR2R9swdnnbfr1zRuaDluuXBGx8cKAq4+y+z89vQ/twBwVZsbicdQ\n7xo3tBx3XXVUXx8GUZ9i5t1NquEdNjdbV7rms63MuzJkZsTJvc0Bs1GLTxFRVGT9GgzRqdy2s+zW\nWBva4+3wiT5nWN0niyj3JdYvhtIUqvU1d8FadyrNgUTBmiSaLUaz3Xwh0XGte+Pe9pJBnfVqRJRF\nWc28V6xYgffffx+CIGDp0qWYOjWxdnPlypVYs2YNRFHElClT8P3vfz+bh9LrkofN46ruZNsAnK8H\nWMN6qea8BVmFJIoIBqzgrZutIOubIs6weVu8De1qGAEpALs1hSKLKJMSwbs0zaYbfc09593duWq7\nOMkO2tnecEBI7pd6gOSkJi3MvIkoG7KWeb/55pvYtm0bnnrqKSxfvhzLly93vtfa2opHHnkEK1eu\nxBNPPIGtW7fivffey9ahZEVyG8u4qnn7lVvBu9Rqv6m72qMaqnnNJPniePHzv6MdjQDMOe9ia39i\nSfdBgIDWeBva42FnO0/ALFgr8yeCd1mgf3ZOch9z8qYkXWVn3nZGm+3t9xLD5t2M3slLBhm7iSgL\nsvZJuG7dOsyda+5WNWbMGDQ1NaG11exzrSgKFEVBe3s7VFVFOBxGWVn6/Vb7Ql1jGI+t/djZDjJZ\nqszbvVOY0/LUCgLujUnsrFoYsAN/+XwtXtn5uvVY0Wk6IUkiipUgGqNNiGgRTxaryKI3ePeDOe9U\nZDExsNPtzFtK7K8N5K5Pc7eLxa0n9tU2gURUGLI2bF5fX4/JkxPLlyorK1FXV4eSkhL4/X5ce+21\nmDt3Lvx+P04//XSMGtV5v+aKiiBkuXeXCVVVpZ8rXrHyHWzZ3oiyUABXnNVxO8rikOJ5viCJiXXb\ngJN5y4qEqqoQJFkCYEAQAD3uAwLtHY+nrBhl1hy5IokYXl6DD+o+BQBUliSC9eDqEAJFiYA9fNAg\nVA3su3nvdOfRCNYC7wB6OIjSEn+n5zudygpzSkCWRef5AZ+E8cMruvV6mVx46kT86JE3cN68Q7r1\n+iWhAKqqQrj6nKn45aoNOHXO6C69TjbeS6HhOewdPI89l4tzmLNqc/cwZGtrKx5++GG89NJLKCkp\nwSWXXIKPPvoIEyZMSPv8hoaOwa4nqqpCqKtrSfv9fY1h69/2lI/b19CCOiVxf2tbzDNsPn54CB/s\nBMLhOOrqWtAeiSWK1TQZhi4msnPLVacfin+tM3+uKAoY5B+ED2AGb9lINKNoaQ5DiyZ+dVq72Ol7\nyabOzqMAH04oPh8vvl0HjDO6dYzhtqj1WnCe/7MbjoMgICvveVRVMX57y4kQRaFbr9/cHEZdXQtm\njhuIw7v4Opn+FikznsPewfPYc719DtNdCGRt2Ly6uhr19fXO7b1796KqytzcYuvWrRg2bBgqKyvh\n8/lwxBFHYNOmTdk6lG6xLzbSjdJ2GDbX9KRtPs3MW7NeJ2q0QvBHrBcXALXjdZMsys7wuiyJqA3V\nON9zL7tSJNFTCFWi9M9hcwCo8g0GNB/8Svf+1Ox13u4qc9GqPM+W3hqaL8StGIkoN7IWvOfMmYO1\na9cCADZv3ozq6mqUlJhBpra2Flu3bkUkYgazTZs2YeTIkdk6lG4xUqzTdY8edChYi2uA7B42N7Nq\nOxjvqFqDwNRXrRcSYcQ7NpdQRBmqtaRMEgUMK0kEb/dabrt/+IiQuctSd/t254IdfANK9wZ5ZDm3\nc91ERPkga8PmM2bMwOTJk7FgwQIIgoBly5Zh9erVCIVCmDdvHr7xjW9g0aJFkCQJ06dPxxFHHJH5\nRXPIvdRH1VX88aNVmO3aBlQ1OmbeYlFiqMQQzO87Veae1xZgtJVBLDYff+GEr+HThs9RVTQQmlYH\nwCxYqykZjONqZ0MWZcypmYUnsN45JgD47uGLu70eOVfsgjNfN1qjAole6WKWq8x7C+vUiCgXsjrn\nfdNNN3luu+e0FyxYgAULFmTzx/eIu8nGxvoP8cbut/HG7red76tJvc1jqg6xsinxfGgQBQGaYeCL\nvc3eFzdE6K3lQPUOAImtO4FEm1VZFCAKIs7vZH9nScxun+/eYGfe3a02l6zny3mSeff3iykiOjjk\nRzrTBxKZd+pGG8lz3jEtBiHYAr3NrArXoEIUBei6gR/8YZ33yboIvS310rhZE83+2ccfVpPy+/mm\nImQO6Q8o7V7v9UTm3b+D9xGHmPUcIwf3zzX3RHRwYW/zDARBgF/s2Jc7ntzbXG6EIBjQWiogBJuh\nG6qzx7Wn8xoAASLu/8ZXsOaLCMaVj/Z878hJgzC2tgyVKTZc+NkNxyY6teWJMbVluPvq2RhY1r3g\nbQ+79/fg/c2zJuO8ligGlhVlfjARUQ8xeKfhDJsLqbt6JQ+bq4K5lE2PBiHpkpN5R2Kad/03zD2m\ny0sCWDTp/JQ/e0CaQJevexdXl3c/oLl7m/dnkigycBNRznDYPI3EUjEBmqF3+H6HLUFFa9vOmB/Q\nRWiGBkkUsLehvUPmDZ2nvavsXuH9PfMmIsolRpE03FXDWlKWDXirzQ3DgC5Za7jjPhi6BNWIQxIF\nGAY6ZN727mCUmZ1550vBGhFRLjCKpJEp845riYC8e387oJidwIy4HzBEqIaayBalpODP4N1lSp7M\neRMR5RKjSBruOW/N6Dzz/vmfNkFQYgCAUn8I0BKZNwAIHQrWGIi6yqdIkCUBRT6WZxAR2Ri803A3\nadFTDZu75rwjMRWCEoUiKvjhJbNRO6AUcT0OwT67ycPmev9fn91fyJKI755/GM4/aWxfHwoRUb/B\ndCYDM/NOVbCWCOiabkDyx1DmC6G02I/yYDF2RXRIkpW+JxesaflZNd5XDhle0deHQETUrzDzTkN3\nNWlJOeftWuet6ToMKWoOmQPwS+YabdGa6xaS57w1XjMREVH3MXin47RHFVLPebuGzXUhBggGQtbu\nXgEreAv2RiV
i0rC51rHpCxERUVcxeKdhrxRLV7AW01QnOzdEs9K8WDG37fRbu3wJaTNvDpsTEVH3\nMXhnIKYpWPt8dyN+9Zy5B7kmmpXmQTt4S1ZmbQftDpk3h82JiKj7GLy7INWcN0QNb31sbt9pWMG7\nWDaDtzNszsybiIiygME7A90wUg6bu7umGZJZvNZh2FxUARgQ/OGkF2XmTURE3cfgnYFupMm8reBt\nGAYMKXnY3NoRTFQhDdoGsbjZ05iFTVqIiKgnGLwzMAwj5Zy3ORRuQDcMCLKdeZu7StnD5pBUSKX7\nAQCXTlqQk+MlIqKDH4N3BuaweYrMGwAkFf/e+SaU2q0AgGDSnLchqBCKWmDEFVQFB+bkeImI6ODH\n4J2BYaRYKmavAZdUPPnpaufu5DlvTYpADIRhREKQRc5zExFR72DwzkDXOxasybDntL33FyctFYvI\n9eY3wqEO+38TERF1F4N3BobRcT9vUU/MaeuRIud+RTSXgNnD5mFxHwBAiIYwpHgQJMOH+M4xEFiv\nRkREPcDgnYFhGNCT5rwF3cysBUmFEU0Eb8GKyvawuV1ULmh++CQfZukLoe4cl/2DJiKigxqDdwYp\nC9bsJiuSCgjmBPjo8Hzn285SMYtgPV4wmHITEVHPMXin8NH+TyH4zMYqqQrWjLgVjK3gbRgCSvUa\n5/uKKENxFagJujeYExER9QSDd5KWWCt++t5v4J/2CgAz8/7vnibPY1S79kxSIQgGYAiQRG9WHfKF\nnK9FnbuIERFR72HwTtIWbwcAp6hM1XTsaWjzPMauX7MzbxgCxKQzGfKVOF+LOnuZExFR72HwThLT\nY57bcVV35rVtumadNsnsXW4Gb++pLHUFbwHmELoB7+sQERF1B4N3koga8dyOxXVAMAvWoh8dgaJw\nLbR6c37bnXlLSeu/Qkpi2Dz5e0RERD3B4J2kPSl4x1XNybz15gEI7T0aajQAABCUqBO8haQzWepP\nBG8hKXgn3yYiIjoQ7NmZJBz3bt8Zs4bNDQMABLRH44Dqg6EqEAJt1lruFAVrimvOW2SwJiKi3sPM\nO0lY6zhsLgg6YJinqj1ilprr4WIIgTAEUYNhCB0CtGepGDNtIiLqRQzeSbyZt4G4pjtD4wAQjpql\n5kakGIJgQPBFAUPskHlLouR8bX+L5WpERNQbGLyTeDJvwUAsrgGCDlmUMLqmFLo5fg4jXJx4nCFA\nTMquJw+YABgCYtsmdPgeERFRTzB4JwnHXcFbVJ05b1EQ4VcS2bQeDSYel2LYPOQrwYTGi6DtGclh\ncyIi6lUM3knCqmvYXNSdanMR3uAN3fV1ig5rAKwit8SwORERUW9g8E4Sdi0VEyTVWectQoLf5w7Y\n7lPXMfMG4AyxC4zeRETUixi8k3gzbw2abkBwhs1dp8u9Q1iKOW8gEbyd2M2KNSIi6gUM3kncTVoE\n0W5ibgZvn2vY3NATpy7VUjEAmDKyEgBw2NiBnvs5BU5ERD3BJi1JYpqrt7lkB28doiBBkdJn3qnm\nvOfOHIZDhldgWHVJh+8RERF1F4N3kqh7Y5KkzFv2BG9vIE+VeYuCgBGDQx3uJyIi6gkGbxfDMBDX\n4s7txLC5DkmQkoK3O1innvMmIiLKBs55u8R11bttp5TIvCVBhCy5h8q9WXiqYfNkrFcjIqLewODt\nYs93S4JZmCaIGiCqEATAJ/o9mbe7YC3dsHk6zNGJiKgnGLxdolbwDohW9zRRg+Azq89LlNABF6wl\nG1xpvu7omrLeOWAiIipInPN2iVvFakVSEG1aCyCp5sYjAEqVEGQxdcGakWadd7KTZtSiOCBj+riB\nGR9LRESUDoO3i5N5C2aGLEgaBMXMvEt9pZCTsm33110ZNpclEXMOHdJ7B0xERAWJw+YuMavS3O8M\nm6vOsHmZrzT9UrE07VGJiIiyIWPw3rp1ay6Oo1+IWcPmPqMIgJV5W8Pm5f4yyHLP5ryJiIh6Q8bg\n/e1vfxsXXHABVq1ahXA4nOnhec0eNldgBm9zztvMvCuKyrwFa8jc25yIiCgbMs55P//88/jkk0/w\n4osvYuHChZg4cSLOPfdcTJ06NRfHl1N2gxZBV2BoIgRJBXwRGLqIkFKMqBRJ/URD5LA5ERHlTJfm\nvMePH4/rr78eS5YswdatW7F48WJcdNFF+O9//5vlw8stO/OGIQG6DLG4GWKgHdq+wVCUpA5rbhw2\nJyKiHMqYee/cuRN/+tOf8Je//AVjx47F1VdfjWOPPRYbN27EzTffjGeeeSYXx5kT9py3oUkwNAmC\nYt6v7hwHRfL2Nvd0WwOYeRMRUc5kDN4LFy7E17/+dfzhD3/AoEGDnPunTp2aceh8xYoVeP/99yEI\nApYuXep5/K5du/Cd73wH8XgckyZNwp133tmDt9E77A5rhi4CmnlqDEOAEQtAlrztURXZvc5b5Jw3\nERHlTMZh8zVr1mDkyJFO4H7iiSfQ1tYGALj99tvTPu/NN9/Etm3b8NRTT2H58uVYvny55/t33303\nLr/8cjz77LOQJAlffvllT95Hr7CXihmqBEO39u6OKwAESJLgqTZP7rbGYXMiIsqVjMH7e9/7Hurr\n653bkUgEt9xyS8YXXrduHebOnQsAGDNmDJqamtDa2goA0HUdb7/9Nk466SQAwLJly1BTU9OtN9Cb\n7DlvXXNl3roMSTSryd0BO3nZGIfNiYgoVzIG78bGRixatMi5fdlll6G5uTnjC9fX16OiosK5XVlZ\nibq6OgDA/v37UVxcjLvuugsXXHAB7r///u4ce6+z57x1VYSzFEyTnEDtnvNOzrwZvImIKFcyznnH\n43Fs3boVY8aMAQBs2rQJ8Xg8w7M6MgzD8/WePXuwaNEi1NbW4qqrrsLLL7+ME044Ie3zKyqCkGXp\ngH9uZ6qqQp7bwqfmMUqiD7D28jZ0CQFFQlVVCEUlifddFFDgXMIYAgYOKO7weoWiUN93b+I57Dme\nw97B89hzuTiHGYP39773PSxevBgtLS3QNA2VlZW49957M75wdXW1Z7h97969qKqqAgBUVFSgpqYG\nw4cPBwDMnj0bn376aafBu6GhPePPPBBVVSHU1bV47mtpN39GuN2A4Lf28tYlSKKAuroWxOJa4sGu\nixEYApoa2+EvwOQ71XmkA8Nz2HM8h72D57HnevscprsQyDhsPm3aNKxduxbPP/881q5dixdffLFL\nmfecOXOwdu1aAMDmzZtRXV2NkpISAIAsyxg2bJizTnzz5s0YNWpUV99L1tjV5mocTuYNXXIqy93z\n3JJnqRg7rBERUe5kzLxbW1vx5z//GQ0NDQDMYfRVq1bhtdde6/R5M2bMwOTJk7FgwQIIgoBly5Zh\n9erVCIVCmDdvHpYuXYolS5bAMAyMHz/eKV7rS1E9BlmUoaqAPedtqDJ8VtB2B2jJ9bVhCDBARESU\nGxmD9w033ICamhq89tpr+MpXvoLXX38dP/jBD7r04jfddJPn9oQJE5yv
R4wYgSeeeOLAjjbL4loc\nPlFBXNUhfjEdyvCPEd5+CJSqjgMU7gI1QTAYvImIKGcyDptHo1HceeedqK2txa233orHHnsML774\nYi6OLeeiWgw+yYeYqkNRy1C291hA9UNJUSgnJbdKNRi+iYgoNzIG73g8jvb2dui6joaGBpSXl2P7\n9u25OLaci2kx+CQz81ZkCbpuBmR3NzWbuymLJAuoCAVydpxERFTYMg6bn3XWWXj66adx7rnn4rTT\nTkNlZSVGjBiRi2PLuZgeQ7lYigZVQzCgQNV0AHDmvN3cwfvsY0alDPBERETZkDF42wVngLmka9++\nfZg4cWLWDyzXDMNATIvDJ/kQ13Qosoj2iAogc+bNGW8iIsqljOmiu7vaoEGDMGnSJCeYH0xUXYUB\nwwzeqg6fLELVzcxbSbEVqOgJ3kRERLmTMfOeOHEifvKTn2D69OlQFMW5f/bs2Vk9sFyLWq1RFVGB\nqhlQZBGaZs15KykK1kT3rmIM30RElDsZg/eHH34IAHjrrbec+wRBOOiCt92gRbY28VZkyZnzTpV5\ne4fN9RwcIRERkSlj8H788cdzcRx9zt4ONBG8RSd4y3IiUFeVB1DXGPEOmzPzJiKiHMoYvC+88MKU\nc9wrV67MygH1leTM2yeLUK1hc9k1RL78yqMQi2t4+p9bnftYsEZERLnUpQ5rtng8jvXr1yMYDGb1\noPqCvZe3CHN+293HXHb1MZcl0dkaVI8UQQyEUSQX5fBIiYio0GUM3rNmzfLcnjNnDq688sqsHVBf\nienmsLmExLC5rUM3Nfs5Hx+B4NCdOPb4g2v+n4iI+reMwTu5m9quXbvw+eefZ+2A+krMybzNU+Ju\nzCKLqZbGGTCixZD3TIFPUlJ8n4iIKDsyBu9LLrnE+VoQBJSUlOC6667L6kH1BSd4GzIA3ZN5y510\nTzv4VrwTEVF/lzF4/+Mf/4Cu6xCtoq14PO5Z732wiOnu4B3zbEYipxk2JyIi6gsZo9LatWuxePFi\n5/ZFF12El156KasH1RfsgjUY5ilxr+2WUgybc3UYERH1lYzB+9FHH8WPf/xj5/bvfvc7PProo1k9\nqL4Qt9Z5Q7fmvBV3wVr6wfGDsVUsERH1bxmDt2EYCIVCzu2SkpKDMmBFtCgAQLCCtzvzdq/ztjHx\nJiKivpJxznvKlCm44YYbMGvWLBiGgVdffRVTpkzJxbHllB287cxbUdzrvDnnTURE/UfG4H3bbbdh\nzZo12LBhAwRBwJlnnolTTjklF8eWU1HVCt6anXm7C9YOvpEGIiLKXxmDdzgchqIouP322wEATzzx\nBMLhMIqLi7N+cLlkZ96GZgZt91KxUNDX4fFVZQEAQG3VwXUeiIio/8s4Hnzrrbeivr7euR2JRHDL\nLbdk9aD6gp1566oZvH2yiOVXHolLTjkEIwaHOjz+lCOH44KTx+HKMybl9DiJiIgyBu/GxkYsWrTI\nuX3ZZZehubk5qwfVFyJaBIqoQNPM24osYsiAYhx/WG3KxyuyhHkzh6XMyomIiLIpY/COx+PYujWx\ng9bGjRsRj8ezelB9IaJFEZD8iKvWHt6ddFUjIiLqSxnnvL/3ve9h8eLFaGlpga7rqKiowL333puL\nY8upqBpFQPYjxuBNRET9XMYINW3aNKxduxarVq3CkiVLUF1djWuuuSYXx5ZTyZm3z9UelYiIqD/J\nmHm/9957WL16NV544QXouo4f/ehHmD9/fi6OLWd0Q0dUi8Ev+xHXmHkTEVH/ljZC/eY3v8Fpp52G\nG2+8EZWVlVi1ahWGDx+O008//aDbmMTuax6Q/IjHzYo1Bm8iIuqv0mbeDz74IMaOHYs77rgDRx11\nFICDt4931FrjHZADaGPmTURE/Vza4P3yyy/jT3/6E5YtWwZd13H22WcflFXmABCx1nj7JbNgTRBS\n7yRGRETUH6RNL6uqqnDVVVdh7dq1WLFiBb744gvs3LkTV199NV555ZVcHmPWOZm3VbDmk6WDdpSB\niIjyX5fGhmfOnIm7774br776Kk444QT8/Oc/z/Zx5VRYjQCAWbCm6hwyJyKifu2AolRJSQkWLFiA\np59+OlvH0ye8mbfG4E1ERP0aoxSA9ngYAFAkB9DcHkdx4OCqpiciooMLgzeAdtUM3qLuRzSmoao8\n0MdHRERElB6DN4D2eDsAIBoxT0dVeVFfHg4REVGnGLwBtFmZd6SNwZuIiPo/Bm8kMu+WVvM2gzcR\nEfVnDN5IzHk3NZnd1TjnTURE/RmDN4C2eDsUUUFLmxm8K0L+Pj4iIiKi9Bi8YQ6bFytBRGPmpiQ+\nhduBEhFR/8XgDXPYPCgXIRLX4FNEiGyNSkRE/VjBB2/d0BFWIwgqRYjFNfiZdRMRUT9X8ME7rEZg\nwECxHEQ6NoGoAAAYmElEQVSUwZuIiPIAg7ddad6so6E5Cr+PwZuIiPq3gg/eMc3co3zL9jYYADNv\nIiLq9wo+eMd1M3gbunkqGLyJiKi/Y/DWVfMLBm8iIsoTDN5W5g3dDNo+peBPCRER9XMFH6ni1pw3\nDPNUBFiwRkRE/RyDtzPnbWfeDN5ERNS/MXhzzpuIiPIMg7cz583gTURE+YHBW/MOmzN4ExFRf5fV\n4L1ixQqcf/75WLBgATZs2JDyMffffz8WLlyYzcPolDNsbhWsiSI3JSEiov4ta8H7zTffxLZt2/DU\nU09h+fLlWL58eYfHbNmyBf/5z3+ydQhdkrxUTNP0PjwaIiKizLIWvNetW4e5c+cCAMaMGYOmpia0\ntrZ6HnP33XfjxhtvzNYhdEksqcOapht9eThEREQZZS1419fXo6KiwrldWVmJuro65/bq1asxa9Ys\n1NbWZusQukR1qs3NzLu4SOnDoyEiIspMztUPMoxERtvY2IjVq1fj0UcfxZ49e7r0/IqKIGS5d4vJ\nqqpCkD63bugi5h85Al89aTwkznsfkKqqUF8fQt7jOew5nsPewfPYc7k4h1kL3tXV1aivr3du7927\nF1VVVQCA9evXY//+/bjooosQi8XwxRdfYMWKFVi6dGna12toaO/V46uqCqGurgXN7ebrGrqEuTNq\nsH9fa4Znkpt9Hqn7eA57juewd/A89lxvn8N0FwJZGzafM2cO1q5dCwDYvHkzqqurUVJSAgA45ZRT\n8MILL+Dpp5/Gz372M0yePLnTwJ1NqqvaXBILfuUcERHlgaxl3jNmzMDkyZOxYMECCIKAZcuWYfXq\n1QiFQpg3b162fuwBi7matMgSh8uJiKj/y+qc90033eS5PWHChA6PGTp0KB5//PFsHkannI1JdAmy\nxMybiIj6v4KPVqquWg1aBBaqERFRXij44B3T4xAMs4qdmTcREeWDgo9WcSt4CwJboxIRUX5g8NZU\nCKw0JyKiPFLwESuuxwFDYqU5ERHlDQZvPW4tEyv4U0FERHmioCOWYRiIaXFAl1lpTkREeaOgg7dq\naDBgwGCDFiIiyiMFHbxjWsz
8QpcgcdiciIjyREFHLDt4G5rEYXMiIsobhR28rb7mhsaCNSIiyh8F\nHbHszFtn5k1ERHmkwIM3M28iIso/BR2xYnoi82a1ORER5YvCDt4sWCMiojxU4MHb3stb5FIxIiLK\nGwUdsRLrvGXOeRMRUd4o6IjlLBXTRQ6bExFR3ijs4O3qsMaCNSIiyhcM3gCgSdzPm4iI8kZBR6zE\nsLkERSnoU0FERHmkoCOWe9hcYcEaERHliYKOWFFnqZgEHzNvIiLKEwUdseJWhzWDmTcREeWRgo5Y\nMVfmrchS3x4MERFRFxV08I46c94iFLmgTwUREeWRgo5Yqq5CggRAgI/Bm4iI8kRBRyzVUCEKMgAw\n8yYiorxR0BErrschwpzrZvAmIqJ8UdARK66pruDNgjUiIsoPBR28VUOFYJingJk3ERHli4KOWKqu\nQrAybxasERFRvijoiBXXVQgG57yJiCi/FGzEMgzDzLw5bE5ERHmmYCOWqqvmF8y8iYgozxRsxIpr\nVvDW7cyb1eZERJQfCjd423t5W8PmLFgjIqJ8UbARy868DZ1z3kRElF8KNmLFrMwbmghBACRR6NsD\nIiIi6qKCDd6qlXnrugBFFiEIDN5ERJQfCjZ423t5G5oIRSrY00BERHmoYKOWXbCmaQJ8CivNiYgo\nfxRu8LaHzTWBmTcREeWVgo1acd0O3iIUpWBPAxER5aGCjVpxa85bUwXIYsGeBiIiykMFG7Xcw+ay\nzEpzIiLKH4UbvK2CNV0TITHzJiKiPFKwUcvpbW6IkCVm3kRElD8KN3jbvc11ETKrzYmIKI8UbNSy\nm7TAENkalYiI8krBBm9nP29dhMTMm4iI8kjBRq2Ys6uYBJmZNxER5ZGCDd5x97A5C9aIiCiPyNl8\n8RUrVuD999+HIAhYunQppk6d6nxv/fr1eOCBByCKIkaNGoXly5dDzOGSrYgaNb/QJBasERFRXsla\n1HrzzTexbds2PPXUU1i+fDmWL1/u+f4dd9yBhx56CE8++STa2trw6quvZutQUgqrEQCAocssWCMi\norySteC9bt06zJ07FwAwZswYNDU1obW11fn+6tWrMXjwYABAZWUlGhoasnUoKUXiZvCGJjPzJiKi\nvJK1qFVfX4+KigrndmVlJerq6pzbJSUlAIC9e/fi9ddfx/HHH5+tQ0kprEYhQLCqzZl5ExFR/sjq\nnLebYRgd7tu3bx+uvvpqLFu2zBPoU6moCEKWe2/f7XA8Ap/kRzsElJYEUFUV6rXXLjQ8dz3Hc9hz\nPIe9g+ex53JxDrMWvKurq1FfX+/c3rt3L6qqqpzbra2tuPLKK3HDDTfgmGOOyfh6DQ3tvXp8YTUC\nBQoAIBqNo66upVdfv1BUVYV47nqI57DneA57B89jz/X2OUx3IZC1YfM5c+Zg7dq1AIDNmzejurra\nGSoHgLvvvhuXXHIJjjvuuGwdQqci8QgU0QcALFgjIqK8krXMe8aMGZg8eTIWLFgAQRCwbNkyrF69\nGqFQCMcccwyee+45bNu2Dc8++ywA4IwzzsD555+frcPpIKxGUSGXAgAL1oiIKK9kdc77pptu8tye\nMGGC8/WmTZuy+aM7FddVqLoKQTffPoM3EVHfevnlv+OEE07u0mN/8pP7ce65C1BTU5vlo+q/CjJq\nRa0GLbv2xgBw2JyIqC/t2vUl/va3tV1+/PXXf7egAzeQw2rz/iSimcHb0MzqdS4VIyLqOw88cA8+\n/HAzHn30N9B1HV9+uRO7dn2JBx/8Be66607U1e1FOBzG5ZdfhTlzjsV1112F73znFvzzn39HW1sr\nvvhiG3bu3IFvf/u7mD17jvO6qqpi+fIfdHj+J598hPvvvweiKGDKlGm49trrU95n/5zRo8di1aqn\n0NjYiOnTD8eTT/4v2tvbcd11N+Ldd9/Gyy//HbquY/bsObj11u+ipaUFd955G9ra2lBSUoI77vgf\nXH75Rfj9759AMBjEhg3v4cknV2LFih93+5wVZPCOWsEb9rB5DtuyEhH1Z0//Ywv+89HeXn3NmROq\ncd5JY9N+/4ILFmL16qdx2WVX4pFHHoaqxvGLX/wWDQ37MWvWUTj11DOwc+cO3H77EsyZc6znuXv3\n7sF99z2E9ev/jT//eZUneLe0NKd8/oMP3oebb16KsWPH4Uc/ugO7d+9KeV86W7duwRNPrIbP58O7\n776NX/zitxBFEeeddxauvfabeOKJxzFr1myce+4CPPXUSrzzzls47rgT8dpr/8L8+afgtddewbx5\nX+nROS3I4G33NTc08+0z8yYi6j8mTpwMAAiFSvHhh5uxZs1qCIKI5uamDo+dOvUwAObyZHcXz86e\n/8UX2zB27DgAwO2335n2vnTGjh0Hn89crRQIBHDddVdBkiQ0NjaisbERn3zyEa644hoAwPnnXwQA\nqKmpxW9/+0vMn38K3n33bXzjG1cf+IlxKczgrSU2JQFYsEZEZDvvpLGdZsm5oChmD46//vUlNDc3\n4+c//y2am5txxRULOzxWkhLNu5KbgaV7fqpNsFLdJwiJxE5V1Q7Ht3v3Ljz11Er87ncrEQwGsXDh\nedZrSTAM3fNaY8eOw759+/Dhh5sxatQY+P3+zk9CBgUZtSL2piR25s2CNSKiPiOKIjRN63B/Y2Mj\nhgypgSiKeOWVfyAejx/Q66Z7/siRo7B5s7ni6a677sR///t5yvuKi4uxb5/ZbGzjxvdTvn5FRQWC\nwSA+/vgj7N69G/F4HBMnTsLbb/8HAPDcc6vw4ot/AQCcdNI8PPDAPZg375QDeh+pFGTwthlx88qH\nmTcRUd8ZMWIUPv74Izz00P2e+0844ST8+9+v4vrrr0FRURGqq6vx6KO/6fLrpnv+9dffhJ/97P/D\nNdd8A6FQKUaOHJXyvjPPPAf3338vbr75egwcWNXh9ceNG4+ioiCuueZy/P3v/4ezzjoHP/zhD3Hu\nuRdg06YNuO66q/Dvf7+G448/EQBw8snzsHfvXhx++MyenTAAgpGq6Xg/1Jvt5uJaHNf89hnojdWA\nIeKWC6ZjwojOe6tTamyn2HM8hz3Hc9g7eB57rrNz+Pzza7B79y584xvfPKDXS6Ug57wVSYHeMNi5\nzcybiIiy6Z57/gdffrkTd911X6+8XkEG72SsNiciomy69dbbevX1CjLl1HXvTAEL1oiIKJ8UZPCO\nxr1VjRw2JyKifFKQUSvWIXgz8yYiovxRkME7OfOW2B6ViIjySEFGrWjc2/mGmTcRUd96+eW/H/Bz\n3nvvHTQ07M/C0fR/hRm8Y0mZN+e8iYj6zIFuCWp7/vk1BRu8C3KpWMdhc2beRER9xb0l6PnnX4gV\nK36IlpYWaJqGG264GWPHjsP//u/v8cor/4Qoipgz51hMnDgJr776Mj7//DP8z//ci8GDzd4dfbEN\n6OWXX+VsAxqLReD3F2VlG1A3Bm+w2pyIyLZ6y1/w7t6Nvfqa06sPxTljz0j7ffeWoL//
/W9x5JFH\n4//9v6/i888/w09+ch8efPAXePLJ/8Vzz70ESZLw3HOrMHPmURg7djy+851bnMAN9M02oOeff6Gz\nDejixVfiZz/7VVa2AXVj8AabtBAR9RcbN25AY2MD1q59AQAQjZobSZ1wwsm44YbFmDfvFMyfn35j\nj77YBrS5uTkn24C6FWTwrgz54ZNF6IYBVTMgCgzeREQAcM7YMzrNkrNNUWTceOPNmDJlquf+m276\nHrZt+y/+8Y+/4lvf+iZ+/es/pHz+wbwNqOfYe+2V8sghwyvw1IrT8fBNJ+DXN5/Q14dDRFTQ3FuC\nTpo0Bf/618sAgM8//wxPPvm/aG1txaOP/gYjRozEZZddiVCoDO3tbSm3Ej2YtwH1nLNefbU8Iksi\nBEHgfDcRUR9zbwn69a+fj507t2Px4itwzz3/g8MOm4GSkhI0NjbgyisX4dvfvhqTJ09BaWkZDjts\nBm677VZ89tlW57X6YhvQ+++/x9kGdOHChVnbBtStILcEBbj1XW/heew5nsOe4znsHTyPPZd8Druz\nDWjy66VSkHPeRERE2dbb24C6MXgTERFlQW9vA+rGCV8iIqI8w+BNRESUZxi8iYiI8gyDNxERUZ5h\n8CYiIsozDN5ERER5hsGbiIgozzB4ExER5Zm8aY9KREREJmbeREREeYbBm4iIKM8weBMREeUZBm8i\nIqI8w+BNRESUZxi8iYiI8kxB7ue9YsUKvP/++xAEAUuXLsXUqVP7+pD6tU8++QSLFy/GpZdeiosv\nvhi7du3CLbfcAk3TUFVVhR//+Mfw+XxYs2YN/vCHP0AURZx33nk499xz+/rQ+417770Xb7/9NlRV\nxTe/+U0ceuihPIcHIBwOY8mSJdi3bx+i0SgWL16MCRMm8Bx2UyQSwRlnnIHFixdj9uzZPI8H4I03\n3sD111+PcePGAQDGjx+PK664Ivfn0Cgwb7zxhnHVVVcZhmEYW7ZsMc4777w+PqL+ra2tzbj44ouN\n2267zXj88ccNwzCMJUuWGC+88IJhGIZx//33GytXrjTa2tqM+fPnG83NzUY4HDZOP/10o6GhoS8P\nvd9Yt26dccUVVxiGYRj79+83jj/+eJ7DA/T8888bv/71rw3DMIwdO3YY8+fP5znsgQceeMA455xz\njFWrVvE8HqD169cb3/rWtzz39cU5LLhh83Xr1mHu3LkAgDFjxqCpqQmtra19fFT9l8/nw29+8xtU\nV1c7973xxhs4+eSTAQAnnngi1q1bh/fffx+HHnooQqEQAoEAZsyYgXfeeaevDrtfmTlzJn7yk58A\nAEpLSxEOh3kOD9Bpp52GK6+8EgCwa9cuDBo0iOewm7Zu3YotW7bghBNOAMD/z72hL85hwQXv+vp6\nVFRUOLcrKytRV1fXh0fUv8myjEAg4LkvHA7D5/MBAAYMGIC6ujrU19ejsrLSeQzPa4IkSQgGgwCA\nZ599FscddxzPYTctWLAAN910E5YuXcpz2E333HMPlixZ4tzmeTxwW7ZswdVXX40LLrgAr7/+ep+c\nw4Kc83Yz2B22R9KdP57Xjv72t7/h2Wefxe9+9zvMnz/fuZ/nsOuefPJJfPjhh7j55ps954fnsGue\ne+45HHbYYRg2bFjK7/M8ZjZy5Ehcd911OPXUU7F9+3YsWrQImqY538/VOSy44F1dXY36+nrn9t69\ne1FVVdWHR5R/gsEgIpEIAoEA9uzZg+rq6pTn9bDDDuvDo+xfXn31VfzqV7/Cb3/7W4RCIZ7DA7Rp\n0yYMGDAAQ4YMwcSJE6FpGoqLi3kOD9DLL7+M7du34+WXX8bu3bvh8/n4t3iABg0ahNNOOw0AMHz4\ncAwcOBAbN27M+TksuGHzOXPmYO3atQCAzZs3o7q6GiUlJX18VPnl6KOPds7h//3f/+HYY4/FtGnT\nsHHjRjQ3N6OtrQ3vvPMOjjjiiD4+0v6hpaUF9957Lx5++GGUl5cD4Dk8UG+99RZ+97vfATCnvtrb\n23kOu+HBBx/EqlWr8PTTT+Pcc8/F4sWLeR4P0Jo1a/DII48AAOrq6rBv3z6cc845OT+HBbmr2H33\n3Ye33noLgiBg2bJlmDBhQl8fUr+1adMm3HPPPdi5cydkWcagQYNw3333YcmSJYhGo6ipqcFdd90F\nRVHw0ksv4ZFHHoEgCLj44otx5pln9vXh9wtPPfUUfvrTn2LUqFHOfXfffTduu+02nsMuikQi+P73\nv49du3YhEonguuuuw5QpU3DrrbfyHHbTT3/6U9TW1uKYY47heTwAra2tuOmmm9Dc3Ix4PI7rrrsO\nEydOzPk5LMjgTURElM8KbticiIgo3zF4ExER5RkGbyIiojzD4E1ERJRnGLyJiIjyTME1aSHKN/fe\ney82btyIaDSKDz74ANOnTwcAfO1rX8NXv/rVLr3Gr3/9a4wfP97pZ53KwoUL8fvf/x6SJPXGYXvs\n2bMHn332GWbPnt3rr01UiLhUjChP7NixAxdeeCH+9a9/9fWhHLA1a9Zg69atuPHGG/v6UIgOCsy8\nifLYT3/6U+zYsQNffvklbr31VkQiEdx3333w+XyIRCJYtmwZJk+ejCVLluDwww/H7Nmzcc011+CY\nY47Bhg0b0NbWhocffhiDBg3CIYccgs2bN+OXv/wlGhsbsXv3bmzbtg1HHnkkbr/9dkSjUdx6663Y\nuXMnBg8eDEmSMGfOHM8exW1tbfjud7+L5uZmqKqKE088EWeccQYefPBBGIaB8vJyXHTRRbjzzjux\nbds2tLW14YwzzsDll1+O1atX469//SsEQcCePXswevRorFixAoqi9OEZJuqfOOdNlOd27NiBxx57\nDFOmTEFjYyN+8IMf4LHHHsOiRYvw8MMPd3j81q1bcc4552DlypWYOHEiXnzxxQ6P+eCDD/DQQw/h\n2WefxerVq9HU1IQ1a9ZAVVU888wzuOOOO/D66693eN6///1vqKqKP/7xj3jyyScRDAZRW1uLs88+\nG2eeeSYuu+wyPPbYY6iursbjjz+OZ555Bs8//zw++ugjAMDGjRv///bu2CW1MIzj+NcONQQRQi3W\nYnBsjDoSBFKNOVaEo0M4REO4HGyrKQin5ob+gDBaoiVyECEipakhWkKkQKFoiERPd5DOzYxLlysX\njvw+4+F5X97tx/PyHh7S6TSHh4eUy2VP3jKI/A/qvEU8bmJiAp/PB8DQ0BC7u7u8vb3x8vLC4OBg\nW73f78c0TQACgQBPT09tNZZlYRgGhmHg9/t5fn7m5uaG6elpAIaHh7Esq23d1NQUe3t7bGxsMDc3\nx8rKCj09rT3CxcUFDw8PXF5eAlCr1bi/v3fXf4xPnZyc5O7uzp2TLCK/KbxFPO7ztbJt22xvbzMz\nM8P5+bk7zOOzrw/Svnv28l2N4zgtQfw1lKE5y/j4+JhiscjZ2RnLy8scHR211PT19bG+vs7CwkLL\n90wmg+M4fzyXiDTp2lyki1QqFUzTpNFocHp
6Sq1W69jeY2NjFItFAKrVKldXV201uVyObDaLZVnY\ntk1/fz/VahWfz0e9XgeaXf3HVb3jOOzs7Ljd//X1Na+vr7y/v1MoFBgfH+/Y+UW6iTpvkS6SSCSI\nx+MEAgFWV1exbZuDg4OO7L20tEQ2myUWizE6Oko4HG7r0IPBIKlUiv39fQzDIBKJMDIyQjgcJplM\n0tvby9raGre3t8RiMRqNBvPz8+6o1FAoxObmJqVSCdM0iUQiHTm7SLfRr2Ii8iOPj48UCgWi0SiO\n47C4uMjW1pb73/m/ymQy5PN50ul0R/YT6WbqvEXkRwYGBjg5OXHnE8/OznYsuEXk76jzFhER8Rg9\nWBMREfEYhbeIiIjHKLxFREQ8RuEtIiLiMQpvERERj1F4i4iIeMwvRph4T/csGFUAAAAASUVORK5C\nYII=\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYwAAAEcCAYAAADUX4MJAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXeAHMWZ/v/pNGlnc5S0ymmFUE6WEAgQ2UJkGxtjsMEG\nbDD+YnNwZ3PnH+fD2GcwnDFHMBmcwETLIiMJ5YByzqvd1eY0eTr9/uie7p7dlRACHQ7z/LM73dVV\n1dXd71NvqLcE0zRNcsghhxxyyOFjIH7eHcghhxxyyOHvAznCyCGHHHLI4ZiQI4wccsghhxyOCTnC\nyCGHHHLI4ZiQI4wccsghhxyOCTnCyCGHHHLI4ZiQI4wccviUWL16NXPmzDmmsg899BC33377Ce5R\nDjmcGOQII4dPjTPPPJNx48bR2dmZdfyiiy6ipqaGhoYGAO68805qamrYvHmzU6a2tpaamhrn99VX\nX81LL73k/H7kkUeYO3cukydP5vTTT+e2224DYN68eUyePJnJkydz0kknMX78eCZNmsTkyZN57LHH\nTuTt9glBEE5I2Rxy+FuC/Hl3IId/DFRXV7NgwQKuuuoqAHbt2kUqlcoSjoIgUFRUxAMPPMATTzyR\ndbwvvPLKK7zxxhs888wzVFdX09bWxvvvvw/AX/7yF6fc1VdfzcUXX8xll112Im7tnwa6riNJ0ufd\njRz+hpHTMHL4THDRRRfxyiuvOL9feeUVLrnkkl7lLrnkEnbu3MnatWs/ts4tW7Ywe/ZsqqurASgt\nLeWKK67os+zHJSx46KGHuPXWW7n99tuZPHky8+fP58CBAzz22GPMmjWLM844g+XLlzvlm5ubuemm\nm5gxYwbnnnsuL774onMulUpx5513Mn36dObNm5elMWWu/d73vsfMmTM566yzeO655z72XgG6u7u5\n8cYbmTlzJjNmzODGG2+kqanJOd/V1cW//uu/cuqppzJjxgxuvvlm59y7777LxRdfzJQpUzjnnHNY\nunQpYGl/K1asyBqHjEmsvr6empoaXnrpJc444wyuvfZaAG699VZmz57NtGnTuPrqq9mzZ0/Wvd97\n772ceeaZTJ06lauuuopUKsUNN9zACy+8kHU/8+fP57333jume8/h7wM5wsjhM8GECROIxWLs27cP\nwzB48803mT9/fi9BHggEuPHGG7n//vuPqc5XX32VJ554gi1btmAYxqfq46JFi7jkkktYu3YtY8aM\n4brrrsM0TT788EO+853vcNdddzllb7vtNvr378/SpUt58MEHuf/++1m5ciUAv/71r6mrq+O9997j\niSee4NVXX3WuM02TG2+8kTFjxrB06VKefvppnn32WZYtW/ax/TMMg8suu4zFixfzwQcfEAgEuPvu\nu53zt99+O6lUioULF7J8+XJHwG/atIk777yTO+64g3Xr1vH8888zYMCAI7bTU6Nbu3YtCxcudLS+\nOXPm8M4777B8+XJOOukkfvjDHzpl7733XrZt28Yf//hH1qxZw+23344oilx88cW89tprTrkdO3bQ\n3Nx8zL6dHP4+kCOMHD4zXHTRRbz66qssW7aMYcOGUVFR0We5L33pSxw+fJgPP/zwqPXNnz+fu+66\ni2XLlnH11Vcza9asT+WfmDp1KrNmzUIURc477zw6Ojr49re/jSRJXHDBBTQ0NBCNRjl8+DDr16/n\nhz/8IYqiUFNTwxVXXOEIxDfffJObbrqJ/Px8Kisrufrqq502Nm3aRGdnJzfddBOSJFFdXc0VV1zB\nggULPrZ/RUVFnH322fh8PkKhEDfccIOjiTU3N7N06VLuvvtuwuEwkiQxdepUAF566SUuv/xyZs6c\nCUBFRQVDhw49pjERBIFbbrmFQCCAz+cD4NJLLyUYDKIoCt/97nfZsWMH0WgU0zR5+eWX+fGPf0x5\neTmCIDBx4kQURWHu3LkcPHiQ2tpaAF577TUuuOACZDln9f5HQu5p5vCZYf78+Xzta1+jrq6Oiy66\n6IjlfD4f3/nOd3jwwQe57777jlrnvHnzmDdvHrqu8+677/KDH/yAsWPHcsopp3zi/pWWljr/BwIB\niouLndl2IBDANE1isRgtLS0UFhYSDAad8v3792fr1q2AJbyrqqqyzmXQ0NBAU1MT06dPByyNwzAM\npk2b9rH9SyaT3HPPPSxdupTu7m5M0yQej2OaJo2NjRQWFhIOh3td19jY+Klm8t57MQyD+++/n7fe\neouOjg4EQUAQBDo6Okin06TTaQYOHNirDp/Px/nnn8/rr7/Od7/7XRYsWMCvf/3r4+5TDn+byGkY\nOXxm6N+/PwMGDGDJkiWcc845Ry176aWXEolEeOedd46pbkmSOPfccxk9ejS7d+/+LLp7RFRUVNDV\n1UU8HneOHT582NGYysvLOXz4sHMuEwUG0K9fP6qrq1m9ejWrV69mzZo1rFu3jkceeeRj233yySc5\ncOAAL730EmvXrnV8AqZp0q9fP7q6uohGo72uq6qq4tChQ33WGQqFSCaTzu+WlpZeZbwmqjfeeIMP\nPviAZ555hrVr1/L+++87ZsXi4mL8fr+jRfTExRdfzOuvv86KFSsIBoNMmDDhY+85h78v5Agjh88U\n99xzD8888wyBQOCo5SRJ4uabb+bxxx8/YplXXnmFxYsXE4vFME2TxYsXs3fvXsaPH/9ZdzsLVVVV\nTJo0ifvvv590Os2OHTt46aWXmD9/PgDnn38+jz76KN3d3TQ2NvL88887144fP55wOMzjjz9OKpVC\n13V2797dyzHeF2KxGIFAgHA4TGdnZ9YMvby8nNNOO42f/OQndHd3o2maY666/PLLefnll1m5ciWm\nadLU1MS+ffsAqKmpYcGCBWiaxubNm3nrrbey2uzpY4rFYvh8PgoKCojH49x3330OoQiCwKWXXsq9\n995Lc3MzhmGwYcMGVFUFYOLEiQiCwL333ntUDTOHv1/kCCOHTw3vDHXgwIGMHTu2z3M9MW/ePCoq\nKnqF3mYQDod55JFHOPPMM5k2bRr33XcfP/nJT5g8efIR2/808NZz3333UVdXx6mnnsr3vvc9br31\nVsdHcPPNN9O/f3/mzp3L9ddfz8UXX+xcJ4oijzzyCDt27GDu3LnMmjWLu+66q0/NoCeuueYaEokE\nM2bM4Morr+xlZvrFL36BLMucf/75nHLKKTz77LOARVL33HMP99xzD1OmTOHrX/+6owHdeuut1NbW\nMn36dH7zm99w4YUXHvGewdIS+vXrx2mnnca8efOYNGlS1vk77riDUaNGcfnllzNjxgzuu+++LNK5\n+OKL2b17t0OuOfxjQTiRGyj927/9G4sWLaK0tJQ33nijzzI//elPWbJkCcFgkHvvvZcxY8acqO7k\nkEMOJxivvvoqL774Yq8Q2xz+MXBCNYxLL700a4FWTyxevJja2lrefvtt7r77bv7jP/7jRHYnhxxy\nOIFIJBL8/ve/58tf/vLn3ZUcThBOK
GFMnTqVgoKCI55/7733HHV+woQJRCIRWltbT2SXcsghhxOA\npUuXMmvWLMrLy5k3b97n3Z0cThA+17DanuGJlZWVNDU1UVZW9jn2KocccvikmD17NuvXr/+8u5HD\nCcbn6vTuy32SS8yWQw455PC3ic9Vw6isrKSxsdH53djYeMTVwV6Yppkjlj6w+1AHtz2whLOmDeLW\nKycdsdyFP7BWLL/+y/m5cQSu+veFdMfSnDdzCN+9vO+1A5kxe+JHZ1NREvq/7N5x48ofLSAYUHjq\nrqOvifm/RmYsM3juJ+dRlO//VHXe98I6Fn1UR2VJiN/+6Ow+2/vpjbOYMLL8U7Xz94LMPT/6r3N5\n8vWtrNrayOCqfB66/cxPVe8JJ4yjBWHNnTuXF154gQsuuIANGzZQUFBwTOYoQRBoaYl8lt38u0V5\neb4zFoebrL+xeOqYxudwYzeK/I8TWe0di08Cv2KNQWdX4mOv37anBWFoyXH17/8S5eX5pFQD01T/\n5r+Vg3UdqGV5n6qOZMpaC2IYxhHvt+sYnu8/Glpbo6TTGgC6/ulyscEJJowf/OAHrFq1is7OTk4/\n/XRuueUWVFVFEAS+/OUvM2fOHBYvXszZZ59NMBjkZz/72Ynszj88kpkXwzi2SGlVM/6hCON4EfBZ\nn0EipX1s2cb2OGP/DgjDNE003cAwzL95jTyaUP9vGjpxKwj+ZqHrn+09n1DC+Lg8QQD//u//fiK7\n8HeFV5bsQ5FF5s0aclzXJ9M6AFqPl0TTDR59fSunnNyPiSNdDU7tMeMwTJPH39jG5FHlTKv5eNPg\nkbB6exMbdrdy/YUnIX4Ggmr7wQ4+3NjANy4Y84kJ7u3Vtai6wRdnDgFg4cqDxFMal80Z7pQJ+qw9\nIHYe6uRXf9rI9fPGkB/y9VlfY1u8z+PHCtM0eXrhDkZWFzF7fL9jvm7jnlbW7mzmG+ePQRSPPqYv\nLdpLSZGVB8swTVTNwKdIJFIaD7+yme64ypTR5Zw7bRCPvLaFc6cPomZwcVYde+q7+MvyA5w7fRDv\nravj+nljHGI9XpimydKGlQihbsy4Gz0ZswnDNE1eeGcXowYWMX1MZda1ndEUzyzcwVfPHkV5UZBn\n39zBkH4FnDahP0fC+l0trNja2Ov42wc/YEndCn404zaCspWRIJnWeGLBdubNHMLgqnyn7CtL9iHL\nIudOG8hv/7KNc6YPQhDg7dWH+OYXxyAKAk/+dTtnTBrAqIFFR+zLB+vr2X6gHd0wue6LJxHwSTy5\ncAszavpRVhTkpUV7+cYFYwgHlaOOoWGaPPfWTsYMLu41Ru3dSZ57aydfO2e0c+wXv19Pfsiq87Og\njlzywb8hvLH8AMBxE0bKJoyequfO2k7W7Wxh3c4WnrzTtWFqWna5uuYoq7Y1sWpbE9PuPH5b5yOv\nWUn65s8eStVnYO//799b0TcnDyth1snHLmQB/vC+tZdDhjBeXLQXIIswAn7rM0imdTbva2PBioNc\nOXdkVj2CYE1Qu2Kp47qHDJJpnQ83HebDTYc/EWE8+NImAM6ZNoiBFb0TEHrx15UHs9tUdXyKxKHm\nKFsPdADQHUtTkh9g4942Nu5ty3ovAH7+wkfohsmmvW0ArNhSzBmTq4+5v31hS9t2/rDzFQKjAyTW\nn+4cz2gY0YTK+x/V8/5H9VnCcEf7bl5eu5Y9eytpj2zmrmumsmhDA2xoOCJh7O7Yy0MLt2AmXOGv\n2xrGa3sXAlAfPcyIIiur73vr6li3s4Wt+9t5+DZ3hX3mmxxVXcjanS0E/DLLNh3GBEYMKCQUkFm9\nby8fRRfxmytvQBazReob+94iko7y7lsuIb+5+iCD+iusDz7HmlWDKY9PpaE1RuHivXz9vBqOhuaO\nBIs3NLB4Q0MvwvjtX7axo7YT5QN3/5JoQiWaUBH8cUw+/beYs0f8A8HRMHqYpOJHMLX01DAy139W\n+KyTCLR19y2sNUPrs62jte89F/Rnf+R9jYMsiUc890lgfMoxMY7R3OhFMq2zs30Pf2n4MwhW/6MJ\nFekomkrGrCnkdYKkEgpkz3y70xHePvgBunHs47Hy8DrrHymddTyaUDFMgx1te+hrHvzrDY9TL68H\nOUVje5y0mmnT4L9W3c/Le/6SVd4wDR5Y/yiBccuy6lM1g7Tumr/SejrrnFjYgjFkFRtatvTqQyRu\nXdfUHndqTKQ1UqqO/+RlyFUH2dDcO1/YmwfeY1nDqqx+mCbsi1i5vuSqg6iabtenO/3f3bG3z/e3\nuaNvDVc3dGqL30Cu3oUiZYt1sbCFwIQlJPx1fV77SZAjjL8RaJ+BQyp5BOdW0kMY3pdQ7aFhpNTP\nmjA+m3qCfstk1BntTRgN0UZuXfRvvFu7uNe5tOreX8/xTXvuvafZrOe4eMt8WsI4VptyR7KTrW07\nevUn84w/CZIpjf/Z8Bh7YzsR861913XDxMRECHXjG7GepfUre10nFjcSGLsSZeBOeloWf7n2N7y2\ndyEfNW/qdd2KhjV9Po+D3XZG3VS2hhRNqLy463We3v00UqmV+TcSt4R5d9p1Uguyagl9ezzEgg4a\nYo28V7skS1uOqwn3HvLbnf9VzeBQpN75HVOzha/cfy9SUSsv7sqO4gKIJlUEX5zD7VGHaFXNQNMM\nBNF+pj0GyUtOSO5zkyWRjnRH1m9w39E/7XqNB9Y/yrrmjb364TWJxpJu/S2JNkx/DKX/Pgrzss2p\nUqG1GDoldvWq75MiRxh/IzheQaRqOnsbupw6hGCEpsKlJDUrpfXuuk7W7Gy2CitJNh5yzRVb9reh\n6Qa76zqtvReSfQuj7liaw22xY+pPbVME5DRCsBvdMDnUHKUj0lvQ17VE+WhXizO7OhqKwn7kfnvZ\nl9rE4bYYLZ2uQFh52MrY+vq+N3td53Wm9iTDlGe89R47+Xn71BlN0dQedz7mhJo9O/44dMfTNLS6\nY9fXxKC2KdKLDH+8/B4e3vgknamurPtNqTrN8RY6U9kf//aWvexv7OxTgzkcdVOaC7Lb/2hCQ648\niFTSxO93vuwcb+uy3h2l2kojLxa0O+S7pXU7d3z4/9GWtASxZljvzKHmKA2tMZYcWM/zO17klT0L\n2LinlbSqs35XC3/e9i4dKYusDDF7DOtaoyypt7bHFQKWQFy5rQnTNNnZ7ppXBMW6LqNhSCWufyLz\n/bR0Jlm5y33HMwQJ1thn+g0QVd3nEjHakeyykXSUVdsbWbuj2Tm/uHYVgYlLSBbudrSvnYc62RV3\ntZGE5qaRB9hS56ac9457Q2uMpqSVHNLURYcwDhyO0NAa48N6a0vdPa3ZGoGmG/z10EL8J60ATJra\nrfdibeP6LHI50Nht/aOk8I34CKnEakslu3/Hg5wP428ExzNzBPjv59exYvNhbr9yIsm0hlxeRzJY\nx57O/QwMDONnz3/klA1OWsTjexYB5wHw4gd7WbDccgJ/4/yarFl3Bg3RRn76wROk9kzgkVvOR5
Gl\nI/ZFNwx+8tQafKM2IxW1cChaw2//ZM0Ye9rI//2J1QB8/bzRnD7xyNuJAsgSyAP20KqH+NHjpVn1\ntdtCqMTf2+HoJYxkSifPY1ZJpjUK7JmYN6pMHrCLneHFrG5UmV41mcde30pDWxzdMJEqD9A+eAeH\nIoMZmJ9tO09oSVJ6iiJ/YdbxH/5mGZpu8ugPT0eRxSzCeLd2MZXBCn711OGse1IN913oSnXT2elu\n5BRPqfxy3aP0z6vi+5NvBGB72y4e2vxbzHSA6092d//LoDHu7gsu+JKOP+ZQpA653J1xx9UEISXI\n7f+7HDAQApZANVNB0jaJvrxnQZagTWgJuqIp7n56Dbphogzajmwnb3jwlbV8oWYgK3cdJDhpkdsH\nOTsqavPBRoKZva1MS3j+/t3dHPQvYX2LZ5ZtC92uWBowkIotwhBMKWvC9YclW/DXZNpyiVjVDKIe\noo157mOT9h7YCoJu6jz6149AzaToN2kOWxqYWNgKh4cBsC+6C7/hrm6PezSWw20xHv7ravwZ/7Os\ngt2VNTua8U9oQrSXnkiS1XBbd5IfP7GcoL3X1vvrGri8xnAIZdX2w6jFe61ZvpKmuSNOVbnCU9t+\nnzWeO2o7AAGl/16kEpf0eo778SCnYXzO2Nd1gOe2/4l46thnrpqh8dy2P7Gv6wArNlvCpqUraWkY\n9kee0JJEssIVvTNPj/qe0kDUeLnlCbZE1/Zq65FNTyHkdSH320c0cXRSy3y0UpE1o13ZvKJXmc5U\nF49vfg4hYKX7jnn6uKN9Ny9sf7GXXTxhRhBEE1Po3X5H0iKM4kBvwuiMuTPzbEI2Wd+yyTGneM1E\nctVBNCnBxpatmKbJwaYI3bE0CAa+wZaJ6EB3tlO5NdHGv3z4E/5r1f29+q77u5BKGhytRbXbEnwJ\nXtmzgEc2P9Wr33s79zv/d6W6s3xQjYkmYmqcxpgrCFoSrXadSZ7Y9Thi2DV3AFnaiOBLUhS2JNVH\nvJJVzjv7FgJxx8IiyKqjYfQPV2VdE1XjtEdSrs/D7wpNQVGpa445moFzXNJBcN9B7+wbyX0fssgC\nMIwonQdWEImriMXNCIodXSXoJNLudVnt2f/Xr36SSDRKV6o7q+8ZJOjEiBWgHh5i1eFzZ+OZb6on\npEIrIEA9NArINnF1x9IIfvf9y+qToCP4rHOCZCDJ7jsj5nVnXdNqa3sHuw+xtMPdbEzwWd93T7Oa\n1THrfTH17MndsEFH36PmWJAjjM8Z9617mJWH17KtfYdz7OOcxR81b2Jl41ruW/ewcywvoPQgjESW\nDd/7ASBlCzUx3ElajLBLX96rrbakJXxM1Z8l3PtCMmU77aJWyGRnuqNXmUc2PsWGls3I/Syh6J3d\n/3rD4yw/vIZaj50ZIClYH5Ep9m6/w+6fX8peKdyZ6uKJgw+gDLYitrwzUKmsntfrXubJLVYKbs0x\nSZkgWuW60910x9Ik7HvyCkJnKmpjY8tWDNMgriWIqO6+F2lVJ3DycnwjNtEcs4SLrhuI4Q4CE10b\nf0Z4uPW5Zo7udMTtu6RSn7DGLaJGHRu5LGY7pMViV6MQi5pZ3f2+p62ko1n1RGui3Xaqm/hGeyYP\nctrRMCQhW2TE1FiWJpcxKYFFBLGkmkUIercdLeSx6XuFqSBpDKrsOwrM0KN0HlxBJJ7GN3gbpiFg\nJKwFfwnNHXdBSXn+t+oeMP2byEowizAyGoZmaJiCjqkpmGlLqGa+F8GXIDB+qVufl0jsdvRua11O\nTHPvfWf3dnxDtmeNhXeMvO4OFY/PJeya0JDTNLZbdf732oeoVbdl9SMaV4lrvQnDuX+jhzVA+vQa\nRs4k9TeCtMdubpgm0lHWL/S0lYJlDoqnUwglCadMWnDr9M6SBEnD1F0hk0UmR4IufewCq2TGTyBb\nwiClZ9vldUPnUNQyUZmqJeD7cgKrRnY7qhSxZjaiYc1MbbOFZmh02U5Rb8QLQF2kAR0NufIQWtNg\nkmndIWKxwBLeJiYJLen2QUk5H3JXqtv5WCF7jBJq9nh5TRGdqS7HLNXs8T3s7NjDkNIqVN1ALGrO\nul4saEdvtcxyHzVvYkm9q5l1pSOItnYUnPIe2zy32ZnqpCJUTlLLJhzRQ27+UR9lnUNJUZTn46BH\ny0zvOxnfsC00x1qJhVWEvG5Ev0cwejSMuN3WndO+z71rHiCmxok6zl0zezYuq0SjKkKBdX6wOpO9\nyUNQ0IEgq5iaDyGvC/+Y1VltDanKp7ap94ZTzWtXoMbbePBn38McahKuGknzijUopSbJg1sYdOoP\nqV/zDIZQj2molM0cSPFJFqHse+9nbJ/4bbbVr2bfcxvJG1TE/ob1bB24ih/ffbfVgC5jpizzX6xp\nOy2rX8IUUygb0wy+fCxSyI9pxmnc+CeSnXWIqxNUnjEIfypE9+42/vT4U7znf5WioiLU+WEaP9iP\n5JMoP2UQgpzmwOL7GTD9GyhVm9jx65WEhxQTr+vGd9ZYmjavJdl5COQIReNLqDpjKIKSYv2GjTx6\n3wvsbtmHKIsMu2Yi+5/fSMXkcqLJycRUld2/XUf1haMJ2kQrKGnMZLYJSjDFLBPc8SJHGJ8Cj72+\nleqKMBd8YfDHll27o5n31tXx/740AZ/S2w+wfHMTYAmaX/xuPd+/YgKvr9iNIMCX5libSv158V40\n3aB0RG/BrWoGcbPLEXhr9zTQb6ibF0kIel4WMVvD8M4KAXx2qoy31rqmEUSjF2F8uLGBrQfa+fb8\nsTzxl+2I9uRTsGePqulKt39/YhVCqAsyoeO2SSKjYUTSroCIqjF+984u8kMKF8wcjOGLuqqwpIHm\n44/v70aVXVNLysgmjPakO1MT/AmSac1pSwy5kTc/enoRna3WjNtLCm2JLnYeyq7D6V86gW4YPPDi\nJmaMqaRRd/vRmewCe01aoydQYE/3Ps5ltkVOerZGkJkRNnXEeXbVe5AHp/afyYcNK+hOdRNIZZtw\nMuhIdlERKidhE7PW2h9faTOGp6+mJiPYBB4S84kF4hT4fM4MX2+vwIhZ791bm7YxavoUZzast1eC\npCEVtpHUUqRUnX2NbQiKyLLVUZIb57DCFFlt2DNpSSO1cY6nbQXDMKCznOSGOew2/OjmaLSG4Zj2\n7NdMB0husK6RShoRC1sZXFUAG9090zOonD2BZFOCC274Aet5lc51PhLNbQz68jTMhjkYUeh/6lxC\n47dgqDq7H11HwUjL1yQIAjvTFjGl2xMMu2ISgeIyzPc6efbFV2EwmLqMmbSEbv6IPIK+m5FKGoh0\nvUnj2+30O30cTWtWIPoGMmTObfgnLEZPpkltTlP3wQ5GXDeFSmEW3z3lbH665efW/dn3KfhS9t8E\nUkE7qbYEA848jeoL06jpA5TVnIevsgNl2Eb2PrmZxKgkgao2nv3lvdzz01/wh8hLpFIJ9JbhlExp\noWvPFnYWnsaqvRswdYNgZRhTlxAkHd+odSTXneU8Y
/XQSIoGtmWZ4I4XOcI4TpimycptTbCt6ZgI\n4+FXLTPDtgMdWautM2iLxsgQxu66LhZtqGeJ8RSmIfIl7sU0TRassGznlw/pHVmk6gYJwRVcta0d\nxPpZAr60wE+XR033mgPAQLRnhULm5bZZ509LNxPIbJ8t6r0I46mFlhlt3qwh2StqbdVX8xBGXUsM\nqbKOjDFEsEkrE6FUH3UFRDQd4911ll3+zMnV2dqRqGHi463Vh5BKDuMbYR1P6T0JwzWHCZJGMq1b\nhCEYCEGXnLrT3UAZYKIM3OVWIBi8unwXYAn3LA0jneJwa5yt+9vZur+dorFNYKdC6vD4Cxq6XZ9A\nl+2c13TDGZ9Qqpq4v85x5m7Z105STyIBs8pO48OGFXSlIwiq3uOZ2fdo15mJiNOaBpNXHEf3R7F8\nVgJmOoAgW/dbKJYTV/bhl9IOqZu6jJkIY2oKCV8zb66qdWamelcpYoF1D3E1wZZ97cTVJAISopKZ\nHRhoug6IzkTE1BS7jky4qfXXMHCseYKoW85eUUQ3PSHO4U5KigVkScA0rUhVpWkcauVmRJsIu9Jd\n4ANT9REor8BXFCDVbOeSalzHzqUbQVNIR1Ko3d3IFbVWqpRoAUIgSaikADlcjq5ECBcNYMee/UiD\nsTSMZAgVe2coAAAgAElEQVQjGUIz66lb9Ti61g5SAtlfweDScvbu66BqwjzARPSlkM0COjtqCQ8p\nwlcUoN3YwMrdE62bMUFrGoSpg1RxyHr37O/QVxBEjp2BEVuBmNdN5PA6utd/CIKK2mmS6ugCOQ+l\nSCQiFqGhQrICvWUoRSfvo+n9j6gfGKH90DpKJvWjguHUbqsiMG4ZgqRbwQ32M9ZaqvEPitOuu+/j\n8SLnwzhO9Ey/cawwj7RAX8wWCM2dthAXrY/Jigyx0BrrnUBN1QxU0bXPIqnUtViC4sJThhIMecxT\nGR+GqBOY/D5SiW3zNi3C0DQDwzSz7bWSSxh7Ovez6NAypLI6lOEbaenwmEQE3YlL1/EIDUCyTUHW\nj8yqdOt8c9wN/Yx6/AD3b3jIcS5a11njJPgS+Ea4TtGeJikvYZAhDN1AGbTD0sJMwa7Hnvn540i2\ncDRVW+OwP24hrwtlwF6nuriazHqKpmchmtdG3p5w+xDXrdmdphvOhyx1DrTqtwV0bVPENhdKiIYP\nRZTpSnWRTGsIHvtzRdCacOzptBZ/ZQgDTYZ0yNIoMuYIu63/N/kmCgQr3Uta6XDMhugKgyrzMbqL\nEf0JyyeUaUtXQLPGIqHHiSbSlilJV/jSmSMITFxMYOJifKOsSKHAycvwjV6LGIoQmLgYuf9+EHQC\n45ZaPht/FLGgzbnu9usHMe2sJue3MmgngqTzSsPvqShREATQO8vpPjgAvbMcMa8bQdSJaDYpaz5E\nyeqff9RHjCnvJNVcx8hvT2XIWTcQrCjC0HR8Q2zbv01oAbEAIxFGEE1SZoqUaj3nYZWlgIDRVUb9\nwu0U10xi+BWXUX3haEzN4LSRYyw3V7iLqgofCAZjq/sxwJM4URAN3k09ab0XyUICYhCtfgSCaODz\nPBdBCFptRYtIdyTorHuHEdePY+T1c8grr0HrtFaoC5LGa13/a9Wn+UD1kxcIkz+8hGjzRiJ12yke\nV4nZWo2ZyEdtsFauC/6EO8nQFQrkEnxS376rT4IcYRwn+lrcdSS0JToITPwAsbAl67jXuS30cEQf\naM+2cze1xwETqayO2m7LKWwa7uPTdANN8URYSBqHbDuwTxadWSzgkJPgSzjmCsg4lU10w6S1K5nt\n2/BoGL/66H95cfdr+IZtQS49TG17b4FuNYBr/pJTiIWtGCnbqZjRMOwxaEm4dURSrkbRmMg2TWSE\nrVdLKPIX9iKMTMy/dY1KMq2hGabjP0jvH2t1K2Bf57H3aq22GcMmE9leTJZBQku6z05Oo/pcYsi0\na5om27QPneMZ56Smm05biYjlx8kQxsHGiDV+ukxKMygJFNOe7LSc3vZzUvQwd07/PmXBUtY1bSCu\nJhyflqnL6HEr/YMYtLQMQUkR0ssYUTSUAsEimqjQ7BCQqcvkh3yOWapN2YNUbI2RqfkcX1Pc7Ka2\nvc0aE122khp2WvZFQUkC1n2Zmg9Ts7UyOY3cf58zjqbmw+ioRK21Yk3rY41Zi+wyaEk24Q+a9jWW\nEURrGYDokzH0JHHDevam6sfUPUYSqRMpKCPKIsm2CPGG7KALQdIsTSqlOSlDkkYcVbfGNuy3828l\n8zBSOr5CGUFWad/QCKbA2LIa8keU0LlvFSm/pVH7NR9SuJrI7iRp22el2d+JEixGSDRhJMLEGyJE\nOzsQ82yys18fMxVET+mIPgnRL5FuDBJr2YnePJBgXn/UaJp4g/Vd63ER0zQZVjCUkmkVtGx/hdCA\nAqSgQn2T6tQHOBqGaYhgikzNP527Z97Za6w/KXImqeNEz7QaALXddeyvTzBr5HAUWcQwTdbvamWX\nsRTBl8I3YgO6foZTPsuM4o0aCUY4rLaR8XRs3NtMR0RFzG/HN2wLhzPWJU8UxKY9bZgFUTAES1BL\nGrV1liaiyFLWLNgRut64bDUAStIS8IZMXXO0F2HEEmqfC+0aOj3CWe5hOpE0MGTE/A4E0URrHYA4\nYK9DJJpmsGxLA+vbXHNQdyoKFPRpt3c0DJsAvzr6Mt7Zv4yo1sq2/W3UHe6iJXWY/V3uoilkjb31\n3fQvy0OQVYxYAUbMcjTkF+qk6tyxUA+NtGZyeKJNMuTWVerY8zNrVvwnrcAUVcsMI2mOZlPb1kZM\nsEjQSAVI+ZNEEylWbWtyxj/SKRMwcQiktjlKoFrDTPtZta2J4tJimuIttGh1jqCRuwdxoD7G2KKx\nLD68hHV1u0lmggt0mURHPkopSOWHEIuaEUSTZERm8YZ65GQZpilRl96NMtg1H+UXKZjddhK+4u3O\ne2dqCoYdAXQgtpe60IcgWOGaq3c0kd49Cd+kd0FWEfwJS3NTfZjpgKUlFbZhJj35izQFENC7SlGA\nDc2b2W1rST1hZhb3ZQhB9SGHFEJVFax78vcU1BTiZwiioZA+WINv8A4aBx4kujRG08OrkcVWAkWD\n7f6bCIL1jI2khKabTnRVVypCLKVRSMhJRGimglSeMYT6hS+jbA4QGlhAyhQp8hcy8NRx7PvrWjb+\n+SEQBUZdWU1Mq6by5MvZ/7s/ASZyno/h10wkXF1Dqm45+xc8R94IEX9pyF33YgduGKkgwUFhgv3C\n7HxoNUqgjGDxUECkIDKRwZcdpv4v2zE0A8HMY8DkGkYVj2Br/21IAZGSSVY+MkO1EwymLcKQ++2z\nfJP2+IV8PvJ9R89BdizIEcZxomfiPt3Q+fna/wFg+66vc+NFJ7N002GeXriD/hNbwQcYEgnPegBv\nDLXgIQwrB46LB1/5iEElJb3CYREM8kM+IvE0O+vbCFRGMOMFVtiepNFt57/xKyKm5PVhZMI0M06x\nUYihbqTS
RkfAb2/Zh1LtWWVraxjrdmVrSQDN3RHIiJketnZB0jBVV8CbSVt9t4Xwxr1trGhfhNLf\nDQWNZKI5PGYYPVKElN/pjpMtZNvadRpbU0gFGnc89CEgIA/cgdLPQK0bgVK9B0HS2LCnlQ17mwlO\n0zE0BdNelCVlTFKOTV9xZtWCkrQCADIhlgemwIS3SeopVDsiTAzY8fSyiqiFaEtYZq3/+v1ylLGg\nNVdbZORP8r2H3wfVj2+MhmkIFuHrisfcZIKkYep5vLu2Dt+QBFIFNBa/j8+ORu3qMvn579ZTPcKA\nEvjj7j9jyvYs3RTRuotQALnM1czSCZln3twJgDK8nEhpI2JGjusy4aAbTpoFTcFI52OqCnKFu+pY\nDEb57RvbMUxLQImhCIEJS6wuaAoYMnpbP+sa2+8gdw90TJ5mMoxihhyyEBD4cuVNPL/5dacdof9u\n0HA0iAyJV593CmJBO4KskvxoNGd8fQ5rDlkmp6SQoPqrJxEQQ3SsPM1qt3wHcIARX/4acngHkhxm\nyJzbMFWLgMMnjySUCgHbCSlBQMNMhigcX05hjbvZ0ujpVwKQ5ytm0KUnOcdPH3cm+Wsklqo6pefN\n6hGZlMcPf/Rz/ufljwhOfTdraIecfROyJqLb79qgS6w6UzunYHRZ7QbS+YxWvkno238EQG0Yilbn\n46Sykfxpg/Xe5o8occcd8JsWKYghWwuzAz0CviMvuP0kyJmkjhM9NYxme/EUwOrtlkq/z07Z0RW3\nPhrTELPWA8Q0T+RSH07NDARRo7Y548j0HJd0CsLWiyLmdSGIJka0yI6McV9cRRYxPITRU8MwNcX5\nMDPn6hPZi9MyUVKxPhbvtXt8KkLPWO8+2hJN2THBdcfSjo9CShTjl3xE7Ygp12nXH1oH91lfdwQw\n7HmPTUKZ64yovZgv0yfJ7QOaYglYKZFdRpO5cJplMhF8KQZV5CMoKUxdol9xIaYukVCTpDSjlwYk\nqnl0pSOkdRVDsAlS87k+EZs0BVm1Z36C7SBOO/0XBNOZFRqpIL1gR1jV19rCV/aadATQ/Jjp7DUp\nGQIEMDqzd5zLmKRMtTdhmLZGYESzU5+bhuSkIDHV7Igv2bTqccYea11Ov9gpngpERhinuj8xmTVm\nEN+ddhWTS6xlzo3avqz7dbQ+XxLBH8eI53PV2aO5ft4Yrj9nalYf8hU3Q22GCH1DtyJIumvCsutD\ndgMA8ny2hpHuPe43zLOiP4JiftbxylAFXzt7FP9x7TR8PVwERrSIQRVh9/304AeXTWf2uH4Y8QKM\nuFun939ZEqgqDZHaPg0z7XdCrzd+uJbdj6+l6qxhDM0fglQ3BQyZorCP//z66VntZL6THGF8zujp\nw2iI9g4DzJCDKGUMltmE0Z3yOK+dUNc+nOJStiD0oiDfeoQZk4UeKbY+fiXtCEFJBiTVTQbo+BWs\n8yWhsPsh2W2kNDvssqvUmg3bGkYqbc+OPfDaoTMLpRw7dg+NwNR8Vqih2Nu0JR6YSZG/iC579ud1\nzJaGM05AeywywlfzuStae2hOVeHSrD44JKpbgtBn5qEKcfucq2GcNtZyHKKkGFQVRvAnMdMBqsvD\nln9BT6NqRtaCO61xMELamrZ/cOhDN4eRprjCTk4jD9yBGIw54ZboEoIvbZmQQvYCxcysug/CyNj0\nzXQAI9l3umovQQCOoAHQu0t7FBYI+qS+NQy7jxnzHcDs/jMYFHfNqpnV1hmU5RXafXClp5EMUxTO\nlqaFRjUPnH4PY0tr+GrNZUiiyPjhpVQXZffPebaagmlaa1YEAcx4mJrBxUiiyPQRgykLlCCLMpMq\nxnN6v9Pd6+PZAj4gWfdZErLfJ1l1vpOwL5R131747b1A8noQRnmwFJ8iMbgqH9MT7ZXeOx40n5OO\nRu8qRcKtt6a6wtJeDYnUllMY4B9ivZeeZyeJApUlIYxIKckNZzghv+ef/0V+/L8/Y9bsU7l50nVU\nYKXqLykIUBLOo+DwaaS2zbBuJWb199PuZZJBziR1nPDmBDJNk4Zo741aMoudRMkua4hZSe/aeoR+\nWoV7C1JB0qwgyZ4mKSAUykT72I7PVBCjuwQpvwMxvx2js5LX619yzgmBBMqAvWjNAx1tYES/MtbU\ntme1ldRS4AO1biT+0WudKKmkpiKIJnpnGXpbf3zDN3kcxiZyv/2YJugd5cjlDS7ZZcpoCromZt2n\noKQQ1TxSSYHSQDFN8WYQNQTbOW9qClXFhXR67tNLQJkP3Aq59Ttj6RdDCAh9ajkAQTGPLpoAN9QV\nXabAlw+m1a+64GKEtIqp+qgsCUKHhGqmSau6syq3Mj2eA7X9CA6tBaxEiMpAPP2zSF0qbUSusHwr\n7sI46/n5hm51H6pNGHpXH9sVO2s4BFJbZiGGO/HXrHVs8pBNGKmdUzBTHmJRA4wqqGFXtxUSbSbz\nrDDqPoRkpm+GZ0+Jr9RcxsPbNwMtWWOZQUAMuved6U86QFFpNokZpokiynxnwjezjs+pnoUoiAzM\nH8Db+5ew0dGIBNAUh6CMeIEVzAGIgsh/zPwXTNNEEiUi8TTPYq3OHhQeTO0u3VnAWJIXphsYUFrI\nLl1CUNKIpjVpKA4UAJZ14Cv9b+Cpv+5AGbKNEZXucwhIQSezzpSKCUiiO26Z8GC1YRh6mxU4kVnT\nlN45lSvPG41QVks0HUUUxKy8bFcN/xq/fnkzCVxLgCSJR9xPZu6g05g76DT7/oWsv/5UBUY0yuDO\neezYZ1kxPqudNXMaxnHCq2E89PJmlh305sI3eWt1LRv2WGaqhO7amJdsbOAHv1nGDx9exs5GTwqM\noxBG5pwv4LaZSbFQG7TTPkiuQDZsQZMJE21OWmTmnWlKth0YYOzAKlcjsDWE1qgdhaTLljARdeJJ\njZitTZi67NEi3BBOMRTB6C7FiJTY5/oQ1rpkCUw5TSaSRzaDpFSdPMme+fkTjoYRkP2UBSqsuPxQ\nt3WNrWHEYgJkZqGiq2GYhoBf8hOQ/S5ZeUgLICSFARPfiI1O/itTU5BECUH3I/iTHEpbfhwjUoxf\nkZBQMFDpSHXgG24984BZCAjEunrvlmZqiiOwM2QBONpeJmIo65oMKegKqe3TUBuGec65c7zivDyM\n7jLmyNeS2jrTLWObpExDdOzhXnxp6JepqL2MxPrTMZNhR/CqDUPRGgdz57Tvc8fU7znlja5S9O5i\nrj3pq9b9emar6r5xaI2DLU3UhALJ3r5W9RKGv1fK7SPt6xGQA5w9+HRqSkZy3divY6ZDVBRnk5Bp\nWuG2smffB1EQHeHtTTJZXZ6P0VnhaGahgFWmqiRkmQP9CcTCVoRUPpVhV7vpX1COmQ6S3jWF2cXn\nO8eDinsf3zz5qqy+OyHzpquBC4Jgj69AMmUwp3oWXxx2DuDuJW/1J8+a3HggScIxbUCW2aUv8zez\njsqnF4Lmd+r6LJDTMI4TXsLYcOgAgWKPM1jS+OP7vdMyI2pZi98+O
lBL5vtyBGsfZqfMMcVnkAZS\n26chV1o+hiitWAI0EyapYCZsf4Q/AZjEtThiogitYTi+/vsxRZ3+VQqqItEFDC4rwUzYaQWCEaev\nAKP6l9AgKhiiRgroiEUhz1LfC0tLaAIKC0UKggaGqNEMmKmQa4bx234a2TaJ6bITIRKc/L6V7E0A\nn2l9GPFuxel7hoiCcoCg7MfszkMq6CAw5V3XBxLVPFFNacyEPV6GjF+WCEgBYpLttLZJZkBxMeWF\n5ZRVRjncsMddh4IrkAv9BXTq1jM1Yvmoh0YhjxbxC3kkpC7WR62QWb2rlCADgE6MVO+Pe9zgKjZu\nVjENwVmfotYPR++01kQU0p90ohgj6AkB9chSI1JKuTIQv5yiRavP0h5GDypi5dYmGpu1LDt55h6O\ntD+SLIkEfYqTjfULY6vYUduBblQwqDLfycT7pTMMJ6tvLDmKaVVDgGx7uJkOotZamQgQNQomlAP1\nWRqGqAcJBrJFzbFsJBXyy5wzbSBD+xVwsDHCYqx3yegu4dSThvQyczntiQLzZg2hrDDA5FHlmILA\nTsqI0UhC7GDK6GmcMq4fi1f5EPyWGfCU6slZRBgOevrr6eqXJp3G3sV7OXf4ab3a/ebYq3h++4uc\nVDwJIRxkUKU1+fnxNVN5fel+5kzMznA8fngZm/a2MbgqH38fPgZZFKksDvGFkyqtRcJHwNfOGYUg\nwFVnj7Lv3zpumCY3XXwym/a0UlHUhz/sOJAjjOOE1+mdlTAMPLmaTKSyekQ79YaXDARfAqmkCcGU\n0ZN+O1bd7B0JBc7MOUMY86aP5C9rTDd1sZy2BbLghiEaohUWKqvopo6oBQCBkeo57PIvJL9fC/u7\nLRNKcSjsONsy2TIzff32vAk8vHk1TVFLW+mIRyAPJg/vx1nDJnL3ync4eVQeqxvfdrprpv0Y3SWY\nhkigooVo/UhL+NtOVG+4rtLvAAB+wRK2sYgCPpswbNJSBL/luI8VIgZjDlmYhkB7JIUpZaKabHVe\n0jB1GZ8iEZQDCJLtRLcjdvoVFfLtOeN4v7Z3csSM9jG8tD/rmi3C0BqHgO5DlkTyKSNBA43GXkxV\nIb1zCsnBlmTuy+cwdXg1GzfWYkRKHOe+1jDcIc1pNRXUh8vYF3P74kSSYZkZ7vnWDDRjCk+/s5kV\nmvuuDanMZ+XWJg4191jImVmUeAT7gSQKWcJRkUW+deHYXuXOmzGoz+sD/iM4UL3OXc//kh7o5XQ1\njmEZkyAIzla5M06qpHHdaHZ27USrH8k3bh5z1GsvPc3Vyu78+jSW7/LxwPpHOGfw6cz4wjgg25x2\n/qjZ+D0+hnDQoyF5CCPPH+Cn59zUZ5tTKicwpXJCr+PV5WG+c8m4XscHVoT5169NOeI9SJKAKAp8\ne/5Y5k6p5r+eW9dnuZKCALdcNt75ndEwTNN6v6bVVByxjU+KnEnqOOENq80IIjFtxzlnVvKWHsY3\nzLPdo3fXrSorT5NsBDET1voAb8RGFjL1KVabhUHLFKE1WpFD/lEfWZu/2AIZBGQziOCP4x+zyuqj\nZs0mFXsmnyELsGbw6ApGKmjnWXKJK98fxC/50AXLfNSVtMgvpAQJyZaAbI67EWJg29AN2TJlKBGU\nYZusML+MIOuR7hogKFpj195qvZK+wdtRBlob+CiCD0US0Vtck1p6/1jSuyfRGU31zjBqL37zySJB\nOWiNn5J0MuTmS5Y5r6iPlOgZQd4vz90vORMlZJgmJZnNHsBe7CaSyKQf13rPeDOOVL3Ds/+y6X52\niizik+2oqGSIacp8x/4N1kxXEAQUScHXY0/monzL1JPZutbnmDhs34PZt1SWJfFTRc18UgeqpId6\nXXM8W9VeUHUuh1/194raOhaMLB7G7JbxTCw52TmWeV/KxGqK/IX4PTnegh5S/LTb6h4vvCY3fx/5\n546ETD6549nO9+PwT0UYa3c0H/POcQBbD7Q75ZfVr+LB9Y+x6NAyHlz/GCnVE29tE0Y6Ypt1MkK/\n15oETyimLTTDTV/AsGeUYjCWlSJEtGc8gqSBnKbbb4UaFoes8hlBKYatqCKvfdtPHoKiWnWCE32h\nkD0LvnnC9c6MxIgUIShppPI6pIJ2ezcw2UkpoAzZSjRtEUbYZxGGX/JxwEM+Vr9sG3pmEZG9JsBU\nA4693IuS5BiqJWvG2NSH5q0IPkvDiJSSPlhDet/J6C0DMboqSKV1d92Eo6VpmJpHwxCsPToE0UBr\nGkSRYtmqi/wFWe3oHRVkhG2VhzAymkNbV5Iqv0tahm3Gc/ercG1AWtMgpGgV+b48u+5K+xpXewCL\nMBz7sikwrGBYFqH0lagyg4BPptJj4y7Jt94Hvd0itUtGfLHP6yRJ+FSE8UmEF1ihtgGlp4bxyYWZ\npBl07l7/8QWPgJdf+hOplKvdavUj0CNFTAudC1imrAwET7boz5IvdP3Yd9b07rkuHmX/9Z5wNYwc\nYRw3uqIpHn51Cz96fNUxlVc1nfv+sMEp/7udf2ZXxx5e3P0auzr20Kl6NqXxxzFNHD+AQxR9PS8x\n21fR1OheJwSijrlletVkTi+4xDouq/g9+xP0K7EEXc9QSO8aiJ6z0YyJQBEUfPb+CacNmMWY0lFO\nkYxQcyN2rBfvi0MtJ51cUYdUbS0AG5jfD0mUmFHVW6XOxPR7yUnvLkatHU1Bno/UzsluWU1mgDrN\nEaz0CAkFCAhhZJto9KYh6K3V2e1lNAwlZa9lsO7Xp9gaBm5Kc729Etk28hb63N3xUtunkd7t9qsq\nz1Lj8+QQX5w5BIAR1YUUhcKOZpcJLvBubas1DsJI5KEeHENB8yn4MpEwqp/k5lmkdkzP6rsiiRT5\n8537KAhlayleQpgwIjtqKuCTGFLlRjBl9pEw4wXcPv7HzB14GtPHWPcxdog7K5dFkZKCQK/6jxVe\nshk+wHoX+9v5lE4aUuwIuglcSGr3RBRJ7mXGOnlYySdu9+knH0GNt1G37H94+GFrkezvfvcc3/rW\n17n22q/y5JOPAZBMJvmXf/k+3/jGV7nmmitZuHAhL730B1pbW7jllhu59VbLpKS39yO9/QsUBQp4\n+unf8q1vXcOBxffTtOnPAEwaWUY61sbzj/yEa6/9KtdddzUNDVagygsvPMM111zJN77xVR599DcA\n3HLLDezcaUWfdXV1csUV8wFYuPAv3HXXndxxx//jtttuIZFIcOut3+G6667mmmu+wtKl1t4oM0+u\npLtuHQcW/4qDSx5g6YLHiMfjXHHFRYSD1viVF4hcccX8oxKP6DFJfdb4p/FhJNVjZ3bg43eX09zw\nN9GfsNIhZGyits9h6thiNtmLuX1qCWmlHUHWkIUApcUSHYaAronki4VowMk1QbbssGZANcUj6U5b\nAk0IRrN24qosCvOf189gb9ce/njQTcAnKCrhoEI0oeLvQRhCOjOzFcj35dOWbGdcmWsHlkQBvbOc\nfMqJ2CGTGfIaXjSEfKmQ
iN6F6E+iNQxl9BlW7Pe4spOy9m8A+NcvzaIsXMi6Vh8v77MIRj00GjNW\nREF/H60NFWiNg5GrDmJqlvbgmizcmZR6aBR6exWhYWEUj3r+X9+awdqdLbyyxNK4fvSVmfxqxyLL\nxODJwqrIEtjpHjIRY0a8wJnRF/o9C6aS2TP/fnmVfHvcNQzKH0CRv5DZ4/tRWRwimlBRa2vQmgc6\ncfEZk9SVZ47gD+5eRUii4BAdgJnI1mgAZFnk4hEXsHhjA2r9CAKTJX51y2zASo8+sMJN5zBxRBnX\nnl/D03aW4IBP4tLThjFpZBmSKFKc73cWjQZ9fgRB4LovnsQVp49g+ZbDbD1g+UkkSeC8GYMYNbDo\nmKJweiLgk5EH7kAqaSSWH6BqqDWrrTIMXm1bReUpJqYJO9M6yiCVuLiLx3Z/gH+CvTo56OPt6Cre\n7rFf16SKcVw6Yt4R273pplvYu28vj//2BYJ+mTVrVlJXV8vjjz+LaZrcccdtbNy4gc7OdsrKyvnF\nLx6wxiIoMHWqyR//+Ht+/etHKSjIfg6yJHDZZV/m2muvJxJP88tf3M3y5Uu56eJZfLTgl3ztm9cx\ne/YcVFXFMAxWrlzO0qVLePzxZ/H5fEQivZOBWnDf5a1bN/Pss38kHA5jGAY/+9kvCYVCdHV1csMN\n32D27DlMGGDyx9YVjD3rFmKqzIzRhYRCISZPnsKm9av4+Y0z+fD9vzLo9LlI0pG1vIwyciI0jH8a\nwhCOsiFRX/i4zYISzqY1JigppEQxWo/V0uEwELdCJwdXF3KQ1Yj57ZSZZdZCsbi12tcvBtEAjZSb\nUVP2kySIqUvZ2VptDCjLo6L4JA7qk1m2qd6J9Mns+RASLPu8IsrcOP4b/O/WRkBFECxhmNbTjCwe\n7tTnUyQSKZPx+kWYFftY3vZ+VnsFvgIiiS7MtB+haYwznhUhd9Z77uAzKQ4UMaLKmtUWxz0fph1m\nmZlBqw3DEQJx1EOjUEb3bVM3U0HMVAhZErPiyPuV5hFQ3FTNAyvCBHblEVNSrjlQl/ErIrKc0T7S\nVuJDXXFsw7Loef378D9MKHcdwZXFlmDND1p+ogxZgDXmfkVi3PBS/uCJjpMlIYvo+oIii4SUIOoB\nq62AX3JCUHuGogIMKHeJLeCT8CkSowdZ2oNXQGSISpFFSgsDTsglWEQmCAKjBvbhwzkGeLUFr9lE\nsl7OvssAACAASURBVO9VEAQrd1MmlTnZ35/8KUI8JVEg6Lee2+rVq1izZjXf/OZVmKZJIpGkrq6W\n8eMn8pvfPMgjjzzEzJmzOeusU0kkbN9cH2q/KAqsW7ea3/3uOVKpJJFIhFEjRzJx4mS6OtuYPdva\nr0NRrDFcu3Y1X/zihfjspd35+fm96uyJadNmEA5b74xhGDz66ENs2LAeURRobW2ho6OdDRvWcdbc\ns9mWCoGaxh+w3rl58y7id797jtmz5/D22wu4444fH7Utx8T88cP5ifFPQxifFD0JwycqpD07waV0\n2xYqWInNArKPZIYwZBVREBDkTNK6MvoNHsRBczVSSSO0jUJHdXwOQTFIDEgZSQT7W/RLfuv1TuYh\n2NpFdbg/pw1w4+0VSeG2U77FB6/+GZ+S4ltf+CK/XW9F0fQXxnDR5AmUBIooCRQjmNZaDEEQuOak\nK1ENFcUjMH2ySCJl7fw3INhjNTBWfDxYkSU+z4KjYr8rdMaW1jC8aIjzO19xhWrGz+Bsh6r5SO+y\nzFmyLGaFFRrRAsRwt+MjUGSxVyoW78zdp0j4hSBxOe5ESpmqgiKJ+GSP2c7ug1fI5UtFdGtdWX6D\noyEv2HutBVhC0DsuVjtiVj/7Qk9C+Tj/gN/TRk9HcpZQFrPr9fb7k06eevVBkdAO1aAdquE/7zzz\niOUWbajn2Td30q8qn3/56iS+c7+Vb+rWG77gEPCngWmaXH31tcyff0mvc0888TwrVizj0UcfYteu\nzVxxxdVHrEfXNO6//xc8+eTzlJWV8+STj5FOW0EeR2oXeo+hJEnOam/rehfBoGuefeedN+ns7OSp\np15AFC0TUyqVdgg/U3OG/8eNm0Bj48/ZsOEjDMNg6NBhHA0nUsP4p/Fh6H1klz0avPtX17VEScaz\nBUU0kw7DdmIHFZ8zSxWVNGWFAYdUTE2hUClCNoIIgTiabpA0kgiGveLYFmppM+msp/BLfnyymJX+\n4bwhczllwIzenTVk0tu/wJTKCc5MMi/gY0TRUEoC1uwzbM/sg36JkBKksIfDt7TQ9jvIEiXB3qYT\nE3vmbkieaByyVro6foi+ftuJ5zIC0jtTVmSRoEf4pXZOJbVthpOCWpFEZ9V8xmneU9AGpCCC5K6+\nNhNhEARnbMFNV+FdxPSV6utJrj271/0eCflHIAxJElGU7D7JkpBFTn2h5wrcjyvvHfujOa5TPUyw\n4SP0+0Qi0z9ZErOIsCeZHStCoRDxuJuwc8aML7BgweskEta3aM3UO2htbcXv93POOefxla98jW3b\nttnX5xGL9Q56EdEQBCgoKCQej7No0XtO+YqKSj78cBH8/+3deXxU1f038M+9d2Yy2ReyEjBCEAWM\nAsomNMgiQcKSFKIsVm1Q3BGiCNIifUqr/YHlKTwqlmKlVV7Sal36M6htQUULYl0ALaKCYkggC4Ts\nyyz3PH/cmTuZbDMJmSQz+bxfr76aO3MzOXNk7ne+59zzPQCsVisaGxswdqz2d50T6FVV2he6pKRk\nHD+u/a133/1Xi7/jVFNTg+joGMiyjM8++wTFxdpNIddcMxbvvvsv2B03ljTUu9qakTELv/jFz5CZ\nOddjPzm/HIQEdf1/8z6TYXT0rozqJgHjnY8L9LBvLUqFMfkkyqqrAARr+0xD+7Y/69qh2Ft3CCkD\nTZgWNxifWBxbVzrWBBgagmBVamG122GxW2CUItEAINhkhFkxo1GthxxdqxW6C43HgCtM2HcmAWeh\nZQfNL/JOt994hT7xuiLnKrzz8WnccO1At3Puy74Sez76AZlt7A54X3Ya3vjwe/w4fTAMBoGYE8lI\n6+cakokLjcF31d9DrQ9zK2kAuLKv5uWTw4zux4C2O194iBGzr7sUj2w76Og7GUMHRmHssHiYTQom\npfXHZ9+U4e2PtbuvDAYZE0Yk4lRxNW64Vpvwbn6hHRAdjeLSH/RbcZ3lLNwDhpZhNL1gpQ2Kw/Uj\nB8JkkPGP/zQpid6GfpFmTB2djJIL9YgMNeHAl45V9HbV7ds/oF38w4KNuHHcJahtsKG8ugH9Isw4\nXlDh2N/EFfhWLRyJ4wUViPOwwKpp37dW7mHdbdfi0LESDLs0BufPu/YM6cqAMbh/BDLGDsS1l7d/\nf78zAzIoknv208kyFRERkUhLuxq33bYQ48Zdh3vvXY5Tp07h7rt/CkALKOvWbUBh4Wk8/fQWyLIE\ng8GIX/96AwBg7twsPPzwcsTGxmHLlm149JbROPhlMa4dMRBz5mTj1ltvRlJSfwwb5
vp3//Of/x9s\n2vQ4duz4PYxGIzZs+A3GjZuAEye+wdKlt8JkMmL8+IlYtuxeLFq0BOvWPYp33nkL11wzps33MWPG\nTKxenYc777wVQ4ZcjpQUrXbZoEGDceutufjt//t/UIUEUTAE98y/1vE7N2LHjmcxffoMj/20cNpl\nMBpkZE1qPxPpjD4TMOwdDBhNh6TCgo1AowrVUacJySfRqDrKYjvmHAySAbPHXIG97wNh4SrGDkvA\nB582aIvpVAVGgwwDTIChEjZo6WqwYkY1AHOQAaHGYJxvOA85SCuJ7RwCmjUyDc99qU1sR5paHytN\nv9p1335yXBhyM1suakrqF4qlmcNbPO4UHR6E22+8Qj/eMOVBt+dzhs7FocNVsBamICjW/QP/iwmr\nUdFYqd+R5KSViwaCJDOcMz4hZgNuv9G9fUaDjBCzAXfPc90jP2RApB4wjI45jFszXGU0mo+qhAe5\nByfn4rembWotw5BlCbdmXI7PvynzKmBIkoRbZriX8zjwZTEaLPYWGYZzTD9nyhC3x785XYHf7NJq\nGzkvnsMujcGwSz3fOdS0nERrQ0uDkiIwKCmixW2YXRkwZEnCzVMv83hecJMMo6mLmcN47LENbsc5\nOQuRk7PQ7bH+/ZMxdux4/TguLhxlZdWYP/9mzJ9/s/74ZQOicNkAbUj1jjvuxh133N3i7w0YMBBb\ntmxr8fiSJbdhyZLb3B675JJL8ac/vaQfO1/vxhtn48YbXZP5kZFRePbZP7b6/mbOzMS/votCeVUj\nJqa51vwcOfI5rr9+GkJDPe9pERUW1O5n/WIwYLShpt41BilLEiRZdZQB1z54+hyGI8MwyAaYFCOC\nDWZUWbS7Jupt9XoZa5NRhkEKgiQJ2GXt22WoY4evIKOCEGOIXozQVurKDgaEuYJBRBsZRncINgQj\nvOpK1Kv1LdYGRAZFtJr9yJKMX123Fke/rcCfoN3RpLRykfNmYri5pkUcASDM6Brisl+Id5UfaSXD\naHUMv5PXMOeF2K4K/XZGp7aGl5rOJ3h67821ty6jPT0xJBXUZsDoMyPhF8dxyfrd7zbho48O4skn\nt/Rse9CXAkaTPbj3HzmDlIRwpCS2/MYuhMAbJ97BB7YPIIeNhloTjeo6K2CyA6pZ2zcZTSe9HUNS\njrUNEaYIVFq0Mc16W4Ne9MxkUGCStAuW3VAHBUBEkHaRM5sUfW2EWh8CUedaHxAb7PrW2XSSuic4\n747pSOXLaHMUgg1NbkFu5SLqaYiitQtM8zH60CYBo+l6ioSQJsX3HHNMVbUtV5o3L/zmrbYmwYG2\nA0bTi3dHq4h6muNoS2cDzcVoOiTV1MVkGH2B89+i84q1YsWqnmtMM30m1NubFK/Z+dZxvPHh962e\n91Hxp/jn6X2QjFZHZVSgqs4CSCqEKusZhr4VqbPOk+NiHmkKR621DjbVhjpbvV6O2qBISIrSAkGQ\no8rpgOh+UGQJ/WND9Q2Y1NpITBntWk0sSzIWXp6Nm4e2vBOkuzk3u4+NbGXvhHa43XrZygWvMxmG\ncyhh2mhtTsOstFzwd/nAKIQYQ/QsbdwQbf5mUFIrmVonr2FtTYK3J7RJIb6Oftt2ZkfhIZ3LGDob\ncDojMtQEk9G1SND576Z5JkbuenP39J0Mo9mQVFVdy2+ZAHDgzMf6zwYDYAdQWdsAqZ+AJBRXcT/F\nimnXDMC732jrAYyKI8NwLAYrrTsHi90CYXUsvpMkJEVG4kgFYIMFI+PSMOuydMxcriA4yIC3/q39\nK8m6ZjRmDnatvgaAHzW5lbYn/XTWMMwcl4Kkfh27JdKt5EJrAcPTraetPJ+SGI7fPTCp1QvnM3np\nsFhVRDjWMeRdcy++vXASI/pdgYUTbK0Oz3T2M9pehtHWIGjTINGZfQqeWpEOo6HjLX4mL73TmVRn\nBAcZsOme6/Ry448vG49Gq/2ib+vtK3qohFW7+mzAaG1hnipUFNWc0Y8jwhSUAaiu14afTIoBjfq2\nmlaMuiwW755wZhiOW9kM2sX0bK1294yzrpIkAcFG1wRs7ojF2i2pjv8Cd111Gw6e+Q+mX3pdr/1A\nGRTZbeWxtzxlGJ6+ZbeVgUQ0Wdg2Kj4NpxtO49p+Wplqc5M1b0GKCVc6VrV39Vh+cHs1mbz4wHcm\nYISYO/ex7apd1zoivEmpE4Mic/7CC66Pf++LGH0nYNjdO7+2WcAQQuBc3Xk02i0wS2FoEDVw7pVS\n3dAIAwBFcmyPaTVBMjZiQHyYPodhUrTnzAYtQJyt1Uo06HWOmq0JaLp+AQAujbgEl0a0Xk7a3zXN\nMFobjvC0uYs3m78YZAPuGrMEZWVtlWloX2djdHsXYW8+7ryAUlt6X7joSwGjWQH+2gYb7KoKxXFP\n/rbXv8TnZV/ANASIkhJQLGrgqAQAq2prGTDMtQgJUiDJzoDhvgjvrVPawh1nwDAbFZQ3qT/VlzS9\nM6q1DKM3jGl3tAKrU2hwOwHDizGFjlQhpb6hX4QZZRUNCA9ufYOonuTzgLF//348/vjjEEJg/vz5\nWLZsmdvzZ8+exerVq1FdXQ1VVZGXl4fJkyd3eTtau622tsGm1zb65OsyKLGOrVCFNrlrNDqW6jdZ\nawEAsJkgSY6tVx0BI8jgWEltcJ8QXnx9Gi6cicDll0QBF5IAoNUKr4HMLcNoLWB4uGh2xxDd0IFR\nmH1dCkYPbbmlaXuS+oUi+0eD9HpOD908Er/9y2GPv7d68Sh8W1jZar0o6tvumD0c//jPacydeGlP\nN6UFnwYMVVWxYcMG7Ny5E/Hx8ViwYAGmTZuG1FRX0btt27Zh1qxZWLhwIU6ePIk777wT+/bta+dV\nO6fVgFFvRUSIybUK3BEYZKF9iA3O4W5HUHBmGMFKCCwAqi01gKT9jp5hKO4BY2hiIpKHaIHiipjL\nsHrMciSHJnXZ+/IHngKGp3jQHd/BJUnCj9NTPZ/YijkTB+k/jxgUg4HxYThdWtPupOXll0TrQYao\nqZgIs77TYG/j0wHUo0ePIiUlBcnJyTAajcjMzMTevXvdzpEkCTU1WgmDqqoqJCQktPZSF635HAbg\nmvgur3Ls1OYIDJLzVliDM5Boj+sbGtm1eYoaa22TDEP7HXOTDCM18lL0D3Wt1gSAS8IHtJi/CHSe\n5jA8DUn1ghGrDvGz5hJ5zacZRklJCZKSXN+mExIS8MUXX7idc//99yM3NxcvvPACGhoa8Pzzz/uk\nLc3nMABXwCi+4Cho5pjA/vpULUypgKw4Aogji5BaCRh6kHEU12saMG4amtVr73jqTp7u/fc8h+Fn\nfehnzSXylk8DhjeTfvn5+Zg/fz5uv/12HD58GKtWrUJ+fr7H34uL81yDvqngkJYLuyRFQVxcOGzf\nOfZWcFz8nWXHTWatrr/z8eEpcTh9BLhueAr2lR2BMFmR0j8UZwAMHhCLuLhw1BtdpcFTkhIQE9yx\ndnZGR/uiu9VYXcG6aVunXjsQ+z45jauu
SEBkWMv/PjdOuBRvHTyFMWlJ6BfZflG+1l6/pxgdE+hG\nk9Kj7ekNfdFbsC+6hk8DRmJiIs6cca1rKCkpQXy8e4XLV155Bc899xwAYOTIkWhsbER5eTliYtov\nxNbR2ycrK+tbPHa2rBplZdU478wwZOdeDY7yHxYLQs1G1Dke7xcWjGcfmoTvq7/HvjKguPw8RgyK\nxJkCQG1UUVZWjfoGV8mKxiqBsprO3ebpLWdhtd6sssJVkrppW5dMG4KbJg+Gpd6CsvqWCylzJg/G\nvOtSoFpsXr3H3tIXNpv276Wx0bt2+0Jv6YvegH3hcrGB06dzGGlpaSgoKEBRUREsFgvy8/Mxbdo0\nt3P69++PAwe0vRpPnjwJi8XiMVh0RmuT3s4hKYujLpHkGJISqgFCAHZhQ2iwUb9LyigbYTIqCHPs\n81BtrYXVsamSwbFwz9xk0tvQw7Wfeou27oKSJMljjaOeqIFERK3z6RVNURSsW7cOubm5EEJgwYIF\nSE1NxdatW5GWloYpU6Zg9erV+PnPf46dO3dClmX8z//8T5e2obSiHmFmY6tzGM7FexbHN0LnXVJQ\nZUBVYBM2hAUbcM6qfft1VkR17vNQY6lBiFFb2e2sJeVcuEcufW2tQfMd04gChc+/AqenpyM9Pd3t\nseXLl+s/p6am4qWXXmr+a13is2/K8NSrXyA4yICMMQNbPF9Tr627cGYYzklvqDIgZNiEHZEhJkj1\n2oK7CMd+FGGOIFFjrdVrSDkDhizJkCAhOaxv3TrbntZKmgeyhJgQnCqu7nCRRqLeLqDHTM5VarfL\n1jfaWpTDBoAaRwFC5/af+qS3UABVhk21Ys7ES1H+5ccoBRDpKCyoyApCDSGottSgzlYPo2xw26ti\n65QnfPiu/E9fyzCW3DAUybGhmOqopEsUKAI6YDTdx7uhlYBRGvI5vjwXAoujUrnzFlmoMoQqw6ra\ncGliBGKLJZSWa3tdOIWZQlFcp9WLuixqsNteFbLE+kBNdWdJ7d4gLFjbgpYo0AT0lc3WZKK7+Q5t\nMDagMeobbDv6vGsOo+mQlKrApmqRpNJShSDF5DY/Ue7YHQ8ALo/unasye4u+lmEQBaqADhhNM4wD\nXxa7PRfUpISPPofhzDCENofhvAOqylKtz184OSe+r44dgWmXuM/RkLu+lmEQBarAHpJqZx/vsHAJ\nztUBFpsKJeYslIhyCFUCIOlDUnbVjhpLLeIj3YvS3X3V7ThR+T0mJ/fe/St6C/YPUWAI6IBhs7e8\nldYpOFi4AobVDtOQIwAASXYEGVWGKlRcaKyAgEC0OdLt9weE98eA8P6+aHbAYYZBFBgCfEiq7QzD\nFOya09DnMJpy1IYqrdP22o4OiuraxvUhnMMgCgyBnWG0MSQlRxfjbJhrzwJLK3dQCUd5kMJqrbRJ\njJkBo7MYMIgCQ4BnGK0PSRmTvnc7Pnu+rsU5wqIVuztZqZ0bzYDRab1hRz0iuniBHTBayTDWLBmN\n/v3Cmj3qfp7JKGP2tcMAACcqTgHgkNTFemB+GtbfPqanm0FEFyGwh6RayTAGxIUhvCwIxU2315bd\nh6TSBvVDapwROAM02LXV4lHNJr2pY0Zd1rGtT4mo9wn4DMOQ9B2MqYfhzCIUWYLavCpcs4DRaLMj\nxuzaPlOChBCDd/sxEBEFqoAOGFa7DcaB38DQrxgwaHWjFEVCjbXG7TypWcCwWlW3gBFiCGa5DyLq\n8wL6KlituDZvks11SEkIhyJLqLI020xFsQNCm5iV6qKxaPplWikQx94WIUZmF0REAT2H0ShX6T9L\nQXVY/9MxsNitqLc1wCAZYLUCksGmD0n1D03E6uuX6xsfRZjC0FDfoO95QUTUlwV0hmEXrm0/JbN2\n62xFYyUAYHjUlbCVpGjPKTZAEgg3hbntkucsNhiscF8DIqLADhiSVf9ZCnIGjAoAQIw5ErBrq7kl\ng3ObVfeES5G051Vw6zQiosAOGHAFDNlchxMV3+O7ygIAQGxIDITq2C/aETCMzQOGrD1vd5Q5JyLq\nywJ6DkN1ZBjCrkAOq8T//Wyb/lxCWAx+lDYQh6q/ajPDMEjasV20XcSQiKivCOgMwxkw1NqIFs9F\nm6NwSZxjMZ7SesBw1o+KbLYXBhFRXxTgGYYNQgCiLhyIuOD2XHRQJIIUbRclSR+SMrqdM/+yOTAb\nzMhImdo9DSYi6sUCNmD885PTsAkLJFWB2hCqPy5Bwk1D58FsMMNs1AKEpGhzFAbHnIWT2WDG/Mvm\ndF+jiYh6sYAdknrpX98Cih2SasSg8MH64z8fl4f0AdcBAMwGxz6tbWQYRETkErABA3BkDnYFa3Mm\n64/FmGP0n4ONQY7zHHMYknuGQURELgE7JAUAUGxQHftaPDZ+FSobK2FSXFlEkJ5haENSzDCIiNoW\nwAFDhSSrUG1a1pAQEoeEEPcS284AoWcYSgB3BxHRRQrcISnHRDbUtoOAHjAcGYZz3QUREbUUsAFD\nMtcDAISl7TpQpmYZRfOV3kRE5BKwAUMO1kqYq3VtL7prPmfRfOEeERG5BG7ACNEChqhvvn+3iyIp\naLr5HjMMIqK2BdwV8u8ffo+TZ6ogBWu76rWXYUiSBKiKtoESmGEQEbUn4K6Qr3/4PQAgaLgNQpVw\n95yr2/8FVWbAICLyQsAOSUFSIUPB2GEJ7Z6mlzgH12EQEbUncAOGrEISXqzcVl1d0LyWFBERuQRu\nwJBUSN68PWYYRERe8XnA2L9/P2bOnImMjAxs37691XP27NmDzMxMzJkzBw8//HCX/F3JywxDbrJY\nj3MYRERt8+kVUlVVbNiwATt37kR8fDwWLFiAadOmITU1VT/nhx9+wI4dO/CXv/wFYWFhKC8v75o/\n7mWGkZoYjZNV2l4ZvK2WiKhtPs0wjh49ipSUFCQnJ8NoNCIzMxN79+51O+evf/0rFi9ejLAwbb1E\nTExMay/VcbI26e2Js2ItwAyDiKg9Pg0YJSUlSEpK0o8TEhJQWlrqds6pU6fw/fffY9GiRVi4cCE+\n+OCDrvnjkgrJi4BhNrgCBjMMIqK2ebxClpSUICGh/VtT2yKaLqNug91uR0FBAXbt2oUzZ85gyZIl\nyM/P1zOOzhGQZAFZ9RwwghRmGERE3vB4hZw/fz5GjRqFxYsXY8KECR168cTERJw5c0Y/LikpQXx8\nvNs5CQkJGDVqFGRZxoABAzBo0CCcOnUKV155ZbuvHRfX9gpuSFqgMshK++cBiC5yPZ8YH6Wt/vYz\nnt5jX8K+cGFfuLAvuobHgLFv3z7s2bMHv/vd77BhwwYsWbIE8+bN8yoDSEtLQ0FBAYqKihAXF4f8\n/Hxs3rzZ7Zzp06cjPz8fWVlZKC8vxw8//ICBAwd6fO2ysuq2n5RU7f+F3P55AFSLK0CcO1fj8e/2\nNnFx4R7fY1/BvnBhX7iwL1wuNnB6DBgmkwlZWVnIysrCZ599hry8PPz2t79FdnY27r33XvTr16/N\
n31UUBevWrUNubi6EEFiwYAFSU1OxdetWpKWlYcqUKfjRj36Ef//738jMzISiKHjkkUcQGRnZ6Tck\nSYCQtYCheDPp3WRIioiI2ubVoH1RURF2796NN998ExMmTEBOTg4++ugjLF26FK+//nq7v5ueno70\n9HS3x5YvX+52vGbNGqxZs6aDTW9JCKFVn3VkGLIXGyIFGRgwiIi84fGKevfdd+Obb77BwoUL8eqr\nryI6OhoAMHr0aOzZs8fnDewIu6rNXUgdyDDMzDCIiLziMWDMmzcPM2bMgKK0vPi++eabPmlUZ9nt\njruy9AyjY3dJERFR2zyuw4iMjERdXZ1+XFVVhYMHD/q0UZ3lzDDgyDAMXgQMs6HtLVyJiMjFY8DY\nuHGj2x1RYWFh2Lhxo08b1Vl21XF3lCPDULyYw+CQFBGRdzwGDCGE29oEWZZht9t92qjOcs1haO1T\nOCRFRNRlPAaM0NBQHDlyRD8+cuQIQkJCfNqoztLnMJyT3l4EDO6BQUTkHY9jNqtWrcJ9992HIUOG\nAABOnDiBp556yucN6wzXkJQWOIQX5c0jTOEwK0EYkzjal00jIvJ7HgPGqFGjkJ+fj8OHD0MIgVGj\nRl3Uwjpf0ie99ZXenst8KLKCJ9N/6ZclQYiIupNXC/ciIyMxefJkX7flojVfh+HVFq0AgwURkRc8\nBozjx49j/fr1OH78OCwWi/74V1995dOGdYbdLgDFCslUrz2gBu4OtERE3c1jwPjFL36BFStW4Ikn\nnsCOHTuwa9cuhIaGdkfbOsyuCgSNOAjZrK0bEQwYRERdxuMV1WKxYMKECRBCID4+HitXruy6TY66\nmF1V9WABAAaVi/KIiLqKx4Ahy9opkZGROH78OC5cuICioiKfN6wzVNV9wyazLbaHWkJEFHg8Dkll\nZmbiwoULWLZsGRYtWgRVVVtUm+0tbM0CRlRocA+1hIgo8LQbMFRVxYQJExAdHY309HR8/PHHaGxs\nvMjtU33HbhcQqgxJVjFWmY/Z4y/t6SYREQWMdoekZFnGz372M/3YaDT22mABAFa7DZKsop+cjNsm\nj0NwEPfoJiLqKh7nMFJTU1FYWNgdbbloFrt2269RMvVwS4iIAo/Hr+Dl5eWYO3currnmGrcaUlu2\nbPFpwzqjwd4IADDKDBhERF3Nq0nvzMzM7mjLRWu0OTIM2djDLSEiCjweA0Z2dnZ3tKNLNNgbAAAm\nmSXLiYi6mseAsXz58lZrLfXGISmLygyDiMhXPAaMKVOm6D83NjbinXfeQWpqqk8b1VnOgBHEDIOI\nqMt1eEjqxz/+Me655x6fNehiOO+SMimc9CYi6modrs4nSVKvvc3Womp3SXGfbiKirtehOQwhBL7+\n+mtMmDDB5w3rjEbHbbVmA4sOEhF1tQ7NYSiKgtzcXIwcOdKnjeqsRlXbByPM2Dv3HCci8mcBdVtt\no3AGjN5bvoSIyF95nMNYtGgRKisr9eOKigosWbLEp43qrEahrcMIN/XODZ6IiPyZx4BRV1eHyMhI\n/TgqKgo1NTU+bVRnWVEPoUoIMXIOg4ioq3kMGKqqoq7OtYtdbW0t7Ha7TxvVWVbRANhMMBqUnm4K\nEVHA8TiHMXv2bOTm5mLRokUAgJdeeglz5871ecM6wyo1QNiCoCgtV6YTEdHF8Rgw7rrrLsTHx2Pf\nvn0QQmDhwoXIysrqjrZ1iF21Q5WsELZwGJUOLy8hIiIPvNphKDs7u9ffLVVcVwoAEFYTFAYMF7dU\nkAAAFDhJREFUIqIu5/HK+sADD6CiokI/vnDhAh588EGfNqoz3jq1FwBgP5/EDIOIyAc8XllPnz6N\nqKgo/Tg6OhoFBQU+bVRnlNaVQVKNUCviOYdBROQDHgOG3W53uyvKarXCYrH4tFGdUW9rgKwaIUGC\nIjNgEBF1NY8BY9KkSVi5ciU++eQTfPLJJ8jLy0N6errXf2D//v2YOXMmMjIysH379jbPe/vtt3HF\nFVfgv//9r9ev3VS9rQGSaoSiyK3u30FERBfH46R3Xl4efv/73+M3v/kNAK221Lhx47x6cVVVsWHD\nBuzcuRPx8fFYsGABpk2b1mI/jdraWrz44oudrlElhECDrQGKGgIDh6OIiHzCY4ZhNBpx//334+mn\nn8YNN9yAv//971i7dq1XL3706FGkpKQgOTkZRqMRmZmZ2Lt3b4vztmzZgjvvvBNGY+d2ymu0WyAg\nALsBBk54ExH5RLsZhs1mw759+/C3v/0Nhw8fhs1mw3PPPed1JlBSUoKkpCT9OCEhAV988YXbOV99\n9RWKi4sxefJk7NixoxNvwbWXN+xGZhhERD7S5tfxJ554Atdffz12796N2bNn4/3330dkZGSHho2E\nEB6ff/zxx7FmzRqvf6c1DTYtYAhmGEREPtNmhvHSSy9h1KhRWLZsGcaPHw8AHZ5MTkxMxJkzZ/Tj\nkpISxMfH68e1tbU4ceIEfvKTn0AIgXPnzuHee+/Ftm3bMGLEiHZfOy4uXP/5glSm/WA3IMhkcHuu\nL+hr77c97AsX9oUL+6JrtBkwPvzwQ/zv//4vNm7ciMrKSmRlZXW46GBaWhoKCgpQVFSEuLg45Ofn\nY/PmzfrzYWFhOHjwoH78k5/8BI8++iiGDx/u8bXLyqr1n8+eLwcA2K0KpGbPBbq4uPA+9X7bw75w\nYV+4sC9cLjZwtjl+ExERgSVLluDVV1/F008/jcrKSjQ0NGDJkiXYvXu3Vy+uKArWrVuH3NxczJ49\nG5mZmUhNTcXWrVvx7rvvtjhfkqTODUk5tmZVbQrnMIiIfEQSHbhCW61W/POf/8Rrr72GP/zhD75s\nl0dlZdUoqCrEy9/+HcNiLkP+9/+E5WQaBpmHY+1PrunRtnUnfntyYV+4sC9c2BcuF5theFV80Mlo\nNGLWrFmYNWvWRf3RrrL7m9fwQ9VpfFd5CoA26W2x9c69OoiI/J1f31IUFRTp/oDdgPOVDT3TGCKi\nAOfXASM2OMbtWNgNqG2w9VBriIgCm18HjBbsnVspTkREnvl1wLCr7vMVwm5AanJED7WGiCiwdWjS\nu7exqe7DT4umXIGJIwb0UGuIiAKbX2cYNuGeYQzpH4MQs1/HQCKiXsuvA0bzISmzSemhlhARBT6/\nDhjlNfVux2YTswsiIl/x64BRXe++5oIZBhGR7/h1wLA3m8MIMjJgEBH5SkAFDFlm4UEiIl/x64Ch\nwhUwbGXJPdgSIqLA59ezxKpQIVQZDZ9NBwSzCyIiX/LrgGEXNkDI2v+IiMin/PpKq0JlsCAi6iZ+\nfbUVsEOofv0WiIj8hl9fbbUMg3MXRETdwa/nMATsgDBg2ugBGDs8vqebQ0QU0Pw8YKiAKuOmqakw\nGrhoj4jIl/x6SEpI2qS3ovj12yAi8gt+faUVjrukZInzGEREvua3AUMVKiAJSP77FoiI/IrfXm2d\nu+1JXIdBRNQt/PZqa3NsniSBk91ERN3BbwOGs1Kt7L9v
gYjIr/jt1VYfkmKGQUTULfw4YDgzDAYM\nIqLu4LcBwy60DINDUkRE3cNvr7ZWZ4YhMcMgIuoOfhswnHMYCgMGEVG38NuAYbE7h6QYMIiIuoP/\nBgybFQCgyAwYRETdwW8DRqMzYDDDICLqFn4ZMKpqLbDYtCEpAzMMIqJu4Zf7YSx57C0kDq4AYjnp\nTUTUXXyeYezfvx8zZ85ERkYGtm/f3uL5nTt3IjMzE/PmzcNPf/pTnD171qvXLausAwAYZL+MeURE\nfsenAUNVVWzYsAHPPfcc3nzzTeTn5+PkyZNu5wwfPhyvvvoq3njjDcyYMQMbN2707sUlFQADBhFR\nd/FpwDh69ChSUlKQnJwMo9GIzMxM7N271+2csWPHIigoCAAwcuRIlJSUePfishYwjAoDBhFRd/Bp\nwCgpKUFSUpJ+nJCQgNLS0jbPf+WVV5Cenu7di+sZBucwiIi6g0+/ngshvD73jTfewH//+1+88MIL\nXp0vSdprh4cEIy4uvFPtCxR9/f03xb5wYV+4sC+6hk8DRmJiIs6cOaMfl5SUID4+vsV5Bw4cwPbt\n2/Hiiy/CaDR69+KOISnVBpSVVXdJe/1RXFx4n37/TbEvXNgXLuwLl4sNnD4dkkpLS0NBQQGKiopg\nsViQn5+PadOmuZ1z7NgxrF+/Htu2bUN0dLT3L+4YkjJxDoOIqFv49GqrKArWrVuH3NxcCCGwYMEC\npKamYuvWrUhLS8OUKVOwadMm1NfX48EHH4QQAv3798czzzzj+cUl56Q35zCIiLqDz7+ep6ent5jI\nXr58uf7z888/36nXlWRnhuHlEBYREV0UvywNAgBwTHqbDAwYRETdwS8DRnR4EOcwiIi6mV8GDINB\n1u+SCvL2rioiIroo/hkwZFnPMIKYYRARdQv/DBgGSV+4Z2KGQUTULfwyYCiya0jKzElvIqJu4ZcB\nw6BI+pCUmRkGEVG38NOA0WQOw8A5DCKi7uCXAUNRZEiyCqHKMBm40puIqDv4ZcAwKjKg2AC7AqPB\nL98CEZHf8curraJIkIyNENYgBgwiom7il1dbWRGQDDYIaxAUxS/fAhGR3/HPq63SCAAQVhNkSerh\nxhAR9Q1+GTBUgzNgBPVwS4iI+g6/DBh2uR4AINkZMIiIuotfBgxVbgAAGEVwD7eEiKjv8MuAcVoc\nAQAEqRE93BIior7DLwNGg1QJ27n+CFHjeropRER9hl8GDABQq2IQEsSyIERE3cV/A0ZdOIIZMIiI\nuo1/BgwBiPowmE2sI0VE1F38MmAEl6cBQkFYMEubExF1F78MGOvmLsaVg2Iwd+Kgnm4KEVGf4ZeT\nAEMGRCHv5pE93Qwioj7FLzMMIiLqfgwYRETkFQYMIiLyCgMGERF5hQGDiIi8woBBREReYcAgIiKv\nMGAQEZFXGDCIiMgrDBhEROQVBgwiIvKKzwPG/v37MXPmTGRkZGD79u0tnrdYLFi5ciVmzJiBm2++\nGWfOnPF1k4iIqBN8GjBUVcWGDRvw3HPP4c0330R+fj5Onjzpds4rr7yCyMhI/OMf/8Btt92GTZs2\n+bJJRETUST4NGEePHkVKSgqSk5NhNBqRmZmJvXv3up2zd+9eZGdnAwAyMjJw8OBBXzaJiIg6yacB\no6SkBElJSfpxQkICSktL3c4pLS1FYmIiAEBRFERERKCiosKXzSIiok7wacAQQnT4HCEEJEnyVZOI\niKiTfLqBUmJiotskdklJCeLj41ucU1xcjISEBNjtdtTU1CAyMtLja8fFhXd5e/0V+8KFfeHCvnBh\nX3QNn2YYaWlpKCgoQFFRESwWC/Lz8zFt2jS3c6ZMmYLXXnsNAPD2229j/PjxvmwSERF1kiS8GTe6\nCPv378evf/1rCCGwYMECLFu2DFu3bkVaWhqmTJkCi8WCVatW4auvvkJUVBQ2b96MAQMG+LJJRETU\nCT4PGEREFBi40puIiLzCgEFERF5hwCAiIq/4XcDwVJsq0KxduxbXXXcd5syZoz9WWVmJ3NxcZGRk\nYOnSpaiurtaf+9WvfoUZM2Zg3rx5+Oqrr3qiyT5RXFyMW2+9FbNmzcKcOXPw5z//GUDf7AuLxYKc\nnBxkZWVhzpw5eOqppwAAhYWFuOmmm5CRkYG8vDzYbDb9/ECv16aqKrKzs3H33XcD6Lt9MXXqVMyd\nOxdZWVlYsGABgC7+jAg/YrfbxfTp00VhYaGwWCxi7ty54sSJEz3dLJ/6z3/+I44dOyZmz56tP7Zx\n40axfft2IYQQv//978WmTZuEEEK899574s477xRCCHH48GGRk5PT/Q32kdLSUnHs2DEhhBA1NTVi\nxowZ4sSJE32yL4QQoq6uTgghhM1mEzk5OeLw4cPiwQcfFHv27BFCCPHYY4+Jl156SQghxK5du8T6\n9euFEELk5+eLFStW9Eibfen5558XDz30kLjrrruEEKLP9sXUqVNFRUWF22Nd+RnxqwzDm9pUgeba\na69FRESE22NN629lZ2frfbB3715kZWUBAK6++mpUV1fj3Llz3dtgH4mLi8OwYcMAAKGhoUhNTUVJ\nSUmf7AsACA4OBqB9Y7bZbJAkCYcOHUJGRgYArS/+9a9/AQj8em3FxcV4//33kZOToz/20Ucf9cm+\nEEJAVVW3x7ryM+JXAcOb2lR9QXl5OWJjYwFoF9Ly8nIA7nW5AK1/SkpKeqSNvlRYWIjjx4/j6quv\nxvnz5/tkX6iqiqysLEycOBETJ07EwIEDERERAVnWPtKJiYn6+w30em2PP/44HnnkEb2k0IULFxAZ\nGdkn+0KSJCxduhTz58/Hyy+/DABd+hnxaWmQria4ZKRdrfVPoNXlqq2txfLly7F27VqEhoa2+f4C\nvS9kWcbrr7+Ompoa3HfffS22DQBc77d5X4gAqtf23nvvITY2FsOGDcOhQ4cAaO+v+XvuC30BALt3\n79aDQm5uLgYNGtSlnxG/Chje1KbqC/r164dz584hNjYWZWVliImJAaB9QyguLtbPKy4uDqj+sdls\nWL58OebNm4fp06cD6Lt94RQWFoYxY8bgyJEjqKqqgqqqkGXZ7f06+6Kj9dr8wWeffYZ9+/bh/fff\nR2NjI2pra/H444+jurq6z/UFoGUQABATE4Pp06fj6NGjXfoZ8ashKW9qUwWi5t8Epk6dildffRUA\n8Nprr+l9MG3aNLz++usAgMOHDyMiIkJPRQPB2rVrMWTIENx22236Y32xL8rLy/U7XRoaGnDw4EEM\nGTIE48aNw9tvvw3AvS+mTp0asPXa8vLy8N5772Hv3r3YvHkzxo0bhyeffLJP9kV9fT1qa2sBAHV1\ndfjwww8xdOjQLv2M+F1pkNZqUwWyhx56CIcOHUJFRQViY2PxwAMPYPr06XjwwQdx9uxZ9O/fH1u2\nbNEnxn/5y1/igw8+QHBwMJ544gmMGDGih99B1/j0009xyy23YOjQoZAkCZIkYeXKlbjqqquwYsWK\nPtUXX3/9Nda
sWQNVVaGqKmbNmoV77rkHp0+fRl5eHqqqqjBs2DBs2rQJRqOxz9Rr+/jjj/HHP/4R\nzz77bJ/si9OnT+P++++HJEmw2+2YM2cOli1bhoqKii77jPhdwCAiop7hV0NSRETUcxgwiIjIKwwY\nRETkFQYMIiLyCgMGERF5hQGDiIi8woBBfu2mm25CdnY2MjMzMWLECGRnZyM7Oxtr167t8Gvdcccd\nXpW7fvTRR3H48OHONLdDjh07hnfeecfnf4fIW1yHQQGhqKgICxYsaLf6qLNUhL94+eWXcfDgQWze\nvLmnm0IEwM9qSRF1xMGDB7Fp0yaMHDkSx44dw3333Yfy8nLs2rVL31BnzZo1GDt2LABg8uTJ2Llz\nJwYNGoTFixdj1KhR+Pzzz1FaWorZs2djxYoVAIDFixfj3nvvxaRJk7Bq1SqEhYXh5MmTKCkpwejR\no/HEE08A0GrzPPLII7hw4QIGDhwIu92OqVOn4uabb3Zr57lz5/DQQw/hwoULAIBJkybhjjvuwDPP\nPIO6ujpkZ2dj3LhxWLNmDT7//HNs3rwZ9fX1AIDly5cjPT0dBQUFWLx4MWbPno1PP/0UFosF69ev\nx+jRo7ulr6mPuJjNOoh6i8LCQjF+/Hi3xw4cOCCGDx8uvvjiC/2xppvLnDhxQlx//fX6cXp6uvju\nu++EEEIsWrRIPPTQQ0IIIaqqqsTYsWNFYWGh/twHH3wghBDi4YcfFrfccouwWq2isbFRzJw5Uxw6\ndEgIIcQ999wj/vCHPwghhDh9+rQYNWqU2L17d4u279ixQzz22GP6cVVVlRBCiL/+9a8iLy/Pre1Z\nWVni/PnzQgghiouLRXp6uqipqRE//PCDuPzyy0V+fr7+3q+//nphs9m870QiD5hhUEAbPHgwrrzy\nSv341KlT2Lp1K0pLS6EoCkpLS1FRUYGoqKgWv3vjjTcCAMLDwzFo0CAUFBQgOTm5xXk33HADDAbt\nozR8+HAUFBRg7NixOHToEH71q18BAAYMGKBnMs2NHDkSL774Ip588kmMGTMGkyZNavW8Tz/9FIWF\nhVi6dKlekFJRFJw+fRohISEIDg7GrFmzAAATJkyAoig4deoUUlNTve0uonYxYFBACw0NdTteuXIl\n1q9fj8mTJ0NVVVx11VVobGxs9XeDgoL0n2VZht1u79B53u6zcM011+C1117DgQMH8Le//Q07duzA\nCy+80OI8IQRGjBiBnTt3tniuoKCgxWOqqgbUXg/U8/xnBpDIA+HF/Rs1NTV6ddLdu3e3GQS6wtix\nY/Wy0kVFRfj4449bPa+wsBBhYWGYNWsW1qxZgy+//BKAtteFs4w5AIwePRonTpzAJ598oj929OhR\n/ef6+nrs2bMHgLZFKQCkpKR07ZuiPo0ZBgUMb75Nr127FsuWLUNSUhLGjRuH8PDwVn+/+Wu19Vx7\n561btw6rV69Gfn4+Bg8ejNGjR7v9PaeDBw/iz3/+MxRFgRACGzZsAABMnDgRf/rTn5CVlYXx48dj\nzZo1eOaZZ7Bp0yZUV1fDarVi4MCBePbZZwEAsbGx+Pbbb5GTkwOLxYLNmzdDURSPfULkLd5WS+Qj\njY2NMBqNkGUZJSUlyMnJwa5duzBw4MAu/1vOu6Q+/PDDLn9tIidmGEQ+8t133+HRRx+FEAKqqmLl\nypU+CRZE3YUZBhEReYWT3kRE5BUGDCIi8goDBhEReYUBg4iIvMKAQUREXmHAICIir/x/apbYj523\no60AAAAASUVORK5CYII=\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x7f72f867ef90>"
+ "\u003cmatplotlib.figure.Figure at 0x7f97f1330850\u003e"
]
},
"metadata": {
"tags": []
- }
+ },
+ "output_type": "display_data"
}
+ ],
+ "source": [
+ "def plot(train, test, label):\n",
+ " plt.title('MNIST model %s' % label)\n",
+ " plt.plot(train, label='train %s' % label)\n",
+ " plt.plot(test, label='test %s' % label)\n",
+ " plt.legend()\n",
+ " plt.xlabel('Training step')\n",
+ " plt.ylabel(label.capitalize())\n",
+ " plt.show()\n",
+ " \n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " hp = tf.contrib.training.HParams(\n",
+ " learning_rate=0.05,\n",
+ " max_steps=tf.constant(500),\n",
+ " )\n",
+ " train_ds = setup_mnist_data(True, hp, 50)\n",
+ " test_ds = setup_mnist_data(False, hp, 1000)\n",
+ " tf_train = autograph.to_graph(train)\n",
+ " all_losses = tf_train(train_ds, test_ds, hp)\n",
+ "\n",
+ " with tf.Session() as sess:\n",
+ " sess.run(tf.global_variables_initializer())\n",
+ " (train_losses, test_losses, train_accuracies,\n",
+ " test_accuracies) = sess.run(all_losses)\n",
+ " \n",
+ " plot(train_losses, test_losses, 'loss')\n",
+ " plot(train_accuracies, test_accuracies, 'accuracy')"
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "HNqUFL4deCsL",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "HNqUFL4deCsL"
},
- "cell_type": "markdown",
"source": [
"# 4. Case study: building an RNN\n"
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "YkC1k4HEQ7rw",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "YkC1k4HEQ7rw"
},
- "cell_type": "markdown",
"source": [
"In this exercise we build and train a model similar to the RNNColorbot model that was used in the main Eager notebook. The model is adapted for converting and training in graph mode."
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "7nkPDl5CTCNb",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "7nkPDl5CTCNb"
},
- "cell_type": "markdown",
"source": [
"To get started, we load the colorbot dataset. The code is identical to that used in the other exercise and its details are unimportant."
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "A0uREmVXCQEw",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "A0uREmVXCQEw"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def parse(line):\n",
" \"\"\"Parses a line from the colors dataset.\n",
@@ -1137,7 +1034,7 @@
" A tuple of three tensors (rgb, chars, length), of shapes: (batch_size, 3),\n",
" (batch_size, max_sequence_length, 256) and respectively (batch_size).\n",
" \"\"\"\n",
- " items = tf.string_split([line], \",\").values\n",
+ " items = tf.string_split(tf.expand_dims(line, 0), \",\").values\n",
" rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.0\n",
" color_name = items[0]\n",
" chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256)\n",
@@ -1169,23 +1066,21 @@
" dataset = dataset.repeat()\n",
" if training:\n",
" dataset = dataset.shuffle(buffer_size=3000)\n",
- " dataset = dataset.padded_batch(batch_size, padded_shapes=([None], [None, None], []))\n",
+ " dataset = dataset.padded_batch(batch_size, padded_shapes=((None,), (None, None), ()))\n",
" return dataset\n",
"\n",
"\n",
- "train_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/train.csv\"\n",
- "test_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/extras/colorbot/data/test.csv\"\n",
+ "train_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv\"\n",
+ "test_url = \"https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv\"\n",
"data_dir = \"tmp/rnn/data\""
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "waZ89t3DTUla",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "waZ89t3DTUla"
},
- "cell_type": "markdown",
"source": [
"Next, we set up the RNNColobot model, which is very similar to the one we used in the main exercise.\n",
"\n",
@@ -1193,17 +1088,19 @@
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "9v8AJouiC44V",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "9v8AJouiC44V"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def model_components():\n",
" lower_cell = tf.contrib.rnn.LSTMBlockCell(256)\n",
@@ -1227,12 +1124,13 @@
" Returns:\n",
" A Tensor of shape (max_sequence_length, batch_size, output_size).\n",
" \"\"\"\n",
- " hidden_outputs = []\n",
- " autograph.utils.set_element_type(hidden_outputs, tf.float32)\n",
+ " hidden_outputs = tf.TensorArray(tf.float32, size=0, dynamic_size=True)\n",
" state, output = cell.zero_state(batch_size, tf.float32)\n",
+ " initial_state_shape = state.shape\n",
+ " initial_output_shape = output.shape\n",
" n = tf.shape(chars)[0]\n",
" i = 0\n",
- " while i < n:\n",
+ " while i \u003c n:\n",
" ch = chars[i]\n",
" cell_output, (state, output) = cell.call(ch, (state, output))\n",
" hidden_outputs.append(cell_output)\n",
@@ -1261,50 +1159,51 @@
" A Tensor of shape (batch_size, 3) - the model predictions.\n",
" \"\"\"\n",
" (chars, length) = inputs\n",
- " chars_time_major = tf.transpose(chars, [1, 0, 2])\n",
+ " chars_time_major = tf.transpose(chars, (1, 0, 2))\n",
" chars_time_major.set_shape((None, batch_size, 256))\n",
"\n",
" hidden_outputs = rnn_layer(chars_time_major, lower_cell, batch_size, training)\n",
" final_outputs = rnn_layer(hidden_outputs, upper_cell, batch_size, training)\n",
"\n",
" # Grab just the end-of-sequence from each output.\n",
- " indices = tf.stack([length - 1, range(batch_size)], axis=1)\n",
+ " indices = tf.stack((length - 1, range(batch_size)), axis=1)\n",
" sequence_ends = tf.gather_nd(final_outputs, indices)\n",
+ " sequence_ends.set_shape((batch_size, 128))\n",
" return relu_layer(sequence_ends)\n",
"\n",
"def loss_fn(labels, predictions):\n",
" return tf.reduce_mean((predictions - labels) ** 2)"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "JjK4gXFvFsf4",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "JjK4gXFvFsf4"
},
- "cell_type": "markdown",
"source": [
"The train and test functions are also similar to the ones used in the Eager notebook. Since the network requires a fixed batch size, we'll train in a single shot, rather than by epoch."
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "ZWQMExk0S6X6",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "ZWQMExk0S6X6"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"def train(optimizer, train_data, lower_cell, upper_cell, relu_layer, batch_size, num_steps):\n",
" iterator = train_data.make_one_shot_iterator()\n",
" step = 0\n",
- " while step < num_steps:\n",
+ " while step \u003c num_steps:\n",
" labels, chars, sequence_length = iterator.get_next()\n",
" predictions = model((chars, sequence_length), lower_cell, upper_cell, relu_layer, batch_size, training=True)\n",
" loss = loss_fn(labels, predictions)\n",
@@ -1319,7 +1218,7 @@
" total_loss = 0.0\n",
" iterator = eval_data.make_one_shot_iterator()\n",
" step = 0\n",
- " while step < num_steps:\n",
+ " while step \u003c num_steps:\n",
" labels, chars, sequence_length = iterator.get_next()\n",
" predictions = model((chars, sequence_length), lower_cell, upper_cell, relu_layer, batch_size, training=False)\n",
" total_loss += loss_fn(labels, predictions)\n",
@@ -1340,16 +1239,14 @@
" # Here, we create a no_op that will drive the execution of all other code in\n",
" # this function. Autograph will add the necessary control dependencies.\n",
" return tf.no_op()"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "iopcs5hXG2od",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "iopcs5hXG2od"
},
- "cell_type": "markdown",
"source": [
"Finally, we add code to run inference on a single input, which we'll read from the input.\n",
"\n",
@@ -1357,17 +1254,19 @@
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "DyU0wnnAFEYj",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
}
- }
+ },
+ "colab_type": "code",
+ "id": "DyU0wnnAFEYj"
},
- "cell_type": "code",
+ "outputs": [],
"source": [
"@autograph.do_not_convert(run_as=autograph.RunMode.PY_FUNC)\n",
"def draw_prediction(color_name, pred):\n",
@@ -1389,16 +1288,14 @@
" draw_prediction(color_name, pred)\n",
" # Create an op that will drive the entire function.\n",
" return tf.no_op()"
- ],
- "execution_count": 0,
- "outputs": []
+ ]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "Nt0Kv5OCHip0",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "Nt0Kv5OCHip0"
},
- "cell_type": "markdown",
"source": [
"Finally, we put everything together.\n",
"\n",
@@ -1406,218 +1303,132 @@
]
},
{
+ "cell_type": "code",
+ "execution_count": 0,
"metadata": {
- "id": "-GmWa0GtYWdh",
- "colab_type": "code",
"colab": {
"autoexec": {
"startup": false,
"wait_interval": 0
},
- "output_extras": [
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {},
- {}
- ],
- "base_uri": "https://localhost:8080/",
- "height": 668
+ "height": 415
},
- "outputId": "61f4af1d-c81e-44db-9079-1a7b8ed8ce58",
+ "colab_type": "code",
"executionInfo": {
+ "elapsed": 15536,
"status": "ok",
- "timestamp": 1522345877153,
- "user_tz": 240,
- "elapsed": 75500,
+ "timestamp": 1531750946373,
"user": {
- "displayName": "Dan Moldovan",
- "photoUrl": "//lh5.googleusercontent.com/-Rneh8xjecyk/AAAAAAAAAAI/AAAAAAAACB4/c5vwsJpbktY/s50-c-k-no/photo.jpg",
- "userId": "112023154726779574577"
- }
- }
+ "displayName": "",
+ "photoUrl": "",
+ "userId": ""
+ },
+ "user_tz": 240
+ },
+ "id": "-GmWa0GtYWdh",
+ "outputId": "2e7a9856-9809-43a3-8b43-3c8514ea43e9"
},
- "cell_type": "code",
- "source": [
- "def run_input_loop(sess, inference_ops, color_name_placeholder):\n",
- " \"\"\"Helper function that reads from input and calls the inference ops in a loop.\"\"\"\n",
- "\n",
- " tb = widgets.TabBar([\"RNN Colorbot\"])\n",
- " while True:\n",
- " with tb.output_to(0):\n",
- " try:\n",
- " color_name = six.moves.input(\"Give me a color name (or press 'enter' to exit): \")\n",
- " except (EOFError, KeyboardInterrupt):\n",
- " break\n",
- " if not color_name:\n",
- " break\n",
- " with tb.output_to(0):\n",
- " tb.clear_tab()\n",
- " sess.run(inference_ops, {color_name_placeholder: color_name})\n",
- " plt.show()\n",
- "\n",
- "with tf.Graph().as_default():\n",
- " # Read the data.\n",
- " batch_size = 64\n",
- " train_data = load_dataset(data_dir, train_url, batch_size)\n",
- " eval_data = load_dataset(data_dir, test_url, 50, training=False)\n",
- " \n",
- " # Create the model components.\n",
- " lower_cell, upper_cell, relu_layer = model_components()\n",
- " # Create the helper placeholder for inference.\n",
- " color_name_placeholder = tf.placeholder(tf.string, shape=())\n",
- " \n",
- " # Compile the train / test code.\n",
- " tf_train_model = autograph.to_graph(train_model)\n",
- " train_model_ops = tf_train_model(\n",
- " train_data, eval_data, batch_size, lower_cell, upper_cell, relu_layer, train_steps=100)\n",
- " \n",
- " # Compile the inference code.\n",
- " tf_inference = autograph.to_graph(inference)\n",
- " inference_ops = tf_inference(color_name_placeholder, lower_cell, upper_cell, relu_layer)\n",
- " \n",
- " with tf.Session() as sess:\n",
- " sess.run(tf.global_variables_initializer())\n",
- " \n",
- " # Run training and testing.\n",
- " sess.run(train_model_ops)\n",
- " \n",
- " # Run the inference loop.\n",
- " run_input_loop(sess, inference_ops, color_name_placeholder)"
- ],
- "execution_count": 0,
"outputs": [
{
+ "name": "stdout",
"output_type": "stream",
"text": [
- "('Successfully downloaded', 'train.csv', 28010L, 'bytes.')\n",
- "('Successfully downloaded', 'test.csv', 2414L, 'bytes.')\n",
- "Step 0 train loss 0.37890616\n",
- "Step 10 train loss 0.18515904\n",
- "Step 20 train loss 0.0892782\n",
- "Step 30 train loss 0.07883155\n",
- "Step 40 train loss 0.08585831\n",
- "Step 50 train loss 0.09302989\n",
- "Step 60 train loss 0.089012615\n",
- "Step 70 train loss 0.07275697\n",
- "Step 80 train loss 0.06644974\n",
- "Step 90 train loss 0.0854013\n",
- "Test loss 0.13216865Colorbot is ready to generate colors!\n",
- "\n",
+ "Test loss 0.138294\n",
+ "Colorbot is ready to generate colors!\n",
"\n",
"\n"
- ],
- "name": "stdout"
+ ]
},
{
- "output_type": "display_data",
"data": {
- "text/plain": [
- "<IPython.core.display.HTML object>"
- ],
"text/html": [
- "<link rel=stylesheet type=text/css href='/nbextensions/google.colab/tabbar.css'></link>"
+ "\u003clink rel=stylesheet type=text/css href='/nbextensions/google.colab/tabbar.css'\u003e\u003c/link\u003e"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.HTML at 0x7f97ee42bb90\u003e"
]
},
"metadata": {
"tags": [
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
- "text/plain": [
- "<IPython.core.display.HTML object>"
- ],
"text/html": [
- "<script src='/nbextensions/google.colab/tabbar_main.min.js'></script>"
+ "\u003cscript src='/nbextensions/google.colab/tabbar_main.min.js'\u003e\u003c/script\u003e"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.HTML at 0x7f97ee42be10\u003e"
]
},
"metadata": {
"tags": [
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
- "text/plain": [
- "<IPython.core.display.HTML object>"
- ],
"text/html": [
- "<div id=\"id1\"></div>"
+ "\u003cdiv id=\"id1\"\u003e\u003c/div\u003e"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.HTML at 0x7f97ee42bd90\u003e"
]
},
"metadata": {
"tags": [
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"b102d936-3379-11e8-ac70-0242ac110002\"] = colab_lib.createTabBar({\"contentBorder\": [\"0px\"], \"borderColor\": [\"#a7a7a7\"], \"tabNames\": [\"RNN Colorbot\"], \"initialSelection\": 0, \"location\": \"top\", \"contentHeight\": [\"initial\"], \"elementId\": \"id1\"});\n",
- "//# sourceURL=js_e223a56194"
+ "window[\"a6045494-8903-11e8-99f9-c8d3ffb5fbe0\"] = colab_lib.createTabBar({\"location\": \"top\", \"borderColor\": [\"#a7a7a7\"], \"initialSelection\": 0, \"elementId\": \"id1\", \"contentHeight\": [\"initial\"], \"contentBorder\": [\"0px\"], \"tabNames\": [\"RNN Colorbot\"]});\n",
+ "//# sourceURL=js_02f896cbda"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2ab810\u003e"
]
},
"metadata": {
"tags": [
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"b103532a-3379-11e8-ac70-0242ac110002\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_b8c6a821fb"
+ "window[\"a6045495-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_7e8f9f77a0"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2ab710\u003e"
]
},
"metadata": {
"tags": [
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"b105b28c-3379-11e8-ac70-0242ac110002\"] = google.colab.output.getActiveOutputArea();\n",
- "//# sourceURL=js_44805e254b"
+ "window[\"a6045496-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
+ "//# sourceURL=js_5531553c2f"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2ab6d0\u003e"
]
},
"metadata": {
@@ -1625,17 +1436,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"b106197a-3379-11e8-ac70-0242ac110002\"] = document.querySelector(\"#id1_content_0\");\n",
- "//# sourceURL=js_a63d3c6c47"
+ "window[\"a6045497-8903-11e8-99f9-c8d3ffb5fbe0\"] = document.querySelector(\"#id1_content_0\");\n",
+ "//# sourceURL=js_d1f809ec17"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2ab990\u003e"
]
},
"metadata": {
@@ -1643,17 +1454,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"b1069f44-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"b106197a-3379-11e8-ac70-0242ac110002\"]);\n",
- "//# sourceURL=js_7e203b8bce"
+ "window[\"a6045498-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"a6045497-8903-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_3a3123cadb"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2aba50\u003e"
]
},
"metadata": {
@@ -1661,17 +1472,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"b1070f38-3379-11e8-ac70-0242ac110002\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_d53293d4a7"
+ "window[\"a6045499-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_1a0e1f7d6f"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2ab890\u003e"
]
},
"metadata": {
@@ -1679,17 +1490,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c6d90d5c-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"b105b28c-3379-11e8-ac70-0242ac110002\"]);\n",
- "//# sourceURL=js_3000dc2c05"
+ "window[\"a8e54762-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"a6045496-8903-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_6213539615"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2abad0\u003e"
]
},
"metadata": {
@@ -1697,17 +1508,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c6da872c-3379-11e8-ac70-0242ac110002\"] = google.colab.output.getActiveOutputArea();\n",
- "//# sourceURL=js_4136f669a3"
+ "window[\"a8e54763-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
+ "//# sourceURL=js_0bd7f95c6e"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2ab950\u003e"
]
},
"metadata": {
@@ -1715,17 +1526,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c6dac868-3379-11e8-ac70-0242ac110002\"] = document.querySelector(\"#id1_content_0\");\n",
- "//# sourceURL=js_2f70dd9aee"
+ "window[\"a8e54764-8903-11e8-99f9-c8d3ffb5fbe0\"] = document.querySelector(\"#id1_content_0\");\n",
+ "//# sourceURL=js_215f004f6b"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2abb10\u003e"
]
},
"metadata": {
@@ -1733,17 +1544,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c6db07d8-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"c6dac868-3379-11e8-ac70-0242ac110002\"]);\n",
- "//# sourceURL=js_7226726048"
+ "window[\"a8e54765-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"a8e54764-8903-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_a06186c8ad"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2aba90\u003e"
]
},
"metadata": {
@@ -1751,17 +1562,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c6dcc6fe-3379-11e8-ac70-0242ac110002\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_72e7709865"
+ "window[\"a8e54766-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_383fbaae67"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ee2abc50\u003e"
]
},
"metadata": {
@@ -1769,14 +1580,14 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAVQAAAFZCAYAAADHDNdrAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAB9JJREFUeJzt3E1Lle0ax+HTF4jeEAyMBhE0DawI\nwsCH0AIlaGBWNJBo0CDoA0TQhmDXuKAGDioiCA2KlEAlnl05FD9Co8BeaGCQoBDa2jPZsXt4Bvu/\n0+o4Rmvd1zW4rsmP84bFamo0Go0C4H/WvNYHAPhVCCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKDy\nUxgeHq5Dhw7V4OBgPXz4sHp7e+vWrVt15cqVOnnyZN2/f78ajUbdvn27+vr6qqenp65du1YrKytV\nVfXhw4e6cOFC9fX1VV9fX01PT1dV1dzcXHV3d9eDBw/q+PHj9ccff9TExMRaXpWfWOtaHwD+zuvX\nr+vOnTs1MTFRbW1tdf78+dW16enpGh8fr/b29hobG6upqal6/Phxbdy4sS5evFgjIyM1NDRUly5d\nqv3799fw8HC9efOmTp8+XVNTU1VV9enTp2pubq5nz57V5ORk3bhxo44dO7ZW1+UnZkJl3Zudna2D\nBw9WR0dHbdiwoQYHB1fX9u7dW+3t7VVV9fLlyxocHKytW7dWa2trnTp1qp4/f16Li4s1MzNT586d\nq6qqXbt21YEDB1an1OXl5Tpx4kRVVe3Zs6fevXv3Yy/IL8OEyrr3+fPnamtrW/2+ffv21c//+Xxh\nYaHu3r1bjx49qqqqlZWVam9vr4WFhWo0GnXmzJnVvYuLi9XV1VVVVS0tLbVp06aqqmpubq6vX7/+\nX+/Dr0tQWfe2bNlSi4uLq98/fvz43X0dHR3V29tbQ0ND3zxfXl6ulpaWevLkSW3evPmbtbm5ufyB\n+W155Wfd6+zsrJmZmZqfn68vX77U2NjYd/cdOXKkxsfHa2lpqaqqRkdH6+nTp9Xa2lqHDx+u0dHR\nqqpaWlqqy5cv1/v373/YHfg9CCrrXmdnZw0MDNTAwECdPXu2enp6vrvv6NGj1dPTUwMDA9Xf318v\nXryo7u7uqqq6evVqzc7OVn9/fw0MDNTOnTtrx44dP/Ia/Aaa/B8qP4NGo1FNTU1VVfXq1au6efPm\nX06qsFZMqKx78/Pz1dXVVW/fvq1Go1GTk5O1b9++tT4W/BcTKj+FkZGRunfvXjU1NdXu3bvr+vXr\ntW3btrU+FnxDUAFCvPIDhAgqQMi6+WH/kX8eXesjAPytf/3jz79cM6EChAgqQIigAoQIKkCIoAKE\nCCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQI\nKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgq\nQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpA\niKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCI\noAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIig\nAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAC\nhAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKE\nCCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQI\nKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgq\nQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpA\niKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCI\noAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIig\nAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAC\nhAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKE\nCCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQI\nKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgq\nQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpA\niKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkCIoAKECCpAiKAChAgqQIigAoQIKkBI\nU6PRaKz1IQB+BSZUgBBBBQgRVIAQQQUIEVSAEEEFCBFUgBBBBQgRVIAQQQUIEVSAEEEFCBFUgBBB\nBQgRVIAQQQUIEVSAEEEFCBFUgBBBBQgRVIAQQQUIEVSAkH8D1Aj8lNhhe7QAAAAASUVORK5CYII=\n",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQwAAAENCAYAAAD60Fs2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAACL9JREFUeJzt3F+IlXUex/Gv2ziiBRGVOQaFd2JBzOg5aiH+IZGoJgmM\n/uhVGIlgFE0QEYHdFQaRGBJ10VX0D5TAi8jKomGmILsYjEAkmBwbRIxKGDV/e7G7w8ouux9jd911\nX6+rcx6e85zveS7e/J7zb0ZrrRVA4A8XewDgf4dgADHBAGKCAcQEA4gJBhATDC6Kp59+urrdbt13\n3301OjpaK1euvNgjERCMS9yaNWtqeHj4Yo9xnq+++qqGh4frs88+q7fffruqqmbMmHGRpyIhGPxH\n/fbbb/XDDz/U9ddfX7NmzbrY43CBBOMS9tRTT9XExERt2bKlBgYG6vXXX69vvvmm7r///up0OrV+\n/foaHR2d3n/Tpk318ssv1wMPPFADAwP18MMP18mTJ6uq6vTp0zU0NFRLly6tTqdTGzZsqBMnTlRV\n1eTkZG3ZsqWWLl1a69atq3feeWf6mDt37qxt27bV0NBQLVmypN5777169tln6+DBgzUwMFA7d+78\nm7kPHz5cmzZtqk6nU3fffXft37+/qqrGx8er0+lM7/fMM8/UrbfeOn1/aGio3nzzzX/tSeR8jUva\n6tWr2/DwcGuttWPHjrVut9sOHDjQWmvtiy++aN1ut504caK11trGjRvb2rVr2/fff9+mpqbaxo0b\n244dO1prrb311lvt0UcfbVNTU+3cuXNtbGys/fLLL6211h566KG2ffv2dvr06Xbo0KG2bNmy6ed8\n5ZVX2k033dQ++uij1lprU1NT7f33328PPvjg9IwjIyNt5cqVrbXWzpw509auXdt2797dzpw504aH\nh1t/f387cuTI9OsZGxtrrbW2bt26dvvtt7fDhw+31lpbtWpVO3To0L/rVNJas8L4P9D+/HOhvXv3\n1qpVq2rFihVVVbV8+fK6+eab69NPP53e9957760bbrihent764477qhDhw5VVVVPT0+dPHmyjhw5\nUjNmzKhFixbV5ZdfXseOHauvv/66nnzyyZo5c2YtXLiwNmzYUHv27Jk+Zn9/f61Zs6aqqnp7e//h\nrAcPHqxTp07VI488Uj09PbVs2bJavXp1ffDBB1VVtWTJkhodHa3jx49XVdW6devqyy+/rPHx8fr1\n119r4cKF/6Kzxt/Tc7EH4D/n6NGjtW/fvvr444+r6k8hOXv2bC1fvnx6n2uuuWb69uzZs+vUqVNV\nVXXPPffUsWPH6oknnqiff/65BgcH6/HHH6/Jycm68sora/bs2dOPmz9/fo2NjU3fnzdvXjzj5ORk\n9fX1nbdt/vz5NTk5WVVVnU6n9u/fX9ddd111u93qdru1Z8+e6u3trcWLF1/A2eD3EIxL3F9/+tDX\n11fr16+v7du3X/Bxenp6auvWrbV169Y6evRobd68uRYsWFC33XZb/fTTT3Xq1KmaM2dOVVVNTEzU\n3Llz/+4M/8zcuXNrYmLivG1Hjx6tBQsWVFVVt9utF198sfr6+qrT6dTAwEA999xz1dvbW91u94Jf\nFxfGJckl7tprr63x8fGqqhocHKz9+/fX559/XufOnaupqakaHR2tH3/88Z8eZ2RkpL777rs6d+5c\nzZkzp3p6euqyyy6refPmVX9/f7300kt1+vTp+vbbb+vdd9+twcHB3zXvLbfcUnPmzKnXXnutzp49\nWyMjI/XJJ5/UnXfeWVVVN954Y82aNav27t1bnU6nrrjiirr66qvrww8/PO8NUf49BOMSt3nz5tq1\na1d1u93at29f7dq1q3bv3l3Lly+v1atX1xtvvDH9Hsc/WgkcP368tm3bVosXL6677rqrli5dOh2F\nHTt21Pj4eK1YsaK2bdtWjz322HmXORdi5syZ9eqrr9aBAwdq2bJl9fzzz9cLL7wwvcKo+tMq46qr\nrpq+1PlLKBYtWvS7npPcjNb8gQ6QscIAYoIBxAQDiAkGEPuv/R7GxN7+iz0C/F/rG/z6b7ZZYQAx\nwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQE\nA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMM\nICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCA\nmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBi\nggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJ\nBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYY\nQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAA\nMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHE\nBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhAT\nDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiAkGEBMMICYYQEww\ngJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOICQYQEwwgJhhATDCAmGAAMcEA\nYoIBxAQDiAkGEBMMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBiggHEBAOI\nCQYQEwwgNqO11i72EMD/BisMICYYQEwwgJhgADHBAGKCAcQEA4gJBhATDCAmGEBMMICYYAAxwQBi\nggHEBAOICQYQEwwgJhhATDCAmGAAMcEAYoIBxAQDiP0RoqNMBlokHDIAAAAASUVORK5CYII=\n",
"text/plain": [
- "<matplotlib.figure.Figure at 0x7f72f402e850>"
+ "\u003cmatplotlib.figure.Figure at 0x7f97ee42bb90\u003e"
]
},
"metadata": {
@@ -1785,17 +1596,17 @@
"outputarea_id1",
"user_output"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c70592aa-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"c6da872c-3379-11e8-ac70-0242ac110002\"]);\n",
- "//# sourceURL=js_25c3aaf79a"
+ "window[\"a8e54767-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"a8e54763-8903-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_28bd08ac10"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9efc10\u003e"
]
},
"metadata": {
@@ -1803,17 +1614,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c70842c0-3379-11e8-ac70-0242ac110002\"] = google.colab.output.getActiveOutputArea();\n",
- "//# sourceURL=js_984c56b816"
+ "window[\"a8e54768-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.getActiveOutputArea();\n",
+ "//# sourceURL=js_ae2887f57d"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9efb50\u003e"
]
},
"metadata": {
@@ -1821,17 +1632,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c708dec4-3379-11e8-ac70-0242ac110002\"] = document.querySelector(\"#id1_content_0\");\n",
- "//# sourceURL=js_e0451a1217"
+ "window[\"a8e54769-8903-11e8-99f9-c8d3ffb5fbe0\"] = document.querySelector(\"#id1_content_0\");\n",
+ "//# sourceURL=js_608805a786"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef710\u003e"
]
},
"metadata": {
@@ -1839,17 +1650,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c7092726-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"c708dec4-3379-11e8-ac70-0242ac110002\"]);\n",
- "//# sourceURL=js_7aa23d7385"
+ "window[\"a8e5476a-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"a8e54769-8903-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_3d87cf7d0f"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9efa90\u003e"
]
},
"metadata": {
@@ -1857,17 +1668,17 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c7099044-3379-11e8-ac70-0242ac110002\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
- "//# sourceURL=js_5722756ddb"
+ "window[\"a8e5476b-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"id1\"].setSelectedTabIndex(0);\n",
+ "//# sourceURL=js_5e91101199"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9efa50\u003e"
]
},
"metadata": {
@@ -1875,24 +1686,149 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
},
{
- "output_type": "stream",
- "text": [
- "Give me a color name (or press 'enter' to exit): \n"
- ],
- "name": "stdout"
+ "data": {
+ "text/html": [
+ "\u003cdiv class=id_45185901 style=\"margin-right:10px; display:flex;align-items:center;\"\u003e\u003cspan style=\"margin-right: 3px;\"\u003e\u003c/span\u003e\u003c/div\u003e"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.HTML at 0x7f97ee42bd90\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "window[\"a8e5476c-8903-11e8-99f9-c8d3ffb5fbe0\"] = jQuery(\".id_45185901 span\");\n",
+ "//# sourceURL=js_f43052a94e"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef750\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "window[\"a8e5476d-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"a8e5476c-8903-11e8-99f9-c8d3ffb5fbe0\"].text(\"Give me a color name (or press 'enter' to exit): \");\n",
+ "//# sourceURL=js_bfc0fb76ce"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9efb10\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "window[\"a9e9b8b0-8903-11e8-99f9-c8d3ffb5fbe0\"] = jQuery(\".id_45185901 input\");\n",
+ "//# sourceURL=js_7f167283fa"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef610\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "window[\"a9e9b8b1-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"a9e9b8b0-8903-11e8-99f9-c8d3ffb5fbe0\"].remove();\n",
+ "//# sourceURL=js_016ae4bf21"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef250\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
},
{
- "output_type": "display_data",
"data": {
"application/javascript": [
- "window[\"c7baac12-3379-11e8-ac70-0242ac110002\"] = google.colab.output.setActiveOutputArea(window[\"c70842c0-3379-11e8-ac70-0242ac110002\"]);\n",
- "//# sourceURL=js_cdd622e58f"
+ "window[\"a9e9b8b2-8903-11e8-99f9-c8d3ffb5fbe0\"] = jQuery(\".id_45185901 span\");\n",
+ "//# sourceURL=js_e666f179bc"
],
"text/plain": [
- "<IPython.core.display.Javascript object>"
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef550\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "window[\"a9e9b8b3-8903-11e8-99f9-c8d3ffb5fbe0\"] = window[\"a9e9b8b2-8903-11e8-99f9-c8d3ffb5fbe0\"].text(\"Give me a color name (or press 'enter' to exit): \");\n",
+ "//# sourceURL=js_cbb9d14aec"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef1d0\u003e"
+ ]
+ },
+ "metadata": {
+ "tags": [
+ "id1_content_0",
+ "outputarea_id1",
+ "user_output"
+ ]
+ },
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/javascript": [
+ "window[\"a9e9b8b4-8903-11e8-99f9-c8d3ffb5fbe0\"] = google.colab.output.setActiveOutputArea(window[\"a8e54768-8903-11e8-99f9-c8d3ffb5fbe0\"]);\n",
+ "//# sourceURL=js_2967a79665"
+ ],
+ "text/plain": [
+ "\u003cIPython.core.display.Javascript at 0x7f97ea9ef1d0\u003e"
]
},
"metadata": {
@@ -1900,21 +1836,98 @@
"id1_content_0",
"outputarea_id1"
]
- }
+ },
+ "output_type": "display_data"
}
+ ],
+ "source": [
+ "def run_input_loop(sess, inference_ops, color_name_placeholder):\n",
+ " \"\"\"Helper function that reads from input and calls the inference ops in a loop.\"\"\"\n",
+ "\n",
+ " tb = widgets.TabBar([\"RNN Colorbot\"])\n",
+ " while True:\n",
+ " with tb.output_to(0):\n",
+ " try:\n",
+ " color_name = six.moves.input(\"Give me a color name (or press 'enter' to exit): \")\n",
+ " except (EOFError, KeyboardInterrupt):\n",
+ " break\n",
+ " if not color_name:\n",
+ " break\n",
+ " with tb.output_to(0):\n",
+ " tb.clear_tab()\n",
+ " sess.run(inference_ops, {color_name_placeholder: color_name})\n",
+ " plt.show()\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
+ " # Read the data.\n",
+ " batch_size = 64\n",
+ " train_data = load_dataset(data_dir, train_url, batch_size)\n",
+ " eval_data = load_dataset(data_dir, test_url, 50, training=False)\n",
+ " \n",
+ " # Create the model components.\n",
+ " lower_cell, upper_cell, relu_layer = model_components()\n",
+ " # Create the helper placeholder for inference.\n",
+ " color_name_placeholder = tf.placeholder(tf.string, shape=())\n",
+ " \n",
+ " # Compile the train / test code.\n",
+ " tf_train_model = autograph.to_graph(train_model)\n",
+ " train_model_ops = tf_train_model(\n",
+ " train_data, eval_data, batch_size, lower_cell, upper_cell, relu_layer, train_steps=100)\n",
+ " \n",
+ " # Compile the inference code.\n",
+ " tf_inference = autograph.to_graph(inference)\n",
+ " inference_ops = tf_inference(color_name_placeholder, lower_cell, upper_cell, relu_layer)\n",
+ " \n",
+ " with tf.Session() as sess:\n",
+ " sess.run(tf.global_variables_initializer())\n",
+ " \n",
+ " # Run training and testing.\n",
+ " sess.run(train_model_ops)\n",
+ " \n",
+ " # Run the inference loop.\n",
+ " run_input_loop(sess, inference_ops, color_name_placeholder)"
]
},
{
+ "cell_type": "markdown",
"metadata": {
- "id": "AHJ2c47U-A5W",
- "colab_type": "text"
+ "colab_type": "text",
+ "id": "AHJ2c47U-A5W"
},
- "cell_type": "markdown",
"source": [
"# Where do we go next?\n",
"\n",
- "Autograph is available in tensorflow.contrib, but it's still in its early stages. We're excited about the possibilities it brings — write your machine learning code in the flexible Eager style, but still enjoy all the benefits that come with running in graph mode. A beta version will be available soon -- stay tuned!"
+ "AutoGraph is still in its early stages, but is available in [tensorflow.contrib](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/autograph). We're excited about the possibilities it brings. New versions will be available soon — stay tuned!"
]
}
- ]
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [],
+ "default_view": {},
+ "name": "Dev Summit 2018 - Autograph",
+ "provenance": [
+ {
+ "file_id": "1wCZUh73zTNs1jzzYjqoxMIdaBWCdKJ2K",
+ "timestamp": 1522238054357
+ },
+ {
+ "file_id": "1_HpC-RrmIv4lNaqeoslUeWaX8zH5IXaJ",
+ "timestamp": 1521743157199
+ },
+ {
+ "file_id": "1mjO2fQ2F9hxpAzw2mnrrUkcgfb7xSGW-",
+ "timestamp": 1520522344607
+ }
+ ],
+ "version": "0.3.2",
+ "views": {}
+ },
+ "kernelspec": {
+ "display_name": "Python 2",
+ "name": "python2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
}
diff --git a/tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb b/tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb
index e8f16b431d..e7dfb13e15 100644
--- a/tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb
+++ b/tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb
@@ -11,6 +11,24 @@
}
},
"colab_type": "code",
+ "id": "u3B7Uh50lozN"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -U -q tf-nightly"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
"id": "qWUV0FYjDSKj"
},
"outputs": [],
@@ -62,7 +80,7 @@
"# ...into graph-building functions like this:\n",
"def tf_g(x):\n",
" with tf.name_scope('g'):\n",
- " \n",
+ "\n",
" def if_true():\n",
" with tf.name_scope('if_true'):\n",
" x_1, = x,\n",
@@ -76,7 +94,7 @@
" return x_1,\n",
"\n",
" x = autograph_utils.run_cond(tf.greater(x, 0), if_true, if_false)\n",
- " return x\n"
+ " return x"
]
},
{
@@ -101,14 +119,14 @@
"# Generate a graph-version of g and call it:\n",
"tf_g = autograph.to_graph(g)\n",
"\n",
- "with tf.Graph().as_default(): \n",
+ "with tf.Graph().as_default():\n",
" # The result works like a regular op: takes tensors in, returns tensors.\n",
" # You can inspect the graph using tf.get_default_graph().as_graph_def()\n",
" g_ops = tf_g(tf.constant(9.0))\n",
" with tf.Session() as sess:\n",
" print('Autograph value: %2.2f\\n' % sess.run(g_ops))\n",
- " \n",
- " \n",
+ "\n",
+ "\n",
"# You can view, debug and tweak the generated code:\n",
"print(autograph.to_code(g))"
]
@@ -155,10 +173,10 @@
"print('Original value: %d' % f([10,12,15,20]))\n",
"\n",
"tf_f = autograph.to_graph(f)\n",
- "with tf.Graph().as_default(): \n",
+ "with tf.Graph().as_default():\n",
" with tf.Session():\n",
" print('Graph value: %d\\n\\n' % tf_f(tf.constant([10,12,15,20])).eval())\n",
- " \n",
+ "\n",
"print(autograph.to_code(f))"
]
},
@@ -194,7 +212,7 @@
" return x * x\n",
"\n",
"tf_f = autograph.to_graph(f)\n",
- "with tf.Graph().as_default(): \n",
+ "with tf.Graph().as_default():\n",
" with tf.Session():\n",
" try:\n",
" print(tf_f(tf.constant(0)).eval())\n",
@@ -233,7 +251,7 @@
" n += 1\n",
" print(n)\n",
" return n\n",
- " \n",
+ "\n",
"tf_f = autograph.to_graph(f)\n",
"with tf.Graph().as_default():\n",
" with tf.Session():\n",
@@ -247,7 +265,7 @@
"id": "NqF0GT-VCVFh"
},
"source": [
- "Appending to lists in loops also works (we create a `TensorArray` for you behind the scenes)"
+    "Appending to lists in loops also works (we create tensor list ops behind the scenes)"
]
},
{
@@ -268,15 +286,15 @@
"def f(n):\n",
" z = []\n",
" # We ask you to tell us the element dtype of the list\n",
- " z = autograph.utils.set_element_type(z, tf.int32)\n",
+ " autograph.set_element_type(z, tf.int32)\n",
" for i in range(n):\n",
" z.append(i)\n",
" # when you're done with the list, stack it\n",
" # (this is just like np.stack)\n",
- " return autograph.stack(z) \n",
+ " return autograph.stack(z)\n",
"\n",
"tf_f = autograph.to_graph(f)\n",
- "with tf.Graph().as_default(): \n",
+ "with tf.Graph().as_default():\n",
" with tf.Session():\n",
" print(tf_f(tf.constant(3)).eval())\n",
"\n",
@@ -327,7 +345,7 @@
"source": [
"tf_g = autograph.to_graph(fizzbuzz)\n",
"\n",
- "with tf.Graph().as_default(): \n",
+ "with tf.Graph().as_default():\n",
" # The result works like a regular op: takes tensors in, returns tensors.\n",
" # You can inspect the graph using tf.get_default_graph().as_graph_def()\n",
" g_ops = tf_g(tf.constant(15))\n",
@@ -384,7 +402,7 @@
" return x\n",
"\n",
"\n",
- "with tf.Graph().as_default(): \n",
+ "with tf.Graph().as_default():\n",
" with tf.Session() as sess:\n",
" print(sess.run(square_log(tf.constant(4))))"
]
@@ -396,7 +414,7 @@
"id": "_R-Q7BbxmkBF"
},
"source": [
- "#### Now some exercises. Convert the TensorFlow code into AutoGraph'd Python code."
+ "#### Convert the TensorFlow code into Python code for AutoGraph"
]
},
{
@@ -439,8 +457,10 @@
"source": [
"@autograph.convert()\n",
"def square_if_positive(x):\n",
- " ... # \u003c\u003c\u003c fill it in!\n",
- " \n",
+ "\n",
+ " pass # TODO: fill it in!\n",
+ "\n",
+ "\n",
"with tf.Session() as sess:\n",
" print(sess.run(square_if_positive(tf.constant(4))))"
]
@@ -517,7 +537,7 @@
" x = tf.cond(tf.greater(x, 0), if_positive, lambda: x)\n",
" return x\n",
"\n",
- "with tf.Graph().as_default(): \n",
+ "with tf.Graph().as_default():\n",
" with tf.Session() as sess:\n",
" print(sess.run(nearest_odd_square(tf.constant(4))))"
]
@@ -539,8 +559,10 @@
"source": [
"@autograph.convert()\n",
"def nearest_odd_square(x):\n",
- " ... # \u003c\u003c\u003c fill it in!\n",
- " \n",
+ "\n",
+ " pass # TODO: fill it in!\n",
+ "\n",
+ "\n",
"with tf.Session() as sess:\n",
" print(sess.run(nearest_odd_square(tf.constant(4))))"
]
@@ -578,7 +600,7 @@
" x = x + 1\n",
" return x\n",
"\n",
- "with tf.Graph().as_default(): \n",
+ "with tf.Graph().as_default():\n",
" with tf.Session() as sess:\n",
" print(sess.run(nearest_odd_square(tf.constant(4))))"
]
@@ -612,8 +634,8 @@
"def square_until_stop(x, y):\n",
" x = tf.while_loop(lambda x: tf.less(x, y), lambda x: x * x, [x])\n",
" return x\n",
- " \n",
- "with tf.Graph().as_default(): \n",
+ "\n",
+ "with tf.Graph().as_default():\n",
" with tf.Session() as sess:\n",
" print(sess.run(square_until_stop(tf.constant(4), tf.constant(100))))"
]
@@ -635,9 +657,11 @@
"source": [
"@autograph.convert()\n",
"def square_until_stop(x, y):\n",
- " ... # fill it in!\n",
- " \n",
- "with tf.Graph().as_default(): \n",
+ "\n",
+ " pass # TODO: fill it in!\n",
+ "\n",
+ "\n",
+ "with tf.Graph().as_default():\n",
" with tf.Session() as sess:\n",
" print(sess.run(square_until_stop(tf.constant(4), tf.constant(100))))"
]
@@ -672,8 +696,8 @@
" while x \u003c y:\n",
" x = x * x\n",
" return x\n",
- " \n",
- "with tf.Graph().as_default(): \n",
+ "\n",
+ "with tf.Graph().as_default():\n",
" with tf.Session() as sess:\n",
" print(sess.run(square_until_stop(tf.constant(4), tf.constant(100))))"
]
@@ -707,7 +731,7 @@
"def argwhere_cumsum(x, threshold):\n",
" current_sum = 0.0\n",
" idx = 0\n",
- " \n",
+ "\n",
" for i in range(len(x)):\n",
" idx = i\n",
" if current_sum \u003e= threshold:\n",
@@ -715,10 +739,10 @@
" current_sum += x[i]\n",
" return idx\n",
"\n",
- "N = 10\n",
- "with tf.Graph().as_default(): \n",
+ "n = 10\n",
+ "with tf.Graph().as_default():\n",
" with tf.Session() as sess:\n",
- " idx = argwhere_cumsum(tf.ones(N), tf.constant(float(N/2)))\n",
+ " idx = argwhere_cumsum(tf.ones(n), tf.constant(float(n / 2)))\n",
" print(sess.run(idx))"
]
},
@@ -739,12 +763,14 @@
"source": [
"@autograph.convert()\n",
"def argwhere_cumsum(x, threshold):\n",
- " ...\n",
"\n",
- "N = 10\n",
- "with tf.Graph().as_default(): \n",
+ " pass # TODO: fill it in!\n",
+ "\n",
+ "\n",
+ "n = 10\n",
+ "with tf.Graph().as_default():\n",
" with tf.Session() as sess:\n",
- " idx = argwhere_cumsum(tf.ones(N), tf.constant(float(N/2)))\n",
+ " idx = argwhere_cumsum(tf.ones(n), tf.constant(float(n / 2)))\n",
" print(sess.run(idx))"
]
},
@@ -784,10 +810,10 @@
" current_sum += x[i]\n",
" return idx\n",
"\n",
- "N = 10\n",
+ "n = 10\n",
"with tf.Graph().as_default(): \n",
" with tf.Session() as sess:\n",
- " idx = argwhere_cumsum(tf.ones(N), tf.constant(float(N/2)))\n",
+ " idx = argwhere_cumsum(tf.ones(n), tf.constant(float(n / 2)))\n",
" print(sess.run(idx))"
]
},
@@ -980,43 +1006,50 @@
"def train(train_ds, test_ds, hp):\n",
" m = mlp_model((28 * 28,))\n",
" opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n",
- " \n",
+ "\n",
" # We'd like to save our losses to a list. In order for AutoGraph\n",
" # to convert these lists into their graph equivalent,\n",
" # we need to specify the element type of the lists.\n",
" train_losses = []\n",
- " train_losses = autograph.utils.set_element_type(train_losses, tf.float32)\n",
" test_losses = []\n",
- " test_losses = autograph.utils.set_element_type(test_losses, tf.float32)\n",
" train_accuracies = []\n",
- " train_accuracies = autograph.utils.set_element_type(train_accuracies, tf.float32)\n",
" test_accuracies = []\n",
- " test_accuracies = autograph.utils.set_element_type(test_accuracies, tf.float32)\n",
- " \n",
+ " autograph.set_element_type(train_losses, tf.float32)\n",
+ " autograph.set_element_type(test_losses, tf.float32)\n",
+ " autograph.set_element_type(train_accuracies, tf.float32)\n",
+ " autograph.set_element_type(test_accuracies, tf.float32)\n",
+ "\n",
" # This entire training loop will be run in-graph.\n",
" i = tf.constant(0)\n",
" while i \u003c hp.max_steps:\n",
" train_x, train_y = get_next_batch(train_ds)\n",
" test_x, test_y = get_next_batch(test_ds)\n",
- " # add get next\n",
+ "\n",
" step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n",
" step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n",
+ "\n",
" if i % (hp.max_steps // 10) == 0:\n",
" print('Step', i, 'train loss:', step_train_loss, 'test loss:',\n",
" step_test_loss, 'train accuracy:', step_train_accuracy,\n",
" 'test accuracy:', step_test_accuracy)\n",
+ "\n",
" train_losses.append(step_train_loss)\n",
" test_losses.append(step_test_loss)\n",
" train_accuracies.append(step_train_accuracy)\n",
" test_accuracies.append(step_test_accuracy)\n",
+ "\n",
" i += 1\n",
- " \n",
- " # We've recorded our loss values and accuracies \n",
+ "\n",
+ " # We've recorded our loss values and accuracies\n",
" # to a list in a graph with AutoGraph's help.\n",
- " # In order to return the values as a Tensor, \n",
+ " # In order to return the values as a Tensor,\n",
" # we need to stack them before returning them.\n",
- " return (autograph.stack(train_losses), autograph.stack(test_losses), autograph.stack(train_accuracies),\n",
- " autograph.stack(test_accuracies))"
+ " return (\n",
+ " autograph.stack(train_losses),\n",
+ " autograph.stack(test_losses),\n",
+ " autograph.stack(train_accuracies),\n",
+ " autograph.stack(test_accuracies),\n",
+ " )"
]
},
{
@@ -1042,14 +1075,17 @@
" train_ds = setup_mnist_data(True, hp, 50)\n",
" test_ds = setup_mnist_data(False, hp, 1000)\n",
" tf_train = autograph.to_graph(train)\n",
- " (train_losses, test_losses, train_accuracies,\n",
- " test_accuracies) = tf_train(train_ds, test_ds, hp)\n",
+ " loss_tensors = tf_train(train_ds, test_ds, hp)\n",
"\n",
" with tf.Session() as sess:\n",
" sess.run(tf.global_variables_initializer())\n",
- " (train_losses, test_losses, train_accuracies,\n",
- " test_accuracies) = sess.run([train_losses, test_losses, train_accuracies,\n",
- " test_accuracies])\n",
+ " (\n",
+ " train_losses,\n",
+ " test_losses,\n",
+ " train_accuracies,\n",
+ " test_accuracies\n",
+ " ) = sess.run(loss_tensors)\n",
+ "\n",
" plt.title('MNIST train/test losses')\n",
" plt.plot(train_losses, label='train loss')\n",
" plt.plot(test_losses, label='test loss')\n",
diff --git a/tensorflow/contrib/autograph/impl/api.py b/tensorflow/contrib/autograph/impl/api.py
index c7401c7df1..f7fe3de5da 100644
--- a/tensorflow/contrib/autograph/impl/api.py
+++ b/tensorflow/contrib/autograph/impl/api.py
@@ -99,6 +99,7 @@ def do_not_convert(run_as=RunMode.GRAPH, return_dtypes=None):
Returns:
A decorator that wraps the original function.
"""
+
def decorator(f):
"""Decorator implementation."""
@@ -109,8 +110,7 @@ def do_not_convert(run_as=RunMode.GRAPH, return_dtypes=None):
@wraps(f)
def py_func_wrapper(*args, **kwargs):
if kwargs:
- raise NotImplementedError(
- 'RunMode.PY_FUNC does not yet support kwargs')
+ raise NotImplementedError('RunMode.PY_FUNC does not yet support kwargs')
# TODO(mdan): Add support for kwargs.
return py_func.wrap_py_func(
f, return_dtypes, args, kwargs, use_dummy_return=not return_dtypes)
@@ -231,7 +231,10 @@ def to_graph(e,
Returns:
A function with a signature identical to `o`, but which when executed it
- creates TF a graph that has the same functionality as the original entity.
+    creates a TF graph that has the same functionality as the original entity.
+ Raises:
+ ValueError: If the converted function defines or refers to symbol names that
+ are reserved for AutoGraph.
"""
program_ctx = converter.ProgramContext(
recursive=recursive,
@@ -256,6 +259,19 @@ def to_graph(e,
compiled_node.__dict__[key] = val
compiled_fn = getattr(compiled_node, name)
+ # Need this so the source_mapping attribute is available for the context
+ # manager to access for runtime errors.
+ #
+ # Note that compiler.ast_to_object attaches the source map 'ag_source_map__'
+ # symbol to the compiled module.
+ source_map_attribute_name = 'ag_source_map'
+ if getattr(compiled_fn, source_map_attribute_name, None) is not None:
+    raise ValueError('cannot convert %s because it has an attribute '
+ '"%s", which is reserved for AutoGraph.' %
+ (compiled_fn, source_map_attribute_name))
+ setattr(compiled_fn, source_map_attribute_name,
+ compiled_node.__dict__['ag_source_map__'])
+
if verbose:
logging.info('Compiled output of %s:\n\n%s\n', e, compiled_src)
@@ -292,7 +308,7 @@ def to_code(e,
conversion.entity_to_graph(e, program_ctx, arg_values, arg_types)
code = '\n'.join(
- compiler.ast_to_source(dep, indentation)
+ compiler.ast_to_source(dep, indentation)[0]
for dep in reversed(tuple(six.itervalues(program_ctx.dependency_cache))))
return program_ctx.required_imports + '\n\n' + code
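
A minimal sketch of how the new source-map attachment looks from the caller's side, assuming tf.contrib.autograph as modified in this hunk; the function f below is a hypothetical user function, not part of the patch:

    from tensorflow.contrib import autograph

    def f(x):
      # Hypothetical user function; any convertible Python function works here.
      if x > 0:
        x = x * x
      return x

    tf_f = autograph.to_graph(f)
    # to_graph now attaches the map from generated code back to the original
    # source, which the error-rewriting context manager reads at runtime.
    print(hasattr(tf_f, 'ag_source_map'))  # True
    print(autograph.to_code(f))            # inspect the generated source
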
diff --git a/tensorflow/contrib/autograph/impl/api_test.py b/tensorflow/contrib/autograph/impl/api_test.py
index 9943093332..4de7df6572 100644
--- a/tensorflow/contrib/autograph/impl/api_test.py
+++ b/tensorflow/contrib/autograph/impl/api_test.py
@@ -206,8 +206,8 @@ class ApiTest(test.TestCase):
return x
with self.test_session() as sess:
- x = api.converted_call(
- test_fn, False, False, {}, constant_op.constant(-1))
+ x = api.converted_call(test_fn, False, False, {},
+ constant_op.constant(-1))
self.assertEqual(1, sess.run(x))
def test_converted_call_method(self):
@@ -274,8 +274,8 @@ class ApiTest(test.TestCase):
return self.x
with self.test_session() as sess:
- tc = api.converted_call(
- TestClass, False, False, {}, constant_op.constant(-1))
+ tc = api.converted_call(TestClass, False, False, {},
+ constant_op.constant(-1))
# tc is now a converted object.
x = tc.test_method()
self.assertEqual(1, sess.run(x))
@@ -305,6 +305,13 @@ class ApiTest(test.TestCase):
# Just check that it is parseable Python code.
self.assertIsNotNone(parser.parse_str(compiled_code))
+ def test_source_map_attribute_present(self):
+
+ def test_fn(y):
+ return y**2
+
+ self.assertTrue(hasattr(api.to_graph(test_fn), 'ag_source_map'))
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/impl/conversion.py b/tensorflow/contrib/autograph/impl/conversion.py
index 776d19f672..7bd0ba3f2d 100644
--- a/tensorflow/contrib/autograph/impl/conversion.py
+++ b/tensorflow/contrib/autograph/impl/conversion.py
@@ -28,26 +28,27 @@ from tensorflow.contrib.autograph.converters import asserts
from tensorflow.contrib.autograph.converters import break_statements
from tensorflow.contrib.autograph.converters import builtin_functions
from tensorflow.contrib.autograph.converters import call_trees
+from tensorflow.contrib.autograph.converters import conditional_expressions
from tensorflow.contrib.autograph.converters import continue_statements
from tensorflow.contrib.autograph.converters import control_flow
from tensorflow.contrib.autograph.converters import decorators
-from tensorflow.contrib.autograph.converters import ifexp
+from tensorflow.contrib.autograph.converters import directives
+from tensorflow.contrib.autograph.converters import error_handlers
from tensorflow.contrib.autograph.converters import lists
from tensorflow.contrib.autograph.converters import logical_expressions
from tensorflow.contrib.autograph.converters import name_scopes
+from tensorflow.contrib.autograph.converters import return_statements
from tensorflow.contrib.autograph.converters import side_effect_guards
-from tensorflow.contrib.autograph.converters import single_return
from tensorflow.contrib.autograph.converters import slices
from tensorflow.contrib.autograph.core import config
from tensorflow.contrib.autograph.core import converter
+from tensorflow.contrib.autograph.core import errors
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import inspect_utils
+from tensorflow.contrib.autograph.pyct import origin_info
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import transformer
-from tensorflow.contrib.autograph.pyct.static_analysis import activity
-from tensorflow.contrib.autograph.pyct.static_analysis import live_values
-from tensorflow.contrib.autograph.pyct.static_analysis import type_info
from tensorflow.python.util import tf_inspect
@@ -157,7 +158,8 @@ def class_to_graph(c, program_ctx):
program_ctx=program_ctx,
arg_values={},
arg_types={'self': (c.__name__, c)},
- owner_type=c)
+ owner_type=c,
+ rewrite_errors=False)
if class_namespace is None:
class_namespace = namespace
else:
@@ -231,6 +233,8 @@ def _add_self_references(namespace, autograph_module):
ag_internal = imp.new_module('autograph')
ag_internal.converted_call = autograph_module.converted_call
ag_internal.utils = utils
+ ag_internal.rewrite_graph_construction_error = (
+ errors.rewrite_graph_construction_error)
# TODO(mdan): Add safeguards against name clashes.
# We don't want to create a submodule because we want the operators to be
# accessible as ag__.<operator>
@@ -239,11 +243,17 @@ def _add_self_references(namespace, autograph_module):
_add_reserved_symbol(namespace, 'ag__', ag_internal)
-def function_to_graph(f, program_ctx, arg_values, arg_types, owner_type=None):
+def function_to_graph(f,
+ program_ctx,
+ arg_values,
+ arg_types,
+ owner_type=None,
+ rewrite_errors=True):
"""Specialization of `entity_to_graph` for callable functions."""
+
node, source = parser.parse_entity(f)
node = node.body[0]
-
+ origin_info.resolve(node, source, f)
namespace = inspect_utils.getnamespace(f)
_add_self_references(namespace, program_ctx.autograph_module)
namer = program_ctx.new_namer(namespace)
@@ -256,7 +266,7 @@ def function_to_graph(f, program_ctx, arg_values, arg_types, owner_type=None):
arg_types=arg_types,
owner_type=owner_type)
context = converter.EntityContext(namer, entity_info, program_ctx)
- node = node_to_graph(node, context)
+ node = node_to_graph(node, context, rewrite_errors=rewrite_errors)
# TODO(mdan): This somewhat duplicates the call rename logic in call_treest.py
new_name, did_rename = namer.compiled_function_name(f.__name__, f, owner_type)
@@ -272,22 +282,13 @@ def function_to_graph(f, program_ctx, arg_values, arg_types, owner_type=None):
return node, new_name, namespace
-def _apply_transformer(node, context, converter_module):
- # TODO(mdan): Clear static analysis here.
- node = qual_names.resolve(node)
- node = activity.resolve(node, context.info, None)
- node = live_values.resolve(node, context.info, config.PYTHON_LITERALS)
- node = type_info.resolve(node, context.info)
- node = converter_module.transform(node, context)
- return node
-
-
-def node_to_graph(node, context):
+def node_to_graph(node, context, rewrite_errors=True):
"""Convert Python code to equivalent TF graph mode code.
Args:
node: AST, the code to convert.
context: converter.EntityContext
+ rewrite_errors: Boolean, whether or not to rewrite the error traceback.
Returns:
A tuple (node, deps):
@@ -295,28 +296,33 @@ def node_to_graph(node, context):
* deps: A set of strings, the fully qualified names of entity
dependencies that this node has.
"""
- # TODO(mdan): Verify arguments for correctness.
+ # TODO(mdan): Insert list_comprehensions somewhere.
- node = _apply_transformer(node, context, ifexp)
+ node = converter.standard_analysis(node, context, is_initial=True)
# Past this point, line numbers are no longer accurate so we ignore the
# source.
# TODO(mdan): Is it feasible to reconstruct intermediate source code?
context.info.source_code = None
- node = _apply_transformer(node, context, decorators)
- node = _apply_transformer(node, context, break_statements)
- node = _apply_transformer(node, context, asserts)
+
+ node = converter.apply_(node, context, decorators)
+ node = converter.apply_(node, context, directives)
+ node = converter.apply_(node, context, break_statements)
+ node = converter.apply_(node, context, asserts)
# Note: sequencing continue canonicalization before for loop one avoids
# dealing with the extra loop increment operation that the for
# canonicalization creates.
- node = _apply_transformer(node, context, continue_statements)
+ node = converter.apply_(node, context, continue_statements)
context.info.namespace['len'] = len
- node = _apply_transformer(node, context, single_return)
- node = _apply_transformer(node, context, lists)
- node = _apply_transformer(node, context, slices)
- node = _apply_transformer(node, context, builtin_functions)
- node = _apply_transformer(node, context, call_trees)
- node = _apply_transformer(node, context, control_flow)
- node = _apply_transformer(node, context, logical_expressions)
- node = _apply_transformer(node, context, side_effect_guards)
- node = _apply_transformer(node, context, name_scopes)
+ node = converter.apply_(node, context, return_statements)
+ node = converter.apply_(node, context, lists)
+ node = converter.apply_(node, context, slices)
+ node = converter.apply_(node, context, builtin_functions)
+ node = converter.apply_(node, context, call_trees)
+ node = converter.apply_(node, context, control_flow)
+ node = converter.apply_(node, context, conditional_expressions)
+ node = converter.apply_(node, context, logical_expressions)
+ node = converter.apply_(node, context, side_effect_guards)
+ node = converter.apply_(node, context, name_scopes)
+ if rewrite_errors:
+ node = converter.apply_(node, context, error_handlers)
return node
diff --git a/tensorflow/contrib/autograph/impl/conversion_test.py b/tensorflow/contrib/autograph/impl/conversion_test.py
index f5279298af..207225a1ac 100644
--- a/tensorflow/contrib/autograph/impl/conversion_test.py
+++ b/tensorflow/contrib/autograph/impl/conversion_test.py
@@ -79,10 +79,12 @@ class ConversionTest(test.TestCase):
self.assertTrue(f in program_ctx.dependency_cache)
self.assertTrue(g in program_ctx.dependency_cache)
self.assertEqual('tf__f', program_ctx.dependency_cache[f].name)
- # need the extra .body[0] in order to step past the with tf.name_scope('f')
- # that is added automatically
+    # need one extra .body[0] to step past the try/except wrapper that is added
+    # automatically, and another for the with tf.name_scope('f') that is also
+    # added automatically
self.assertEqual(
- 'tf__g', program_ctx.dependency_cache[f].body[0].body[0].value.func.id)
+ 'tf__g',
+ program_ctx.dependency_cache[f].body[0].body[0].body[0].value.func.id)
self.assertEqual('tf__g', program_ctx.dependency_cache[g].name)
def test_entity_to_graph_class_hierarchy(self):
diff --git a/tensorflow/contrib/autograph/lang/special_functions.py b/tensorflow/contrib/autograph/lang/special_functions.py
index 11135295a7..6149cbbd6c 100644
--- a/tensorflow/contrib/autograph/lang/special_functions.py
+++ b/tensorflow/contrib/autograph/lang/special_functions.py
@@ -26,6 +26,43 @@ from __future__ import print_function
from tensorflow.contrib.autograph.operators import data_structures
+def tensor_list(elements,
+ element_dtype=None,
+ element_shape=None,
+ use_tensor_array=False):
+  """Creates a tensor list and populates it with the given elements.
+
+  This function provides more uniform access to tensor lists and tensor
+ arrays, and allows optional initialization.
+
+ Note: this function is a simplified wrapper. If you need greater control,
+ it is recommended to use the underlying implementation directly.
+
+ Args:
+ elements: Iterable[tf.Tensor, ...], the elements to initially fill the list
+ with
+ element_dtype: Optional[tf.DType], data type for the elements in the list;
+ required if the list is empty
+ element_shape: Optional[tf.TensorShape], shape for the elements in the list;
+ required if the list is empty
+ use_tensor_array: bool, whether to use the more compatible but restrictive
+ tf.TensorArray implementation
+ Returns:
+ Union[tf.Tensor, tf.TensorArray], the new list.
+ Raises:
+ ValueError: for invalid arguments
+ """
+ if not (elements or (element_dtype and element_shape)):
+ raise ValueError(
+ 'element_dtype and element_shape are required for empty lists')
+ if use_tensor_array:
+ return data_structures.tf_tensor_array_new(elements, element_dtype,
+ element_shape)
+ else:
+ return data_structures.tf_tensor_list_new(elements, element_dtype,
+ element_shape)
+
+
def stack(list_or_tensor, element_dtype=None, strict=True):
"""Stacks the input, if it admits the notion of stacking.
diff --git a/tensorflow/contrib/autograph/lang/special_functions_test.py b/tensorflow/contrib/autograph/lang/special_functions_test.py
index a49cb64075..db492cc5c6 100644
--- a/tensorflow/contrib/autograph/lang/special_functions_test.py
+++ b/tensorflow/contrib/autograph/lang/special_functions_test.py
@@ -28,7 +28,23 @@ from tensorflow.python.platform import test
class SpecialFunctionsTest(test.TestCase):
- def test_basic(self):
+ def test_tensor_list_from_elements(self):
+ elements = [constant_op.constant([1, 2]), constant_op.constant([3, 4])]
+
+ l = special_functions.tensor_list(elements)
+ sl = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
+ with self.test_session() as sess:
+ self.assertAllEqual(sess.run(sl), [[1, 2], [3, 4]])
+
+ def test_tensor_list_array_from_elements(self):
+ elements = [constant_op.constant([1, 2]), constant_op.constant([3, 4])]
+
+ l = special_functions.tensor_list(elements, use_tensor_array=True)
+ sl = l.stack()
+ with self.test_session() as sess:
+ self.assertAllEqual(sess.run(sl), [[1, 2], [3, 4]])
+
+ def test_stack(self):
self.assertEqual(special_functions.stack(1, strict=False), 1)
self.assertListEqual(
special_functions.stack([1, 2, 3], strict=False), [1, 2, 3])
diff --git a/tensorflow/contrib/autograph/operators/data_structures.py b/tensorflow/contrib/autograph/operators/data_structures.py
index 06d8727b0f..cc0a3c3544 100644
--- a/tensorflow/contrib/autograph/operators/data_structures.py
+++ b/tensorflow/contrib/autograph/operators/data_structures.py
@@ -28,7 +28,6 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import tensor_array_ops
-from tensorflow.python.ops import variables
# TODO(mdan): Once control flow supports objects, repackage as a class.
@@ -48,29 +47,101 @@ def new_list(iterable=None):
else:
elements = ()
- # TODO(mdan): Extend these criteria.
- if any(isinstance(el, variables.Variable) for el in elements):
+ if elements:
+ # When the list contains elements, it is assumed to be a "Python" lvalue
+ # list.
return _py_list_new(elements)
- return _tf_tensor_list_new(elements)
+ return tf_tensor_list_new(elements)
-def _tf_tensor_list_new(elements):
+def tf_tensor_array_new(elements, element_dtype=None, element_shape=None):
"""Overload of new_list that stages a Tensor list creation."""
elements = tuple(ops.convert_to_tensor(el) for el in elements)
+
+ all_dtypes = set(el.dtype for el in elements)
+ if len(all_dtypes) == 1:
+ inferred_dtype, = tuple(all_dtypes)
+ if element_dtype is not None and element_dtype != inferred_dtype:
+ raise ValueError(
+ 'incompatible dtype; specified: {}, inferred from {}: {}'.format(
+ element_dtype, elements, inferred_dtype))
+ elif len(all_dtypes) > 1:
+ raise ValueError(
+ 'TensorArray requires all elements to have the same dtype:'
+ ' {}'.format(elements))
+ else:
+ if element_dtype is None:
+ raise ValueError('dtype is required to create an empty TensorArray')
+
+ all_shapes = set(tuple(el.shape.as_list()) for el in elements)
+ if len(all_shapes) == 1:
+ inferred_shape, = tuple(all_shapes)
+ if element_shape is not None and element_shape != inferred_shape:
+ raise ValueError(
+ 'incompatible shape; specified: {}, inferred from {}: {}'.format(
+ element_shape, elements, inferred_shape))
+ elif len(all_shapes) > 1:
+ raise ValueError(
+ 'TensorArray requires all elements to have the same shape:'
+ ' {}'.format(elements))
+ # TODO(mdan): We may want to allow different shapes with infer_shape=False.
+ else:
+ inferred_shape = None
+
+ if element_dtype is None:
+ element_dtype = inferred_dtype
+ if element_shape is None:
+ element_shape = inferred_shape
+
+ l = tensor_array_ops.TensorArray(
+ dtype=element_dtype,
+ size=len(elements),
+ dynamic_size=True,
+ infer_shape=(element_shape is None),
+ element_shape=element_shape)
+ for i, el in enumerate(elements):
+ l = l.write(i, el)
+ return l
+
+
+def tf_tensor_list_new(elements, element_dtype=None, element_shape=None):
+ """Overload of new_list that stages a Tensor list creation."""
+ elements = tuple(ops.convert_to_tensor(el) for el in elements)
+
all_dtypes = set(el.dtype for el in elements)
if len(all_dtypes) == 1:
- element_dtype = tuple(all_dtypes)[0]
+ inferred_dtype = tuple(all_dtypes)[0]
+ if element_dtype is not None and element_dtype != inferred_dtype:
+ raise ValueError(
+ 'incompatible dtype; specified: {}, inferred from {}: {}'.format(
+ element_dtype, elements, inferred_dtype))
else:
# Heterogeneous lists are ok.
- element_dtype = dtypes.variant
+ if element_dtype is not None:
+ raise ValueError(
+ 'specified dtype {} is inconsistent with that of elements {}'.format(
+ element_dtype, elements))
+ inferred_dtype = dtypes.variant
- # TODO(mdan): This may fail for elements of variable shapes.
all_shapes = set(tuple(el.shape.as_list()) for el in elements)
if len(all_shapes) == 1:
- element_shape = array_ops.shape(elements[0])
+ inferred_shape = array_ops.shape(elements[0])
+ if element_shape is not None and element_shape != inferred_shape:
+ raise ValueError(
+ 'incompatible shape; specified: {}, inferred from {}: {}'.format(
+ element_shape, elements, inferred_shape))
else:
# Heterogeneous lists are ok.
- element_shape = constant_op.constant(-1) # unknown shape, by convention
+ if element_shape is not None:
+ raise ValueError(
+ 'specified shape {} is inconsistent with that of elements {}'.format(
+ element_shape, elements))
+ inferred_shape = constant_op.constant(-1) # unknown shape, by convention
+
+ if element_dtype is None:
+ element_dtype = inferred_dtype
+ if element_shape is None:
+ element_shape = inferred_shape
l = list_ops.empty_tensor_list(
element_shape=element_shape, element_dtype=element_dtype)
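
A short sketch of the stricter behavior introduced above (mirroring the new tests that follow): dtype and shape are inferred from the elements when possible, and a conflicting explicit value now raises ValueError:

    from tensorflow.contrib.autograph.operators import data_structures
    from tensorflow.python.framework import dtypes

    # dtype and shape are inferred from the elements.
    l = data_structures.tf_tensor_list_new([3, 4, 5])
    ta = data_structures.tf_tensor_array_new([3, 4, 5])

    # A specified dtype that conflicts with the inferred one is rejected.
    try:
      data_structures.tf_tensor_list_new([3, 4], element_dtype=dtypes.float32)
    except ValueError as e:
      print(e)
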
diff --git a/tensorflow/contrib/autograph/operators/data_structures_test.py b/tensorflow/contrib/autograph/operators/data_structures_test.py
index 8bbb52d6c1..7ea11a839b 100644
--- a/tensorflow/contrib/autograph/operators/data_structures_test.py
+++ b/tensorflow/contrib/autograph/operators/data_structures_test.py
@@ -37,10 +37,51 @@ class ListTest(test.TestCase):
def test_new_list_tensor(self):
l = data_structures.new_list([3, 4, 5])
+ self.assertAllEqual(l, [3, 4, 5])
+
+ def test_tf_tensor_list_new(self):
+ l = data_structures.tf_tensor_list_new([3, 4, 5])
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.int32)
with self.test_session() as sess:
self.assertAllEqual(sess.run(t), [3, 4, 5])
+ def test_tf_tensor_list_new_illegal_input(self):
+ with self.assertRaises(ValueError):
+ data_structures.tf_tensor_list_new([3, 4.0])
+ # TODO(mdan): It might make more sense to type cast in this case.
+ with self.assertRaises(ValueError):
+ data_structures.tf_tensor_list_new([3, 4], element_dtype=dtypes.float32)
+ # Tensor lists do support heterogeneous lists.
+ self.assertIsNot(data_structures.tf_tensor_list_new([3, [4, 5]]), None)
+ with self.assertRaises(ValueError):
+ data_structures.tf_tensor_list_new([3, 4], element_shape=(2,))
+ with self.assertRaises(ValueError):
+ data_structures.tf_tensor_list_new([], element_shape=(2,))
+ with self.assertRaises(ValueError):
+ data_structures.tf_tensor_list_new([], element_dtype=dtypes.float32)
+
+ def test_tf_tensor_array_new(self):
+ l = data_structures.tf_tensor_array_new([3, 4, 5])
+ t = l.stack()
+ with self.test_session() as sess:
+ self.assertAllEqual(sess.run(t), [3, 4, 5])
+
+ def test_tf_tensor_array_new_illegal_input(self):
+ with self.assertRaises(ValueError):
+ data_structures.tf_tensor_array_new([3, 4.0])
+ with self.assertRaises(ValueError):
+ data_structures.tf_tensor_array_new([3, 4], element_dtype=dtypes.float32)
+ with self.assertRaises(ValueError):
+ data_structures.tf_tensor_array_new([3, [4, 5]])
+ with self.assertRaises(ValueError):
+ data_structures.tf_tensor_array_new([3, 4], element_shape=(2,))
+ with self.assertRaises(ValueError):
+ data_structures.tf_tensor_array_new([], element_shape=(2,))
+ # TAs can infer the shape.
+ self.assertIsNot(
+ data_structures.tf_tensor_array_new([], element_dtype=dtypes.float32),
+ None)
+
def test_append_tensor_list(self):
l = data_structures.new_list()
x = constant_op.constant([1, 2, 3])
diff --git a/tensorflow/contrib/autograph/pyct/anno.py b/tensorflow/contrib/autograph/pyct/anno.py
index 92f1370e05..1a52110ef3 100644
--- a/tensorflow/contrib/autograph/pyct/anno.py
+++ b/tensorflow/contrib/autograph/pyct/anno.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Handling annotations on AST nodes.
+"""AST node annotation support.
Adapted from Tangent.
"""
@@ -21,36 +21,87 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from enum import Enum
+import enum
+# pylint:disable=g-bad-import-order
+import gast
+# pylint:enable=g-bad-import-order
-class NoValue(Enum):
+
+# TODO(mdan): Shorten the names.
+# These names are heavily used, and anno.blaa
+# TODO(mdan): Replace the attr-dict mechanism with a more typed solution.
+
+
+class NoValue(enum.Enum):
def __repr__(self):
return self.name
class Basic(NoValue):
- """Container for annotation keys.
+ """Container for basic annotation keys.
The enum values are used strictly for documentation purposes.
"""
- QN = 'Qualified name, as it appeared in the code.'
+ QN = 'Qualified name, as it appeared in the code. See qual_names.py.'
SKIP_PROCESSING = (
'This node should be preserved as is and not processed any further.')
INDENT_BLOCK_REMAINDER = (
- 'When a node is annotated with this, the remainder of the block should '
- 'be indented below it. The annotation contains a tuple '
- '(new_body, name_map), where `new_body` is the new indented block and '
- '`name_map` allows renaming symbols.')
- ORIGIN = ('Contains OriginInfo objects specific to the annotated node. See '
- 'origin_information.py for definition.')
+ 'When a node is annotated with this, the remainder of the block should'
+ ' be indented below it. The annotation contains a tuple'
+ ' (new_body, name_map), where `new_body` is the new indented block and'
+ ' `name_map` allows renaming symbols.')
+ ORIGIN = ('Information about the source code that the converted code'
+ ' originated from. See origin_information.py.')
+
+
+class Static(NoValue):
+ """Container for static analysis annotation keys.
+
+ The enum values are used strictly for documentation purposes.
+ """
+
+ # Deprecated - use reaching definitions instead.
+ # Symbols
+ # These flags are boolean.
+ IS_LOCAL = 'Symbol is local to the function scope being analyzed.'
+ IS_PARAM = 'Symbol is a parameter to the function being analyzed.'
+
+ # Scopes
+ # Scopes are represented by objects of type activity.Scope.
+ SCOPE = 'The scope for the annotated node. See activity.py.'
+ # TODO(mdan): Drop these in favor of accessing the child's SCOPE.
+ ARGS_SCOPE = 'The scope for the argument list of a function call.'
+ COND_SCOPE = 'The scope for the test node of a conditional statement.'
+ BODY_SCOPE = (
+ 'The scope for the main body of a statement (True branch for if '
+ 'statements, main body for loops).')
+ ORELSE_SCOPE = (
+ 'The scope for the orelse body of a statement (False branch for if '
+ 'statements, orelse body for loops).')
+
+ # Static analysis annotations.
+ DEFINITIONS = (
+ 'Reaching definition information. See reaching_definitions.py.')
+ ORIG_DEFINITIONS = (
+ 'The value of DEFINITIONS that applied to the original code before any'
+ ' conversion.')
+ DEFINED_VARS_IN = (
+ 'Symbols defined when entering the node. See reaching_definitions.py.')
+ LIVE_VARS_OUT = ('Symbols live when exiting the node. See liveness.py.')
FAIL = object()
+def keys(node, field_name='___pyct_anno'):
+ if not hasattr(node, field_name):
+ return frozenset()
+ return frozenset(getattr(node, field_name).keys())
+
+
def getanno(node, key, default=FAIL, field_name='___pyct_anno'):
if (default is FAIL or (hasattr(node, field_name) and
(key in getattr(node, field_name)))):
@@ -88,3 +139,19 @@ def copyanno(from_node, to_node, key, field_name='___pyct_anno'):
key,
getanno(from_node, key, field_name=field_name),
field_name=field_name)
+
+
+def dup(node, copy_map, field_name='___pyct_anno'):
+ """Recursively copies annotations in an AST tree.
+
+ Args:
+ node: ast.AST
+ copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination
+ key. All annotations with the source key will be copied to identical
+ annotations with the destination key.
+ field_name: str
+ """
+ for n in gast.walk(node):
+ for k in copy_map:
+ if hasanno(n, k, field_name):
+ setanno(n, copy_map[k], getanno(n, k, field_name), field_name)
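# Illustrative sketch (not part of the patch): how the new anno.dup helper
# mirrors annotations under new keys across a whole tree. The 'foo' and
# 'saved_foo' keys are hypothetical.
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import parser

node = parser.parse_str('x = y + 1')
anno.setanno(node.body[0], 'foo', 'bar')
anno.dup(node, {'foo': 'saved_foo'})
assert anno.getanno(node.body[0], 'saved_foo') == 'bar'
assert anno.keys(node.body[0]) == {'foo', 'saved_foo'}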
diff --git a/tensorflow/contrib/autograph/pyct/anno_test.py b/tensorflow/contrib/autograph/pyct/anno_test.py
index f2c0c8cf05..5ef4da61a3 100644
--- a/tensorflow/contrib/autograph/pyct/anno_test.py
+++ b/tensorflow/contrib/autograph/pyct/anno_test.py
@@ -32,22 +32,27 @@ class AnnoTest(test.TestCase):
def test_basic(self):
node = ast.Name()
+ self.assertEqual(anno.keys(node), set())
self.assertFalse(anno.hasanno(node, 'foo'))
with self.assertRaises(AttributeError):
anno.getanno(node, 'foo')
anno.setanno(node, 'foo', 3)
+
+ self.assertEqual(anno.keys(node), {'foo'})
self.assertTrue(anno.hasanno(node, 'foo'))
self.assertEqual(anno.getanno(node, 'foo'), 3)
self.assertEqual(anno.getanno(node, 'bar', default=7), 7)
anno.delanno(node, 'foo')
+
+ self.assertEqual(anno.keys(node), set())
self.assertFalse(anno.hasanno(node, 'foo'))
with self.assertRaises(AttributeError):
anno.getanno(node, 'foo')
self.assertIsNone(anno.getanno(node, 'foo', default=None))
- def test_copyanno(self):
+ def test_copy(self):
node_1 = ast.Name()
anno.setanno(node_1, 'foo', 3)
@@ -58,6 +63,22 @@ class AnnoTest(test.TestCase):
self.assertTrue(anno.hasanno(node_2, 'foo'))
self.assertFalse(anno.hasanno(node_2, 'bar'))
+ def test_duplicate(self):
+ node = ast.If(
+ test=ast.Num(1),
+ body=[ast.Expr(ast.Name('bar', ast.Load()))],
+ orelse=[])
+ anno.setanno(node, 'spam', 1)
+ anno.setanno(node, 'ham', 1)
+ anno.setanno(node.body[0], 'ham', 1)
+
+ anno.dup(node, {'spam': 'eggs'})
+
+ self.assertTrue(anno.hasanno(node, 'spam'))
+ self.assertTrue(anno.hasanno(node, 'ham'))
+ self.assertTrue(anno.hasanno(node, 'eggs'))
+ self.assertFalse(anno.hasanno(node.body[0], 'eggs'))
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/pyct/ast_util.py b/tensorflow/contrib/autograph/pyct/ast_util.py
index c4f82d1170..86e3f56a64 100644
--- a/tensorflow/contrib/autograph/pyct/ast_util.py
+++ b/tensorflow/contrib/autograph/pyct/ast_util.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Copy an AST tree, discarding annotations."""
+"""AST manipulation utilities."""
from __future__ import absolute_import
from __future__ import division
@@ -20,53 +20,60 @@ from __future__ import print_function
import ast
+import collections
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import parser
-class CleanCopier(gast.NodeVisitor):
- """Copies AST nodes.
+class CleanCopier(object):
+ """NodeTransformer-like visitor that copies an AST."""
- The copied nodes will ignore almost all fields that are prefixed by '__'.
- Exceptions make some annotations.
- """
+ def __init__(self, preserve_annos):
+ super(CleanCopier, self).__init__()
+ self.preserve_annos = preserve_annos
- # TODO(mdan): Parametrize which annotations get carried over.
+ def copy(self, node):
+ """Returns a deep copy of node (excluding some fields, see copy_clean)."""
+
+ if isinstance(node, list):
+ return [self.copy(n) for n in node]
+ elif isinstance(node, tuple):
+ return tuple(self.copy(n) for n in node)
+ elif not isinstance(node, (gast.AST, ast.AST)):
+ # Assuming everything that's not an AST, list or tuple is a value type
+ # and may simply be assigned.
+ return node
+
+ assert isinstance(node, (gast.AST, ast.AST))
- def generic_visit(self, node):
new_fields = {}
for f in node._fields:
- if f.startswith('__'):
- continue
- if not hasattr(node, f):
- continue
- v = getattr(node, f)
- if isinstance(v, list):
- v = [self.generic_visit(n) for n in v]
- elif isinstance(v, tuple):
- v = tuple(self.generic_visit(n) for n in v)
- elif isinstance(v, (gast.AST, ast.AST)):
- v = self.generic_visit(v)
- else:
- # Assume everything else is a value type.
- pass
- new_fields[f] = v
+ if not f.startswith('__') and hasattr(node, f):
+ new_fields[f] = self.copy(getattr(node, f))
new_node = type(node)(**new_fields)
- if anno.hasanno(node, anno.Basic.SKIP_PROCESSING):
- anno.setanno(new_node, anno.Basic.SKIP_PROCESSING, True)
+
+ if self.preserve_annos:
+ for k in self.preserve_annos:
+ anno.copyanno(node, new_node, k)
return new_node
-def copy_clean(node):
- copier = CleanCopier()
- if isinstance(node, list):
- return [copier.visit(n) for n in node]
- elif isinstance(node, tuple):
- return tuple(copier.visit(n) for n in node)
- else:
- return copier.visit(node)
+def copy_clean(node, preserve_annos=None):
+ """Creates a deep copy of an AST.
+
+ The copy will not include fields that are prefixed by '__', with the
+ exception of user-specified annotations.
+
+ Args:
+ node: ast.AST
+ preserve_annos: Optional[Set[Hashable]], annotation keys to include in the
+ copy
+ Returns:
+ ast.AST
+ """
+ return CleanCopier(preserve_annos).copy(node)
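# Illustrative sketch (not part of the patch): copy_clean with the new
# preserve_annos argument, mirroring the test added further below. The 'foo'
# annotation key is hypothetical.
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import parser

node = parser.parse_str('def f(a):\n  return a + 1\n')
anno.setanno(node.body[0], 'foo', 'bar')
new_node = ast_util.copy_clean(node, preserve_annos={'foo'})
assert new_node is not node
assert anno.getanno(new_node.body[0], 'foo') == 'bar'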
class SymbolRenamer(gast.NodeTransformer):
@@ -78,7 +85,11 @@ class SymbolRenamer(gast.NodeTransformer):
def _process(self, node):
qn = anno.getanno(node, anno.Basic.QN)
if qn in self.name_map:
- return gast.Name(str(self.name_map[qn]), node.ctx, None)
+ new_node = gast.Name(str(self.name_map[qn]), node.ctx, None)
+ # All annotations get carried over.
+ for k in anno.keys(node):
+ anno.copyanno(node, new_node, k)
+ return new_node
return self.generic_visit(node)
def visit_Name(self, node):
@@ -92,6 +103,7 @@ class SymbolRenamer(gast.NodeTransformer):
def rename_symbols(node, name_map):
+ """Renames symbols in an AST. Requires qual_names annotations."""
renamer = SymbolRenamer(name_map)
if isinstance(node, list):
return [renamer.visit(n) for n in node]
@@ -101,6 +113,7 @@ def rename_symbols(node, name_map):
def keywords_to_dict(keywords):
+ """Converts a list of ast.keyword objects to a dict."""
keys = []
values = []
for kw in keywords:
@@ -110,10 +123,7 @@ def keywords_to_dict(keywords):
class PatternMatcher(gast.NodeVisitor):
- """Matches a node against a pattern represented by a node.
-
- The pattern may contain wildcards represented by the symbol '_'.
- """
+ """Matches a node against a pattern represented by a node."""
def __init__(self, pattern):
self.pattern = pattern
@@ -175,11 +185,98 @@ class PatternMatcher(gast.NodeVisitor):
if v != p:
return self.no_match()
-
def matches(node, pattern):
+ """Basic pattern matcher for AST.
+
+ The pattern may contain wildcards represented by the symbol '_'. A node
+ matches a pattern if, at every position in the tree, the pattern has either
+ a node of the same type or a Name node with id='_'.
+
+ Args:
+ node: ast.AST
+ pattern: ast.AST
+ Returns:
+ bool
+ """
if isinstance(pattern, str):
pattern = parser.parse_expression(pattern)
matcher = PatternMatcher(pattern)
matcher.visit(node)
return matcher.matches
+
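# Illustrative sketch (not part of the patch): matching an expression against
# a pattern containing '_' wildcards; 'foo' and 'bar' are hypothetical names.
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import parser

expr = parser.parse_expression('foo(a, b)')
assert ast_util.matches(expr, 'foo(_, _)')
assert not ast_util.matches(expr, 'bar(_, _)')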
+# TODO(mdan): Once we have error tracing, we may be able to just go to SSA.
+def apply_to_single_assignments(targets, values, apply_fn):
+ """Applies a function to each individual assignment.
+
+ This function can process a possibly-unpacked (e.g. a, b = c, d) assignment.
+ It tries to break down the unpacking if possible. In effect, it is
+ equivalent to passing the assigned values in SSA form to apply_fn.
+
+ Examples:
+
+ The following will result in apply_fn(a, c), apply_fn(b, d):
+
+ a, b = c, d
+
+ The following will result in apply_fn(a, c[0]), apply_fn(b, c[1]):
+
+ a, b = c
+
+ The following will result in apply_fn(a, (b, c)):
+
+ a = b, c
+
+ It uses the visitor pattern to allow subclasses to process single
+ assignments individually.
+
+ Args:
+ targets: Union[List[ast.AST, ...], Tuple[ast.AST, ...], ast.AST]; should be
+ used with the targets field of an ast.Assign node
+ values: ast.AST
+ apply_fn: Callable[[ast.AST, ast.AST], None], called with the
+ respective nodes of each single assignment
+ """
+ if not isinstance(targets, (list, tuple)):
+ targets = (targets,)
+ for target in targets:
+ if isinstance(target, (gast.Tuple, gast.List)):
+ for i in range(len(target.elts)):
+ target_el = target.elts[i]
+ if isinstance(values, (gast.Tuple, gast.List)):
+ value_el = values.elts[i]
+ else:
+ idx = parser.parse_expression(str(i))
+ value_el = gast.Subscript(values, gast.Index(idx), ctx=gast.Load())
+ apply_to_single_assignments(target_el, value_el, apply_fn)
+ else:
+ apply_fn(target, values)
+
+
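# Illustrative sketch (not part of the patch): decomposing an unpacked
# assignment into per-target callbacks, as described in the docstring above.
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import parser

assign = parser.parse_str('a, b = c').body[0]
pairs = []
ast_util.apply_to_single_assignments(
    assign.targets, assign.value, lambda t, v: pairs.append((t, v)))
# pairs now holds (Name 'a', Subscript 'c[0]') and (Name 'b', Subscript 'c[1]'),
# the same decomposition exercised by the tests further below.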
+def iter_fields(node):
+ for field in sorted(node._fields):
+ try:
+ yield getattr(node, field)
+ except AttributeError:
+ pass
+
+
+def iter_child_nodes(node):
+ for field in iter_fields(node):
+ if isinstance(field, gast.AST):
+ yield field
+ elif isinstance(field, list):
+ for item in field:
+ if isinstance(item, gast.AST):
+ yield item
+
+
+def parallel_walk(node_a, node_b):
+ todo_a = collections.deque([node_a])
+ todo_b = collections.deque([node_b])
+ while todo_a and todo_b:
+ node_a = todo_a.popleft()
+ node_b = todo_b.popleft()
+ todo_a.extend(iter_child_nodes(node_a))
+ todo_b.extend(iter_child_nodes(node_b))
+ yield node_a, node_b
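# Illustrative sketch (not part of the patch): walking two structurally
# identical trees in lockstep, which compiler._build_source_map (later in this
# diff) relies on to pair original and regenerated nodes.
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import parser

before = parser.parse_str('x = y + 1')
after = parser.parse_str('x = y + 1')
for node_a, node_b in ast_util.parallel_walk(before, after):
  assert type(node_a) is type(node_b)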
diff --git a/tensorflow/contrib/autograph/pyct/ast_util_test.py b/tensorflow/contrib/autograph/pyct/ast_util_test.py
index 3afa04a506..981e398b93 100644
--- a/tensorflow/contrib/autograph/pyct/ast_util_test.py
+++ b/tensorflow/contrib/autograph/pyct/ast_util_test.py
@@ -19,7 +19,10 @@ from __future__ import division
from __future__ import print_function
import ast
+import collections
+import textwrap
+from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import compiler
from tensorflow.contrib.autograph.pyct import parser
@@ -29,53 +32,66 @@ from tensorflow.python.platform import test
class AstUtilTest(test.TestCase):
- def test_rename_symbols(self):
- node = ast.Tuple([
- ast.Name('a', ast.Load()),
- ast.Name('b', ast.Load()),
- ast.Attribute(ast.Name('b', None), 'c', ast.Store()),
- ast.Attribute(
- ast.Attribute(ast.Name('b', None), 'c', ast.Load()), 'd', None)
- ], None)
+ def setUp(self):
+ super(AstUtilTest, self).setUp()
+ self._invocation_counts = collections.defaultdict(lambda: 0)
+
+ def test_rename_symbols_basic(self):
+ node = parser.parse_str('a + b')
+ node = qual_names.resolve(node)
+
+ node = ast_util.rename_symbols(
+ node, {qual_names.QN('a'): qual_names.QN('renamed_a')})
+
+ self.assertIsInstance(node.body[0].value.left.id, str)
+ source, _ = compiler.ast_to_source(node)
+ self.assertEqual(source.strip(), 'renamed_a + b')
+
+ def test_rename_symbols_attributes(self):
+ node = parser.parse_str('b.c = b.c.d')
node = qual_names.resolve(node)
+
node = ast_util.rename_symbols(
- node, {
- qual_names.QN('a'):
- qual_names.QN('renamed_a'),
- qual_names.QN(qual_names.QN('b'), attr='c'):
- qual_names.QN('renamed_b_c'),
- })
-
- self.assertEqual(node.elts[0].id, 'renamed_a')
- self.assertTrue(isinstance(node.elts[0].ctx, ast.Load))
- self.assertEqual(node.elts[1].id, 'b')
- self.assertEqual(node.elts[2].id, 'renamed_b_c')
- self.assertTrue(isinstance(node.elts[2].ctx, ast.Store))
- self.assertEqual(node.elts[3].value.id, 'renamed_b_c')
- self.assertTrue(isinstance(node.elts[3].value.ctx, ast.Load))
+ node, {qual_names.from_str('b.c'): qual_names.QN('renamed_b_c')})
+
+ source, _ = compiler.ast_to_source(node)
+ self.assertEqual(source.strip(), 'renamed_b_c = renamed_b_c.d')
+
+ def test_rename_symbols_annotations(self):
+ node = parser.parse_str('a[i]')
+ node = qual_names.resolve(node)
+ anno.setanno(node, 'foo', 'bar')
+ orig_anno = anno.getanno(node, 'foo')
+
+ node = ast_util.rename_symbols(node,
+ {qual_names.QN('a'): qual_names.QN('b')})
+
+ self.assertIs(anno.getanno(node, 'foo'), orig_anno)
def test_copy_clean(self):
- ret = ast.Return(
- ast.BinOp(
- op=ast.Add(),
- left=ast.Name(id='a', ctx=ast.Load()),
- right=ast.Num(1)))
- setattr(ret, '__foo', 'bar')
- node = ast.FunctionDef(
- name='f',
- args=ast.arguments(
- args=[ast.Name(id='a', ctx=ast.Param())],
- vararg=None,
- kwarg=None,
- defaults=[]),
- body=[ret],
- decorator_list=[],
- returns=None)
+ node = parser.parse_str(
+ textwrap.dedent("""
+ def f(a):
+ return a + 1
+ """))
+ setattr(node.body[0], '__foo', 'bar')
new_node = ast_util.copy_clean(node)
- self.assertFalse(node is new_node)
- self.assertFalse(ret is new_node.body[0])
+ self.assertIsNot(new_node, node)
+ self.assertIsNot(new_node.body[0], node.body[0])
self.assertFalse(hasattr(new_node.body[0], '__foo'))
+ def test_copy_clean_preserves_annotations(self):
+ node = parser.parse_str(
+ textwrap.dedent("""
+ def f(a):
+ return a + 1
+ """))
+ anno.setanno(node.body[0], 'foo', 'bar')
+ anno.setanno(node.body[0], 'baz', 1)
+ new_node = ast_util.copy_clean(node, preserve_annos={'foo'})
+ self.assertEqual(anno.getanno(new_node.body[0], 'foo'), 'bar')
+ self.assertFalse(anno.hasanno(new_node.body[0], 'baz'))
+
def test_keywords_to_dict(self):
keywords = parser.parse_expression('f(a=b, c=1, d=\'e\')').keywords
d = ast_util.keywords_to_dict(keywords)
@@ -113,6 +129,52 @@ class AstUtilTest(test.TestCase):
self.assertNoMatch('super(Foo, self).__init__()',
'super(Bar, _).__init__(_)')
+ def _mock_apply_fn(self, target, source):
+ target, _ = compiler.ast_to_source(target)
+ source, _ = compiler.ast_to_source(source)
+ self._invocation_counts[(target.strip(), source.strip())] += 1
+
+ def test_apply_to_single_assignments_dynamic_unpack(self):
+ node = parser.parse_str('a, b, c = d')
+ node = node.body[0]
+ ast_util.apply_to_single_assignments(node.targets, node.value,
+ self._mock_apply_fn)
+ self.assertDictEqual(self._invocation_counts, {
+ ('a', 'd[0]'): 1,
+ ('b', 'd[1]'): 1,
+ ('c', 'd[2]'): 1,
+ })
+
+ def test_apply_to_single_assignments_static_unpack(self):
+ node = parser.parse_str('a, b, c = d, e, f')
+ node = node.body[0]
+ ast_util.apply_to_single_assignments(node.targets, node.value,
+ self._mock_apply_fn)
+ self.assertDictEqual(self._invocation_counts, {
+ ('a', 'd'): 1,
+ ('b', 'e'): 1,
+ ('c', 'f'): 1,
+ })
+
+ def test_parallel_walk(self):
+ ret = ast.Return(
+ ast.BinOp(
+ op=ast.Add(),
+ left=ast.Name(id='a', ctx=ast.Load()),
+ right=ast.Num(1)))
+ node = ast.FunctionDef(
+ name='f',
+ args=ast.arguments(
+ args=[ast.Name(id='a', ctx=ast.Param())],
+ vararg=None,
+ kwarg=None,
+ defaults=[]),
+ body=[ret],
+ decorator_list=[],
+ returns=None)
+ for child_a, child_b in ast_util.parallel_walk(node, node):
+ self.assertEqual(child_a, child_b)
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/pyct/cfg.py b/tensorflow/contrib/autograph/pyct/cfg.py
index 666328781f..25fec7fd53 100644
--- a/tensorflow/contrib/autograph/pyct/cfg.py
+++ b/tensorflow/contrib/autograph/pyct/cfg.py
@@ -64,11 +64,19 @@ class Node(object):
self.prev = frozenset(self.prev)
def __repr__(self):
- return compiler.ast_to_source(self.ast_node).strip()
+ if isinstance(self.ast_node, gast.FunctionDef):
+ return 'def %s' % self.ast_node.name
+ elif isinstance(self.ast_node, gast.withitem):
+ source, _ = compiler.ast_to_source(self.ast_node.context_expr)
+ return source.strip()
+ source, _ = compiler.ast_to_source(self.ast_node)
+ return source.strip()
class Graph(
- collections.namedtuple('Graph', ['entry', 'exit', 'error', 'index'])):
+ collections.namedtuple(
+ 'Graph',
+ ['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])):
"""A Control Flow Graph.
The CFG maintains an index to allow looking up a CFG node by the AST node to
@@ -82,6 +90,11 @@ class Graph(
because these are shared, and wiring them would create a reverse path from
normal control flow into the error nodes, which we want to avoid.
+ The graph also maintains edges corresponding to higher level statements
+ like for-else loops. A node is considered successor of a statement if there
+ is an edge from a node that is lexically a child of that statement to a node
+ that is not. Statement predecessors are analogously defined.
+
Attributes:
entry: Node, the entry node
exit: FrozenSet[Node, ...], the exit nodes
@@ -89,6 +102,10 @@ class Graph(
error (errors propagated from function calls are not accounted)
index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG
node
+ stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
+ nodes to their predecessor CFG nodes
+ stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST
+ nodes to their successor CFG nodes
"""
def __repr__(self):
@@ -96,9 +113,8 @@ class Graph(
for node in self.index.values():
result += ' %s [label="%s"];\n' % (id(node), node)
for node in self.index.values():
- if node.next:
- result += ' %s -> {%s};\n' % (id(node), ', '.join(
- repr(id(n)) for n in node.next))
+ for next_ in node.next:
+ result += ' %s -> %s;\n' % (id(node), id(next_))
result += '}'
return result
@@ -108,6 +124,8 @@ class _WalkMode(Enum):
REVERSE = 2
+# TODO(mdan): Rename to DataFlowAnalyzer.
+# TODO(mdan): Consider specializations that use gen/kill/transfer abstractions.
class GraphVisitor(object):
"""Base class for a CFG visitors.
@@ -130,26 +148,22 @@ class GraphVisitor(object):
out: Dict[Node, Any], stores node-keyed state during a visit
"""
- def reset(self):
- self.in_ = {
- node: self.init_state(node) for node in self.graph.index.values()
- }
- self.out = {
- node: self.init_state(node) for node in self.graph.index.values()
- }
+ def __init__(self, graph):
+ self.graph = graph
+ self.reset()
def init_state(self, node):
"""State initialization function. Optional to overload.
An in/out state slot will be created for each node in the graph. Subclasses
- may overload this to control what that is initialized to.
+ must overload this to control what that is initialized to.
Args:
node: Node
"""
- del node
- return None
+ raise NotImplementedError('Subclasses must implement this.')
+ # TODO(mdan): Rename to flow?
def visit_node(self, node):
"""Visitor function.
@@ -161,6 +175,14 @@ class GraphVisitor(object):
"""
raise NotImplementedError('Subclasses must implement this.')
+ def reset(self):
+ self.in_ = {
+ node: self.init_state(node) for node in self.graph.index.values()
+ }
+ self.out = {
+ node: self.init_state(node) for node in self.graph.index.values()
+ }
+
def _visit_internal(self, mode):
"""Visits the CFG, depth-first."""
assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE)
@@ -169,7 +191,6 @@ class GraphVisitor(object):
elif mode == _WalkMode.REVERSE:
open_ = list(self.graph.exit)
closed = set()
- self.reset()
while open_:
node = open_.pop(0)
@@ -186,12 +207,10 @@ class GraphVisitor(object):
if should_revisit or next_ not in closed:
open_.append(next_)
- def visit_forward(self, graph):
- self.graph = graph
+ def visit_forward(self):
self._visit_internal(_WalkMode.FORWARD)
- def visit_reverse(self, graph):
- self.graph = graph
+ def visit_reverse(self):
self._visit_internal(_WalkMode.REVERSE)
@@ -244,8 +263,16 @@ class GraphBuilder(object):
# TODO(mdan): Too many primitives. Use classes.
self.leaves = set()
+ # Note: This mechanism requires that nodes are added in lexical order (top
+ # to bottom, depth first).
+ self.active_stmts = set()
+ self.owners = {} # type: Dict[Node, FrozenSet[ast.AST]]
+ self.forward_edges = set() # type: Set[Tuple[Node, Node]] # (from, to)
+
self.finally_sections = {}
- self.finally_section_subgraphs = {} # Values are [begin_node, exit_nodes]
+ # Dict values represent (entry, exits)
+ self.finally_section_subgraphs = {
+ } # type: Dict[ast.AST, Tuple[Node, Set[Node]]]
# Whether the guard section can be reached from the statement that precedes
# it.
self.finally_section_has_direct_flow = {}
@@ -275,6 +302,7 @@ class GraphBuilder(object):
if isinstance(first, Node):
first.next.add(second)
second.prev.add(first)
+ self.forward_edges.add((first, second))
else:
for node in first:
self._connect_nodes(node, second)
@@ -285,6 +313,7 @@ class GraphBuilder(object):
raise ValueError('%s added twice' % ast_node)
node = Node(next_=set(), prev=set(), ast_node=ast_node)
self.node_index[ast_node] = node
+ self.owners[node] = frozenset(self.active_stmts)
if self.head is None:
self.head = node
@@ -299,6 +328,25 @@ class GraphBuilder(object):
return node
+ def begin_statement(self, stmt):
+ """Marks the beginning of a statement.
+
+ Args:
+ stmt: Hashable, a key by which the statement can be identified in
+ the CFG's stmt_prev and stmt_next attributes
+ """
+ self.active_stmts.add(stmt)
+
+ def end_statement(self, stmt):
+ """Marks the end of a statement.
+
+ Args:
+ stmt: Hashable, a key by which the statement can be identified in
+ the CFG's stmt_prev and stmt_next attributes; must match a key
+ previously passed to begin_statement.
+ """
+ self.active_stmts.remove(stmt)
+
def add_ordinary_node(self, ast_node):
"""Grows the graph by adding an ordinary CFG node.
@@ -505,11 +553,35 @@ class GraphBuilder(object):
for node in self.node_index.values():
node.freeze()
+ # Build the statement edges.
+ stmt_next = {}
+ stmt_prev = {}
+ for node, _ in self.forward_edges:
+ for stmt in self.owners[node]:
+ if stmt not in stmt_next:
+ stmt_next[stmt] = set()
+ if stmt not in stmt_prev:
+ stmt_prev[stmt] = set()
+ for first, second in self.forward_edges:
+ stmts_exited = self.owners[first] - self.owners[second]
+ for stmt in stmts_exited:
+ stmt_next[stmt].add(second)
+ stmts_entered = self.owners[second] - self.owners[first]
+ for stmt in stmts_entered:
+ stmt_prev[stmt].add(first)
+ for stmt in stmt_next:
+ stmt_next[stmt] = frozenset(stmt_next[stmt])
+ for stmt in stmt_prev:
+ stmt_prev[stmt] = frozenset(stmt_prev[stmt])
+
+ # Construct the final graph object.
result = Graph(
entry=self.head,
exit=self.leaves,
error=self.errors,
- index=self.node_index)
+ index=self.node_index,
+ stmt_prev=stmt_prev,
+ stmt_next=stmt_next)
# Reset the state.
self.reset()
@@ -523,8 +595,6 @@ class AstToCfg(gast.NodeVisitor):
A separate CFG will be constructed for each function.
"""
- # TODO(mdan): Figure out how to deal with closures.
-
def __init__(self):
super(AstToCfg, self).__init__()
@@ -577,6 +647,13 @@ class AstToCfg(gast.NodeVisitor):
self.builder.add_continue_node(node, try_node, guards)
def visit_FunctionDef(self, node):
+ # We also keep the FunctionDef node in the CFG. This allows us to determine
+ # things like reaching definitions via closure. Note that the function body
+ # will be stored in a separate graph, because function definitions are not
+ # the same as function calls.
+ if self.builder is not None:
+ self.builder.add_ordinary_node(node)
+
self.builder_stack.append(self.builder)
self.builder = GraphBuilder(node)
@@ -622,7 +699,7 @@ class AstToCfg(gast.NodeVisitor):
)
if try_node is None:
raise ValueError('%s that is not enclosed by any FunctionDef' % node)
- self.builder.add_error_node(node, try_node, guards)
+ self.builder.add_error_node(node, guards)
def visit_Assert(self, node):
# Ignoring the effect of exceptions.
@@ -637,6 +714,7 @@ class AstToCfg(gast.NodeVisitor):
# targets of jump statements like break/continue/etc. Since there is no
# statement that can interrupt a conditional, we don't need to track their
# lexical scope. That may change in the future.
+ self.builder.begin_statement(node)
self.builder.enter_cond_section(node)
self._process_basic_statement(node.test)
@@ -650,8 +728,10 @@ class AstToCfg(gast.NodeVisitor):
self.visit(stmt)
self.builder.exit_cond_section(node)
+ self.builder.end_statement(node)
def visit_While(self, node):
+ self.builder.begin_statement(node)
self._enter_lexical_scope(node)
self.builder.enter_section(node)
@@ -670,8 +750,10 @@ class AstToCfg(gast.NodeVisitor):
self.visit(stmt)
self.builder.exit_section(node)
+ self.builder.end_statement(node)
def visit_For(self, node):
+ self.builder.begin_statement(node)
self._enter_lexical_scope(node)
self.builder.enter_section(node)
@@ -693,6 +775,7 @@ class AstToCfg(gast.NodeVisitor):
self.visit(stmt)
self.builder.exit_section(node)
+ self.builder.end_statement(node)
def visit_Break(self, node):
self._process_exit_statement(node, gast.While, gast.For)
@@ -722,12 +805,13 @@ class AstToCfg(gast.NodeVisitor):
def visit_With(self, node):
# TODO(mdan): Mark the context manager's exit call as exit guard.
- self._process_basic_statement(node.items)
+ for item in node.items:
+ self._process_basic_statement(item)
for stmt in node.body:
self.visit(stmt)
def build(node):
- builder = AstToCfg()
- builder.visit(node)
- return builder.cfgs
+ visitor = AstToCfg()
+ visitor.visit(node)
+ return visitor.cfgs
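# Illustrative sketch (not part of the patch): building a CFG and reading the
# new statement-level edges; the example function is hypothetical.
import textwrap

from tensorflow.contrib.autograph.pyct import cfg
from tensorflow.contrib.autograph.pyct import parser

node = parser.parse_str(textwrap.dedent("""
    def f(a):
      while a > 0:
        a -= 1
      return a
    """))
graphs = cfg.build(node)
graph, = graphs.values()
while_node = node.body[0].body[0]
print(graph.stmt_next[while_node])  # CFG nodes entered when the loop exits.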
diff --git a/tensorflow/contrib/autograph/pyct/cfg_test.py b/tensorflow/contrib/autograph/pyct/cfg_test.py
index 00afadd521..9d0a85d615 100644
--- a/tensorflow/contrib/autograph/pyct/cfg_test.py
+++ b/tensorflow/contrib/autograph/pyct/cfg_test.py
@@ -25,9 +25,13 @@ from tensorflow.python.platform import test
class CountingVisitor(cfg.GraphVisitor):
- def __init__(self):
+ def __init__(self, graph):
+ super(CountingVisitor, self).__init__(graph)
self.counts = {}
+ def init_state(self, _):
+ return None
+
def visit_node(self, node):
self.counts[node.ast_node] = self.counts.get(node.ast_node, 0) + 1
return False # visit only once
@@ -51,8 +55,8 @@ class GraphVisitorTest(test.TestCase):
graphs, node = self._build_cfg(test_fn)
graph, = graphs.values()
- visitor = CountingVisitor()
- visitor.visit_forward(graph)
+ visitor = CountingVisitor(graph)
+ visitor.visit_forward()
fn_node = node.body[0]
self.assertEqual(visitor.counts[fn_node.args], 1)
@@ -74,8 +78,8 @@ class GraphVisitorTest(test.TestCase):
graphs, node = self._build_cfg(test_fn)
graph, = graphs.values()
- visitor = CountingVisitor()
- visitor.visit_reverse(graph)
+ visitor = CountingVisitor(graph)
+ visitor.visit_reverse()
fn_node = node.body[0]
self.assertEqual(visitor.counts[fn_node.args], 1)
@@ -94,7 +98,7 @@ class AstToCfgTest(test.TestCase):
return cfgs
def _repr_set(self, node_set):
- return set(repr(n) for n in node_set)
+ return frozenset(repr(n) for n in node_set)
def _as_set(self, elements):
if elements is None:
@@ -110,14 +114,35 @@ class AstToCfgTest(test.TestCase):
matched = False
for cfg_node in graph.index.values():
if repr(cfg_node) == node_repr:
- if (self._as_set(prev) == set(map(repr, cfg_node.prev)) and
- self._as_set(next_) == set(map(repr, cfg_node.next))):
+ if (self._as_set(prev) == frozenset(map(repr, cfg_node.prev)) and
+ self._as_set(next_) == frozenset(map(repr, cfg_node.next))):
matched = True
break
if not matched:
self.fail(
'match failed for node "%s" in graph:\n%s' % (node_repr, graph))
+ def assertStatementEdges(self, graph, edges):
+ """Tests whether the CFG contains the specified statement edges."""
+ for prev_node_reprs, node_repr, next_node_reprs in edges:
+ matched = False
+ partial_matches = []
+ self.assertSetEqual(
+ frozenset(graph.stmt_next.keys()), frozenset(graph.stmt_prev.keys()))
+ for stmt_ast_node in graph.stmt_next:
+ ast_repr = '%s:%s' % (stmt_ast_node.__class__.__name__,
+ stmt_ast_node.lineno)
+ if ast_repr == node_repr:
+ actual_next = frozenset(map(repr, graph.stmt_next[stmt_ast_node]))
+ actual_prev = frozenset(map(repr, graph.stmt_prev[stmt_ast_node]))
+ partial_matches.append((actual_prev, node_repr, actual_next))
+ if (self._as_set(prev_node_reprs) == actual_prev and
+ self._as_set(next_node_reprs) == actual_next):
+ matched = True
+ break
+ if not matched:
+ self.fail('edges mismatch for %s: %s' % (node_repr, partial_matches))
+
def test_straightline(self):
def test_fn(a):
@@ -171,7 +196,7 @@ class AstToCfgTest(test.TestCase):
),
)
- def test_branch_straightline(self):
+ def test_if_straightline(self):
def test_fn(a):
if a > 0:
@@ -189,6 +214,10 @@ class AstToCfgTest(test.TestCase):
('(a > 0)', 'a += -1', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (('a', 'If:2', None),),
+ )
def test_branch_nested(self):
@@ -219,6 +248,14 @@ class AstToCfgTest(test.TestCase):
('(a > 2)', 'a = 4', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'If:2', None),
+ ('(a > 0)', 'If:3', None),
+ ('(a > 0)', 'If:8', None),
+ ),
+ )
def test_branch_straightline_semi(self):
@@ -236,6 +273,10 @@ class AstToCfgTest(test.TestCase):
('(a > 0)', 'a = 1', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (('a', 'If:2', None),),
+ )
def test_branch_return(self):
@@ -257,6 +298,10 @@ class AstToCfgTest(test.TestCase):
('a = 1', 'a = 2', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (('a', 'If:2', 'a = 2'),),
+ )
def test_branch_return_minimal(self):
@@ -273,6 +318,10 @@ class AstToCfgTest(test.TestCase):
('(a > 0)', 'return', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (('a', 'If:2', None),),
+ )
def test_while_straightline(self):
@@ -291,6 +340,10 @@ class AstToCfgTest(test.TestCase):
('(a > 0)', 'a = 2', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (('a', 'While:2', 'a = 2'),),
+ )
def test_while_else_straightline(self):
@@ -312,6 +365,10 @@ class AstToCfgTest(test.TestCase):
('a = 2', 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (('a', 'While:2', 'a = 3'),),
+ )
def test_while_else_continue(self):
@@ -339,6 +396,13 @@ class AstToCfgTest(test.TestCase):
('a = 2', 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'While:2', 'a = 3'),
+ ('(a > 0)', 'If:3', ('a = 1', '(a > 0)')),
+ ),
+ )
def test_while_else_break(self):
@@ -364,6 +428,13 @@ class AstToCfgTest(test.TestCase):
(('break', 'a = 2'), 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'While:2', 'a = 3'),
+ ('(a > 0)', 'If:3', ('a = 1', 'a = 3')),
+ ),
+ )
def test_while_else_return(self):
@@ -389,6 +460,13 @@ class AstToCfgTest(test.TestCase):
('a = 2', 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'While:2', 'a = 3'),
+ ('(a > 0)', 'If:3', 'a = 1'),
+ ),
+ )
def test_while_nested_straightline(self):
@@ -411,6 +489,13 @@ class AstToCfgTest(test.TestCase):
('(a > 0)', 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'While:2', 'a = 3'),
+ ('(a > 0)', 'While:3', 'a = 2'),
+ ),
+ )
def test_while_nested_continue(self):
@@ -437,6 +522,14 @@ class AstToCfgTest(test.TestCase):
('(a > 0)', 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'While:2', 'a = 3'),
+ ('(a > 0)', 'While:3', 'a = 2'),
+ ('(a > 1)', 'If:4', ('a = 1', '(a > 1)')),
+ ),
+ )
def test_while_nested_break(self):
@@ -451,16 +544,21 @@ class AstToCfgTest(test.TestCase):
graph, = self._build_cfg(test_fn).values()
- self.assertGraphMatches(
+ self.assertGraphMatches(graph, (
+ (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
+ (('(a > 0)', 'a = 1'), '(a > 1)', ('(a > 2)', 'a = 2')),
+ ('(a > 1)', '(a > 2)', ('break', 'a = 1')),
+ ('(a > 2)', 'break', 'a = 2'),
+ ('(a > 2)', 'a = 1', '(a > 1)'),
+ (('(a > 1)', 'break'), 'a = 2', '(a > 0)'),
+ ('(a > 0)', 'a = 3', None),
+ ))
+ self.assertStatementEdges(
graph,
(
- (('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
- (('(a > 0)', 'a = 1'), '(a > 1)', ('(a > 2)', 'a = 2')),
- ('(a > 1)', '(a > 2)', ('break', 'a = 1')),
- ('(a > 2)', 'break', 'a = 2'),
- ('(a > 2)', 'a = 1', '(a > 1)'),
- (('(a > 1)', 'break'), 'a = 2', '(a > 0)'),
- ('(a > 0)', 'a = 3', None),
+ ('a', 'While:2', 'a = 3'),
+ ('(a > 0)', 'While:3', 'a = 2'),
+ ('(a > 1)', 'If:4', ('a = 1', 'a = 2')),
),
)
@@ -481,6 +579,10 @@ class AstToCfgTest(test.TestCase):
('range(0, a)', 'a = 2', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (('a', 'For:2', 'a = 2'),),
+ )
def test_for_else_straightline(self):
@@ -502,6 +604,10 @@ class AstToCfgTest(test.TestCase):
('a = 2', 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (('a', 'For:2', 'a = 3'),),
+ )
def test_for_else_continue(self):
@@ -530,6 +636,13 @@ class AstToCfgTest(test.TestCase):
('a = 2', 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'For:2', 'a = 3'),
+ ('range(0, a)', 'If:3', ('a = 1', 'range(0, a)')),
+ ),
+ )
def test_for_else_break(self):
@@ -555,6 +668,13 @@ class AstToCfgTest(test.TestCase):
(('break', 'a = 2'), 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'For:2', 'a = 3'),
+ ('range(0, a)', 'If:3', ('a = 1', 'a = 3')),
+ ),
+ )
def test_for_else_return(self):
@@ -580,6 +700,13 @@ class AstToCfgTest(test.TestCase):
('a = 2', 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'For:2', 'a = 3'),
+ ('range(0, a)', 'If:3', 'a = 1'),
+ ),
+ )
def test_for_nested_straightline(self):
@@ -602,6 +729,13 @@ class AstToCfgTest(test.TestCase):
('range(0, a)', 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'For:2', 'a = 3'),
+ ('range(0, a)', 'For:3', 'a = 2'),
+ ),
+ )
def test_for_nested_continue(self):
@@ -629,6 +763,14 @@ class AstToCfgTest(test.TestCase):
('range(0, a)', 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'For:2', 'a = 3'),
+ ('range(0, a)', 'For:3', 'a = 2'),
+ ('range(1, a)', 'If:4', ('b += 1', 'range(1, a)')),
+ ),
+ )
def test_for_nested_break(self):
@@ -655,6 +797,14 @@ class AstToCfgTest(test.TestCase):
('range(0, a)', 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('a', 'For:2', 'a = 3'),
+ ('range(0, a)', 'For:3', 'a = 2'),
+ ('range(1, a)', 'If:4', ('b += 1', 'a = 2')),
+ ),
+ )
def test_complex(self):
@@ -704,6 +854,17 @@ class AstToCfgTest(test.TestCase):
('range(1, a)', 'a = 3', None),
),
)
+ self.assertStatementEdges(
+ graph,
+ (
+ ('b = 0', 'While:3', 'range(1, a)'),
+ ('(a > 0)', 'For:4', 'a = 2'),
+ ('range(0, a)', 'If:5', ('(a > 3)', 'a = 2')),
+ ('(a > 2)', 'If:7', ('b += 1', 'a = 2', 'range(0, a)')),
+ ('(a > 3)', 'If:8', ('a = 2', 'range(0, a)')),
+ ('(a > 0)', 'For:17', 'a = 3'),
+ ),
+ )
def test_finally_straightline(self):
@@ -785,6 +946,24 @@ class AstToCfgTest(test.TestCase):
),
)
+ def test_with_straightline(self):
+
+ def test_fn(a):
+ with max(a) as b:
+ a = 0
+ return b
+
+ graph, = self._build_cfg(test_fn).values()
+
+ self.assertGraphMatches(
+ graph,
+ (
+ ('a', 'max(a)', 'a = 0'),
+ ('max(a)', 'a = 0', 'return b'),
+ ('a = 0', 'return b', None),
+ ),
+ )
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/pyct/compiler.py b/tensorflow/contrib/autograph/pyct/compiler.py
index 24c4517afa..c90a5e89c2 100644
--- a/tensorflow/contrib/autograph/pyct/compiler.py
+++ b/tensorflow/contrib/autograph/pyct/compiler.py
@@ -30,9 +30,58 @@ import tempfile
import astor
import gast
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import ast_util
+from tensorflow.contrib.autograph.pyct import origin_info
+from tensorflow.contrib.autograph.pyct import parser
+
+
+def _build_source_map(node, code):
+ """Return the Python objects represented by given AST.
+
+ Compiling the AST code this way ensures that the source code is readable by
+ e.g. `pdb` or `inspect`.
+
+ Args:
+ node: An AST node of the original generated code, before the source code is
+ generated.
+ code: The string representation of the source code for the newly generated
+ code.
+
+ Returns:
+ Dict[CodeLocation, OriginInfo], a mapping between the user and AutoGraph
+ generated code.
+ """
+ # After we have the final generated code we reparse it to get the final line
+ # numbers. Then we walk through the generated and original ASTs in parallel
+ # to build the mapping between the user and generated code.
+ new_node = parser.parse_str(code)
+ origin_info.resolve(new_node, code)
+ source_mapping = {}
+ for before, after in ast_util.parallel_walk(node, new_node):
+ # Need both checks because if origin information is ever copied over to new
+ # nodes then we need to rely on the fact that only the original user code
+ # has the origin annotation.
+ if (anno.hasanno(before, anno.Basic.ORIGIN) and
+ anno.hasanno(after, anno.Basic.ORIGIN)):
+ source_info = anno.getanno(before, anno.Basic.ORIGIN)
+ new_line_number = anno.getanno(after, anno.Basic.ORIGIN).line_number
+ source_mapping[new_line_number] = source_info
+ return source_mapping
+
def ast_to_source(node, indentation=' '):
- """Return the source code of given AST."""
+ """Return the source code of given AST.
+
+ Args:
+ node: The code to compile, as an AST object.
+ indentation: The string to use for indentation.
+
+ Returns:
+ code: The source code generated from the AST object
+ source_mapping: A mapping between the user and AutoGraph generated code.
+ """
+ original_node = node
if isinstance(node, gast.AST):
node = gast.gast_to_ast(node)
generator = astor.codegen.SourceGenerator(indentation, False,
@@ -42,11 +91,16 @@ def ast_to_source(node, indentation=' '):
# In some versions of Python, literals may appear as actual values. This
# ensures everything is string.
code = map(str, generator.result)
- return astor.source_repr.pretty_source(code).lstrip()
+ code = astor.source_repr.pretty_source(code).lstrip()
+ source_mapping = _build_source_map(original_node, code)
+
+ return code, source_mapping
-def ast_to_object(
- node, indentation=' ', source_prefix=None, delete_on_exit=True):
+def ast_to_object(node,
+ indentation=' ',
+ source_prefix=None,
+ delete_on_exit=True):
"""Return the Python objects represented by given AST.
Compiling the AST code this way ensures that the source code is readable by
@@ -56,15 +110,31 @@ def ast_to_object(
node: The code to compile, as an AST object.
indentation: The string to use for indentation.
source_prefix: Optional string to print as-is into the source file.
- delete_on_exit: Whether to delete the temporary file used for compilation
- on exit.
+ delete_on_exit: Whether to delete the temporary file used for compilation on
+ exit.
Returns:
- A module object containing the compiled source code.
+ compiled_node: A module object containing the compiled source code.
+ source: The source code of the compiled object
+ Raises:
+ ValueError: If ag_source_map__ is already in the namespace of the compiled
+ node.
"""
- source = ast_to_source(node, indentation)
+ # code_source_mapping does not yet include the offsets from import statements.
+ source, code_source_mapping = ast_to_source(node, indentation=indentation)
with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
+ # TODO(znado): move into an _offset_source_map() helper function.
+ # Need to offset the generated line numbers by the number of import lines.
+ if source_prefix:
+ num_import_lines = source_prefix.count('\n') + 1
+ else:
+ num_import_lines = 0
+ source_mapping = {}
+ for line_number, original_position in code_source_mapping.items():
+ source_map_key = origin_info.CodeLocation(
+ file_path=f.name, line_number=line_number + num_import_lines)
+ source_mapping[source_map_key] = original_position
module_name = os.path.basename(f.name[:-3])
if source_prefix:
f.write(source_prefix)
@@ -72,4 +142,27 @@ def ast_to_object(
f.write(source)
if delete_on_exit:
atexit.register(lambda: os.remove(f.name))
- return imp.load_source(module_name, f.name), source
+ compiled_node = imp.load_source(module_name, f.name)
+
+ # TODO(znado): Clean this up so we don't need to attach it to the namespace.
+ # TODO(znado): This does not work for classes because their methods share a
+ # namespace.
+ # This attaches the source map which is needed for error handling. Note that
+ # api.to_graph copies this source map into an attribute of the function.
+ #
+ # We need this so the ag_source_map__ variable is available to the call to
+ # rewrite_graph_construction_error in the except block inside each function
+ # that handles graph construction errors.
+ #
+ # We cannot get the rewritten function name until it is too late, so
+ # templating is hard. Attaching the map to the outermost function also
+ # cleanly handles the issues encountered with nested functions.
+ source_map_name = 'ag_source_map__'
+ if source_map_name in compiled_node.__dict__:
+ raise ValueError('cannot convert %s because it has namespace attribute '
+ '"%s", which is reserved for AutoGraph.' %
+ (compiled_node, source_map_name))
+ compiled_node.__dict__[source_map_name] = source_mapping
+
+ return compiled_node, source
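# Illustrative sketch (not part of the patch): the new two-value return of
# ast_to_object and the attached source map; the 'triple' function is a
# hypothetical example.
from tensorflow.contrib.autograph.pyct import compiler
from tensorflow.contrib.autograph.pyct import parser

node = parser.parse_str('def triple(x):\n  return 3 * x\n')
module, source = compiler.ast_to_object(node)
assert module.triple(2) == 6
assert 'ag_source_map__' in module.__dict__  # Dict[CodeLocation, OriginInfo]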
diff --git a/tensorflow/contrib/autograph/pyct/compiler_test.py b/tensorflow/contrib/autograph/pyct/compiler_test.py
index 98cdc1506b..e29fa9324c 100644
--- a/tensorflow/contrib/autograph/pyct/compiler_test.py
+++ b/tensorflow/contrib/autograph/pyct/compiler_test.py
@@ -59,14 +59,14 @@ class CompilerTest(test.TestCase):
value=gast.Str('c'))
])
+ source, _ = compiler.ast_to_source(node, indentation=' ')
self.assertEqual(
textwrap.dedent("""
if 1:
a = b
else:
a = 'c'
- """).strip(),
- compiler.ast_to_source(node, indentation=' ').strip())
+ """).strip(), source.strip())
def test_ast_to_object(self):
node = gast.FunctionDef(
diff --git a/tensorflow/contrib/autograph/pyct/origin_info.py b/tensorflow/contrib/autograph/pyct/origin_info.py
index b3c6a43d37..614e346634 100644
--- a/tensorflow/contrib/autograph/pyct/origin_info.py
+++ b/tensorflow/contrib/autograph/pyct/origin_info.py
@@ -17,10 +17,16 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from collections import namedtuple
+import collections
+import gast
-class CodeLocation(namedtuple('CodeLocation', ('file_path', 'line_number'))):
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.python.util import tf_inspect
+
+
+class CodeLocation(
+ collections.namedtuple('CodeLocation', ('file_path', 'line_number'))):
"""Location of a line of code.
Attributes:
@@ -31,8 +37,9 @@ class CodeLocation(namedtuple('CodeLocation', ('file_path', 'line_number'))):
class OriginInfo(
- namedtuple('OriginInfo', ('file_path', 'function_name', 'line_number',
- 'column_offset', 'source_code_line'))):
+ collections.namedtuple('OriginInfo',
+ ('file_path', 'function_name', 'line_number',
+ 'column_offset', 'source_code_line'))):
"""Container for information about the source code before conversion.
Instances of this class contain information about the source code that
@@ -50,3 +57,44 @@ class OriginInfo(
"""
return (self.file_path, self.line_number, self.function_name,
self.source_code_line)
+
+
+# TODO(znado): Consider refactoring this into a Visitor.
+def resolve(node, source, function=None):
+ """Adds an origin information to all nodes inside the body of function.
+
+ Args:
+ node: The AST node for the function whose body nodes will be annotated.
+ source: Text, the source code string for the function whose body nodes will
+ be annotated.
+ function: Callable, the function that will have all nodes inside of it
+ annotated with an OriginInfo annotation with key anno.Basic.ORIGIN. If it
+ is None, then only the line numbers and column offsets will be set in the
+ annotation, with the rest of the information being None.
+ """
+ if function:
+ _, function_lineno = tf_inspect.getsourcelines(function)
+ function_filepath = tf_inspect.getsourcefile(function)
+ else:
+ function_lineno = None
+ function_filepath = None
+ source_lines = source.split('\n')
+ for n in gast.walk(node):
+ if hasattr(n, 'lineno'):
+ # n.lineno is relative to the start of the enclosing function, so we need
+ # to offset it by the starting line number of the function.
+ source_code_line = source_lines[n.lineno - 1]
+ if function:
+ source_lineno = n.lineno + function_lineno - 1
+ function_name = function.__name__
+ else:
+ source_lineno = n.lineno
+ function_name = None
+ anno.setanno(
+ n, anno.Basic.ORIGIN,
+ OriginInfo(function_filepath, function_name, source_lineno,
+ n.col_offset, source_code_line))
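# Illustrative sketch (not part of the patch): annotating a parsed tree with
# ORIGIN information when no live function object is available.
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import origin_info
from tensorflow.contrib.autograph.pyct import parser

source = 'x = 1\ny = x + 1\n'
node = parser.parse_str(source)
origin_info.resolve(node, source)
origin = anno.getanno(node.body[1], anno.Basic.ORIGIN)
assert origin.line_number == 2
assert origin.source_code_line == 'y = x + 1'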
diff --git a/tensorflow/contrib/autograph/pyct/qual_names.py b/tensorflow/contrib/autograph/pyct/qual_names.py
index da07013cf4..fb81404edc 100644
--- a/tensorflow/contrib/autograph/pyct/qual_names.py
+++ b/tensorflow/contrib/autograph/pyct/qual_names.py
@@ -30,6 +30,7 @@ import collections
import gast
from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import parser
class Symbol(collections.namedtuple('Symbol', ['name'])):
@@ -89,7 +90,8 @@ class QN(object):
if not isinstance(base, (str, StringLiteral, NumberLiteral)):
# TODO(mdan): Require Symbol instead of string.
raise ValueError(
- 'For simple QNs, base must be a string or a Literal object.')
+ 'for simple QNs, base must be a string or a Literal object;'
+ ' got instead "%s"' % type(base))
assert '.' not in base and '[' not in base and ']' not in base
self._parent = None
self.qn = (base,)
@@ -113,6 +115,22 @@ class QN(object):
return self._parent
@property
+ def owner_set(self):
+ """Returns all the symbols (simple or composite) that own this QN.
+
+ In other words, if this symbol was modified, the symbols in the owner set
+ may also be affected.
+
+ Examples:
+ 'a.b[c.d]' has two owners, 'a' and 'a.b'
+ """
+ owners = set()
+ if self.has_attr() or self.has_subscript():
+ owners.add(self.parent)
+ owners.update(self.parent.owner_set)
+ return owners
+
+ @property
def support_set(self):
"""Returns the set of simple symbols that this QN relies on.
@@ -122,7 +140,7 @@ class QN(object):
Examples:
'a.b' has only one support symbol, 'a'
- 'a[i]' has two roots, 'a' and 'i'
+ 'a[i]' has two support symbols, 'a' and 'i'
"""
# TODO(mdan): This might be the set of Name nodes in the AST. Track those?
roots = set()
@@ -231,3 +249,9 @@ class QnResolver(gast.NodeTransformer):
def resolve(node):
return QnResolver().visit(node)
+
+
+def from_str(qn_str):
+ node = parser.parse_expression(qn_str)
+ node = resolve(node)
+ return anno.getanno(node, anno.Basic.QN)
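# Illustrative sketch (not part of the patch): the new from_str helper and
# owner_set property added above; the names 'a', 'b', 'c', 'd' are arbitrary.
from tensorflow.contrib.autograph.pyct import qual_names

qn = qual_names.from_str('a.b[c.d]')
assert qn.owner_set == {qual_names.from_str('a'), qual_names.from_str('a.b')}
assert qual_names.from_str('a.b') == qual_names.QN(qual_names.QN('a'), attr='b')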
diff --git a/tensorflow/contrib/autograph/pyct/qual_names_test.py b/tensorflow/contrib/autograph/pyct/qual_names_test.py
index 264afd508c..c793c2bb39 100644
--- a/tensorflow/contrib/autograph/pyct/qual_names_test.py
+++ b/tensorflow/contrib/autograph/pyct/qual_names_test.py
@@ -30,6 +30,15 @@ from tensorflow.python.platform import test
class QNTest(test.TestCase):
+ def test_from_str(self):
+ a = QN('a')
+ b = QN('b')
+ a_dot_b = QN(a, attr='b')
+ a_sub_b = QN(a, subscript=b)
+ self.assertEqual(qual_names.from_str('a.b'), a_dot_b)
+ self.assertEqual(qual_names.from_str('a'), a)
+ self.assertEqual(qual_names.from_str('a[b]'), a_sub_b)
+
def test_basic(self):
a = QN('a')
self.assertEqual(a.qn, ('a',))
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/BUILD b/tensorflow/contrib/autograph/pyct/static_analysis/BUILD
index bcf2dacec2..92eacba3fd 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/BUILD
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/BUILD
@@ -19,8 +19,9 @@ py_library(
srcs = [
"activity.py",
"annos.py",
- "cfg.py",
"live_values.py",
+ "liveness.py",
+ "reaching_definitions.py",
"type_info.py",
],
srcs_version = "PY2AND3",
@@ -28,6 +29,7 @@ py_library(
deps = [
"//tensorflow/contrib/autograph/pyct",
"//tensorflow/contrib/autograph/utils",
+ "//tensorflow/python:util",
"@gast_archive//:gast",
],
)
@@ -46,23 +48,32 @@ py_test(
)
py_test(
- name = "cfg_test",
- srcs = ["cfg_test.py"],
+ name = "live_values_test",
+ srcs = ["live_values_test.py"],
srcs_version = "PY2AND3",
tags = ["no_windows"],
deps = [
":static_analysis",
"//tensorflow/contrib/autograph/pyct",
"//tensorflow/python:client_testlib",
- "@gast_archive//:gast",
],
)
py_test(
- name = "live_values_test",
- srcs = ["live_values_test.py"],
+ name = "liveness_test",
+ srcs = ["liveness_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":static_analysis",
+ "//tensorflow/contrib/autograph/pyct",
+ "//tensorflow/python:client_testlib",
+ ],
+)
+
+py_test(
+ name = "reaching_definitions_test",
+ srcs = ["reaching_definitions_test.py"],
srcs_version = "PY2AND3",
- tags = ["no_windows"],
deps = [
":static_analysis",
"//tensorflow/contrib/autograph/pyct",
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/activity.py b/tensorflow/contrib/autograph/pyct/static_analysis/activity.py
index 4d7b0cbb7b..a0182da9d1 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/activity.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/activity.py
@@ -12,7 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Activity analysis."""
+"""Activity analysis.
+
+Requires qualified name annotations (see qual_names.py).
+"""
from __future__ import absolute_import
from __future__ import division
@@ -59,9 +62,10 @@ class Scope(object):
self.parent = parent
self.add_unknown_symbols = add_unknown_symbols
self.modified = set()
+ # TODO(mdan): Completely remove this.
self.created = set()
self.used = set()
- self.params = set()
+ self.params = {}
self.returned = set()
# TODO(mdan): Rename to `locals`
@@ -106,37 +110,23 @@ class Scope(object):
self.modified |= other.modified
self.created |= other.created
self.used |= other.used
- self.params |= other.params
+ self.params.update(other.params)
self.returned |= other.returned
def has(self, name):
- if name in self.modified or name in self.params:
+ if name in self.modified:
return True
elif self.parent is not None:
return self.parent.has(name)
return False
- def is_modified_since_entry(self, name):
- if name in self.modified:
- return True
- elif self.parent is not None and not self.isolated:
- return self.parent.is_modified_since_entry(name)
- return False
-
- def is_param(self, name):
- if name in self.params:
- return True
- elif self.parent is not None and not self.isolated:
- return self.parent.is_param(name)
- return False
-
def mark_read(self, name):
self.used.add(name)
if self.parent is not None and name not in self.created:
self.parent.mark_read(name)
- def mark_param(self, name):
- self.params.add(name)
+ def mark_param(self, name, owner):
+ self.params[name] = owner
def mark_creation(self, name, writes_create_symbol=False):
"""Mark a qualified name as created."""
@@ -226,37 +216,56 @@ class ActivityAnalyzer(transformer.Base):
elif isinstance(node.ctx, gast.Param):
# Param contexts appear in function defs, so they have the meaning of
# defining a variable.
- # TODO(mdan): This may be incorrect with nested functions.
- # For nested functions, we'll have to add the notion of hiding args from
- # the parent scope, not writing to them.
- self.scope.mark_creation(qn)
- self.scope.mark_param(qn)
+ self.scope.mark_write(qn)
+ self.scope.mark_param(qn, self.enclosing_entities[-1])
else:
raise ValueError('Unknown context %s for node %s.' % (type(node.ctx), qn))
anno.setanno(node, NodeAnno.IS_LOCAL, self.scope.has(qn))
- anno.setanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY,
- self.scope.is_modified_since_entry(qn))
- anno.setanno(node, NodeAnno.IS_PARAM, self.scope.is_param(qn))
if self._in_return_statement:
self.scope.mark_returned(qn)
+ def _enter_scope(self, isolated):
+ self.scope = Scope(self.scope, isolated=isolated)
+
+ def _exit_scope(self):
+ self.scope = self.scope.parent
+
+ def _process_statement(self, node):
+ self._enter_scope(False)
+ node = self.generic_visit(node)
+ anno.setanno(node, anno.Static.SCOPE, self.scope)
+ self._exit_scope()
+ return node
+
+ def visit_Expr(self, node):
+ return self._process_statement(node)
+
+ def visit_Return(self, node):
+ self._in_return_statement = True
+ node = self._process_statement(node)
+ self._in_return_statement = False
+ return node
+
+ def visit_Assign(self, node):
+ return self._process_statement(node)
+
def visit_AugAssign(self, node):
# Special rules for AugAssign. In Assign, the target is only written,
# but in AugAssign (e.g. a += b), the target is both read and written.
self._in_aug_assign = True
- self.generic_visit(node)
+ node = self._process_statement(node)
self._in_aug_assign = False
return node
def visit_Name(self, node):
- self.generic_visit(node)
+ node = self.generic_visit(node)
self._track_symbol(node)
return node
def visit_Attribute(self, node):
- self.generic_visit(node)
+ node = self.generic_visit(node)
if self._in_constructor and self._node_sets_self_attribute(node):
self._track_symbol(
node, composite_writes_alter_parent=True, writes_create_symbol=True)
@@ -265,44 +274,38 @@ class ActivityAnalyzer(transformer.Base):
return node
def visit_Subscript(self, node):
- self.generic_visit(node)
+ node = self.generic_visit(node)
# Subscript writes (e.g. a[b] = "value") are considered to modify
# both the element itself (a[b]) and its parent (a).
- self._track_symbol(node, composite_writes_alter_parent=True)
+ self._track_symbol(node)
return node
def visit_Print(self, node):
- current_scope = self.scope
- args_scope = Scope(current_scope)
- self.scope = args_scope
- for n in node.values:
- self.visit(n)
- anno.setanno(node, NodeAnno.ARGS_SCOPE, args_scope)
- self.scope = current_scope
+ self._enter_scope(False)
+ node.values = self.visit_block(node.values)
+ anno.setanno(node, anno.Static.SCOPE, self.scope)
+ anno.setanno(node, NodeAnno.ARGS_SCOPE, self.scope)
+ self._exit_scope()
return node
+ def visit_Assert(self, node):
+ return self._process_statement(node)
+
def visit_Call(self, node):
- current_scope = self.scope
- args_scope = Scope(current_scope, isolated=False)
- self.scope = args_scope
- for n in node.args:
- self.visit(n)
+ self._enter_scope(False)
+ node.args = self.visit_block(node.args)
+ node.keywords = self.visit_block(node.keywords)
# TODO(mdan): Account starargs, kwargs
- for n in node.keywords:
- self.visit(n)
- anno.setanno(node, NodeAnno.ARGS_SCOPE, args_scope)
- self.scope = current_scope
- self.visit(node.func)
+ anno.setanno(node, NodeAnno.ARGS_SCOPE, self.scope)
+ self._exit_scope()
+ node.func = self.visit(node.func)
return node
def _process_block_node(self, node, block, scope_name):
- current_scope = self.scope
- block_scope = Scope(current_scope, isolated=False)
- self.scope = block_scope
- for n in block:
- self.visit(n)
- anno.setanno(node, scope_name, block_scope)
- self.scope = current_scope
+ self._enter_scope(False)
+ block = self.visit_block(block)
+ anno.setanno(node, scope_name, self.scope)
+ self._exit_scope()
return node
def _process_parallel_blocks(self, parent, children):
@@ -321,94 +324,75 @@ class ActivityAnalyzer(transformer.Base):
self.scope.merge_from(after_child)
return parent
+ def visit_arguments(self, node):
+ return self._process_statement(node)
+
def visit_FunctionDef(self, node):
- if self.scope:
- qn = qual_names.QN(node.name)
- self.scope.mark_write(qn)
- current_scope = self.scope
- body_scope = Scope(current_scope, isolated=True)
- self.scope = body_scope
- self.generic_visit(node)
- anno.setanno(node, NodeAnno.BODY_SCOPE, body_scope)
- self.scope = current_scope
+ # The FunctionDef node itself has a Scope object that tracks the creation
+ # of its name, along with the usage of any decorators accompanying it.
+ self._enter_scope(False)
+ node.decorator_list = self.visit_block(node.decorator_list)
+ self.scope.mark_write(qual_names.QN(node.name))
+ anno.setanno(node, anno.Static.SCOPE, self.scope)
+ self._exit_scope()
+
+ # A separate Scope tracks the actual function definition.
+ self._enter_scope(True)
+ node.args = self.visit(node.args)
+
+ # Track the body separately. This is for compatibility reasons; it may not
+ # be strictly needed.
+ self._enter_scope(False)
+ node.body = self.visit_block(node.body)
+ anno.setanno(node, NodeAnno.BODY_SCOPE, self.scope)
+ self._exit_scope()
+
+ self._exit_scope()
return node
def visit_With(self, node):
- current_scope = self.scope
- with_scope = Scope(current_scope, isolated=False)
- self.scope = with_scope
- self.generic_visit(node)
- anno.setanno(node, NodeAnno.BODY_SCOPE, with_scope)
- self.scope = current_scope
+ self._enter_scope(False)
+ node = self.generic_visit(node)
+ anno.setanno(node, NodeAnno.BODY_SCOPE, self.scope)
+ self._exit_scope()
return node
- def visit_If(self, node):
- current_scope = self.scope
- cond_scope = Scope(current_scope, isolated=False)
- self.scope = cond_scope
- self.visit(node.test)
- anno.setanno(node, NodeAnno.COND_SCOPE, cond_scope)
- self.scope = current_scope
+ def visit_withitem(self, node):
+ return self._process_statement(node)
+ def visit_If(self, node):
+ self._enter_scope(False)
+ node.test = self.visit(node.test)
+ anno.setanno(node, NodeAnno.COND_SCOPE, self.scope)
+ anno.setanno(node.test, anno.Static.SCOPE, self.scope)
+ self._exit_scope()
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def visit_For(self, node):
- self.visit(node.target)
- self.visit(node.iter)
+ self._enter_scope(False)
+ node.target = self.visit(node.target)
+ node.iter = self.visit(node.iter)
+ anno.setanno(node.iter, anno.Static.SCOPE, self.scope)
+ self._exit_scope()
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
def visit_While(self, node):
- current_scope = self.scope
- cond_scope = Scope(current_scope, isolated=False)
- self.scope = cond_scope
- self.visit(node.test)
- anno.setanno(node, NodeAnno.COND_SCOPE, cond_scope)
- self.scope = current_scope
-
+ self._enter_scope(False)
+ node.test = self.visit(node.test)
+ anno.setanno(node, NodeAnno.COND_SCOPE, self.scope)
+ anno.setanno(node.test, anno.Static.SCOPE, self.scope)
+ self._exit_scope()
node = self._process_parallel_blocks(node,
((node.body, NodeAnno.BODY_SCOPE),
(node.orelse, NodeAnno.ORELSE_SCOPE)))
return node
- def visit_Return(self, node):
- self._in_return_statement = True
- node = self.generic_visit(node)
- self._in_return_statement = False
- return node
-
-
-def get_read(node, context):
- """Return the variable names as QNs (qual_names.py) read by this statement."""
- analyzer = ActivityAnalyzer(context, None, True)
- analyzer.visit(node)
- return analyzer.scope.used
-
-
-def get_updated(node, context):
- """Return the variable names created or mutated by this statement.
-
- This function considers assign statements, augmented assign statements, and
- the targets of for loops, as well as function arguments.
- For example, `x[0] = 2` will return `x`, `x, y = 3, 4` will return `x` and
- `y`, `for i in range(x)` will return `i`, etc.
- Args:
- node: An AST node
- context: An EntityContext instance
-
- Returns:
- A set of variable names (QNs, see qual_names.py) of all the variables
- created or mutated.
- """
- analyzer = ActivityAnalyzer(context, None, True)
- analyzer.visit(node)
- return analyzer.scope.created | analyzer.scope.modified
-
def resolve(node, context, parent_scope=None):
return ActivityAnalyzer(context, parent_scope).visit(node)
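
The refactoring above replaces the ad-hoc save/restore of self.scope with a
small _enter_scope/_exit_scope pair and a shared _process_statement helper
that tags each statement with anno.Static.SCOPE. A minimal, self-contained
sketch of that parent-linked scope-stack pattern follows; ScopeSketch and
StatementScopes are illustrative stand-ins, not the classes in activity.py.

import ast


class ScopeSketch(object):
  """Illustrative stand-in for activity.Scope: parent-linked read/write sets."""

  def __init__(self, parent, isolated=True):
    self.parent = parent
    self.isolated = isolated
    self.read = set()
    self.modified = set()


class StatementScopes(ast.NodeVisitor):
  """Mirrors the _enter_scope/_process_statement pattern for Assign nodes."""

  def __init__(self):
    self.scope = None
    self.statement_scopes = {}

  def _enter_scope(self, isolated):
    self.scope = ScopeSketch(self.scope, isolated=isolated)

  def _exit_scope(self):
    self.scope = self.scope.parent

  def visit_Assign(self, node):
    self._enter_scope(False)
    self.generic_visit(node)
    self.statement_scopes[node] = self.scope  # analogous to anno.Static.SCOPE
    self._exit_scope()

  def visit_Name(self, node):
    if isinstance(node.ctx, ast.Store):
      self.scope.modified.add(node.id)
    else:
      self.scope.read.add(node.id)


tree = ast.parse('y = x + 1')
visitor = StatementScopes()
visitor.visit(tree)
scope = visitor.statement_scopes[tree.body[0]]
print(sorted(scope.read))      # ['x']
print(sorted(scope.modified))  # ['y']
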
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py
index bc22be0a27..e940516190 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/activity_test.py
@@ -52,18 +52,18 @@ class ScopeTest(test.TestCase):
other = activity.Scope(None)
other.copy_from(scope)
- self.assertTrue(QN('foo') in other.created)
+ self.assertTrue(QN('foo') in other.modified)
scope.mark_write(QN('bar'))
scope.copy_from(other)
- self.assertFalse(QN('bar') in scope.created)
+ self.assertFalse(QN('bar') in scope.modified)
scope.mark_write(QN('bar'))
scope.merge_from(other)
- self.assertTrue(QN('bar') in scope.created)
- self.assertFalse(QN('bar') in other.created)
+ self.assertTrue(QN('bar') in scope.modified)
+ self.assertFalse(QN('bar') in other.modified)
def test_copy_of(self):
scope = activity.Scope(None)
@@ -157,7 +157,8 @@ class ActivityAnalyzerTest(test.TestCase):
"""Assert the scope contains specific used, modified & created variables."""
self.assertSymbolSetsAre(used, scope.used, 'read')
self.assertSymbolSetsAre(modified, scope.modified, 'modified')
- self.assertSymbolSetsAre(created, scope.created, 'created')
+ # Created is deprecated; we no longer verify it.
+ # self.assertSymbolSetsAre(created, scope.created, 'created')
def test_print_statement(self):
@@ -215,12 +216,6 @@ class ActivityAnalyzerTest(test.TestCase):
(),
(),
)
- self.assertScopeIsRmc(
- anno.getanno(call_node, NodeAnno.ARGS_SCOPE).parent,
- ('a', 'a.b', 'a.c', 'a.d', 'foo'),
- ('a.c',),
- ('a',),
- )
def test_call_args_subscripts(self):
@@ -241,12 +236,6 @@ class ActivityAnalyzerTest(test.TestCase):
(),
(),
)
- self.assertScopeIsRmc(
- anno.getanno(call_node, NodeAnno.ARGS_SCOPE).parent,
- ('a', 'a[0]', 'a[b]', 'a[c]', 'b', 'c', 'foo'),
- ('b', 'c'),
- ('a', 'b', 'c'),
- )
def test_while(self):
@@ -362,20 +351,20 @@ class ActivityAnalyzerTest(test.TestCase):
self.assertScopeIsRmc(
anno.getanno(if_node, NodeAnno.BODY_SCOPE),
('a', 'b', 'c', 'a[c]'),
- ('a', 'a[b]', 'd'),
+ ('a[b]', 'd'),
('d',),
)
# TODO(mdan): Should subscript writes (a[0] = 1) be considered to read "a"?
self.assertScopeIsRmc(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE),
('a', 'e'),
- ('a', 'a[0]', 'd'),
+ ('a[0]', 'd'),
('d',),
)
self.assertScopeIsRmc(
anno.getanno(if_node, NodeAnno.ORELSE_SCOPE).parent,
('a', 'b', 'c', 'd', 'e', 'a[c]'),
- ('a', 'd', 'a[b]', 'a[0]'),
+ ('d', 'a[b]', 'a[0]'),
('a', 'b', 'c', 'd', 'e'),
)
@@ -416,10 +405,6 @@ class ActivityAnalyzerTest(test.TestCase):
fn_def_node = node.body[0].body[0]
self.assertScopeIsRmc(
- anno.getanno(fn_def_node,
- NodeAnno.BODY_SCOPE).parent, ('b', 'i', 'f', 'c', 'a'),
- ('f', 'b', 'c', 'i'), ('f', 'a', 'b', 'c', 'i'))
- self.assertScopeIsRmc(
anno.getanno(fn_def_node, NodeAnno.BODY_SCOPE), ('x', 'y'), ('y',), (
'x',
'y',
@@ -452,7 +437,7 @@ class ActivityAnalyzerTest(test.TestCase):
self.assertScopeIsRmc(
anno.getanno(fn_node, NodeAnno.BODY_SCOPE),
('a', 'a[0]'),
- ('a', 'a[0]'),
+ ('a[0]',),
('a',),
)
@@ -518,47 +503,6 @@ class ActivityAnalyzerTest(test.TestCase):
anno.getanno(fn_node, NodeAnno.BODY_SCOPE), ('b',), (('')),
(('a', 'b')))
- def test_get_read(self):
-
- def test_fn(x, y):
- z = test_fn(x, y)
- return z
-
- node, ctx = self._parse_and_analyze(test_fn)
- node = node.body[0].body[0]
- read_vars = activity.get_read(node, ctx)
- self.assertEqual(read_vars, set(map(qual_names.QN, ('test_fn', 'x', 'y'))))
-
- def test_fn2(x, y, z):
- z += test_fn2(x, y, z)
- return z
-
- node, ctx = self._parse_and_analyze(test_fn2)
- node = node.body[0].body[0]
- read_vars = activity.get_read(node, ctx)
- self.assertEqual(read_vars,
- set(map(qual_names.QN, ('test_fn2', 'x', 'y', 'z'))))
-
- def test_get_updated(self):
-
- def test_fn(x, y):
- z = test_fn(x, y)
- return z
-
- node, ctx = self._parse_and_analyze(test_fn)
- node = node.body[0].body[0]
- updated_vars = activity.get_updated(node, ctx)
- self.assertEqual(updated_vars, set(map(qual_names.QN, ('z'))))
-
- def test_fn2(x, y, z):
- z += test_fn2(x, y, z)
- return z
-
- node, ctx = self._parse_and_analyze(test_fn2)
- node = node.body[0].body[0]
- updated_vars = activity.get_updated(node, ctx)
- self.assertEqual(updated_vars, set(map(qual_names.QN, ('z'))))
-
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/annos.py b/tensorflow/contrib/autograph/pyct/static_analysis/annos.py
index b929b35b79..5eefecf278 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/annos.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/annos.py
@@ -21,6 +21,9 @@ from __future__ import print_function
from enum import Enum
+# TODO(mdan): Remove.
+
+
class NoValue(Enum):
def __repr__(self):
@@ -50,10 +53,3 @@ class NodeAnno(NoValue):
ORELSE_SCOPE = (
'The scope for the orelse body of a statement (False branch for if '
'statements, orelse body for loops).')
-
- # Type and Value annotations
- # Type annotations are represented by objects of type type_info.Type.
- STATIC_INFO = (
- 'The type or value information that should be asserted about the entity '
- 'referenced by the symbol holding this annotation, irrespective of the '
- 'execution context.')
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/cfg.py b/tensorflow/contrib/autograph/pyct/static_analysis/cfg.py
deleted file mode 100644
index 4acc4ed66a..0000000000
--- a/tensorflow/contrib/autograph/pyct/static_analysis/cfg.py
+++ /dev/null
@@ -1,446 +0,0 @@
-# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Control flow graph analysis.
-
-Given a Python AST we construct a control flow graph, with edges both to the
-next and previous statements (so it can easily walk the graph both ways). Its
-nodes contain the AST of the statements. It can then perform forward or backward
-analysis on this CFG.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from collections import namedtuple
-import functools
-import operator
-
-import gast
-
-from tensorflow.contrib.autograph.pyct import anno
-from tensorflow.contrib.autograph.pyct.static_analysis import activity
-
-
-class CfgNode(object):
- """A node in the CFG."""
- __slots__ = ['next', 'value', 'prev']
-
- def __init__(self, value):
- self.next = set()
- self.prev = set()
- self.value = value
-
-
-class Cfg(namedtuple('Cfg', ['entry', 'exit'])):
- """A Control Flow Graph.
-
- Each statement is represented as a node. For control flow statements such
- as conditionals and loops the conditional itself is a node which either
- branches or cycles, respectively.
- Attributes:
- entry: The entry node, which contains the `gast.arguments` node of the
- function definition.
- exit: The exit node. This node is special because it has no value (i.e. no
- corresponding AST node). This is because Python functions can have
- multiple return statements.
- """
- pass
-
-
-class CfgBuilder(gast.NodeVisitor):
- """Construct a control flow graph.
-
- Construct a CFG starting from a FunctionDef node.
- Usage:
- cfg_obj = CfgBuilder().build_cfg(fndef_node)
- """
-
- def __init__(self):
- # The current leaves of the CFG
- self.current_leaves = []
- # TODO(alexbw): generalize to break, return, continue, yield, etc.
- # A stack of lists, tracking continue statements
- self.continue_ = []
- # A stack of lists tracking break nodes
- self.break_ = []
-
- def set_current_leaves(self, cfg_node):
- """Link this cfg_node to the current leaves.
-
- This is the central function for building the CFG. It links the current
- head cfg_nodes to the passed cfg_node. It then resets the head to the
- passed cfg_node.
-
- Args:
- cfg_node: A CfgNode instance.
- """
- for head in self.current_leaves:
- head.next.add(cfg_node)
- # While we're linking the CFG forward, add backlinks
- cfg_node.prev.add(head)
- self.current_leaves = [cfg_node]
-
- def build_cfg(self, node):
- """Build a CFG for a function.
-
- Implementation of building a CFG for dataflow analysis. See, e.g.:
- https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf
-
- Args:
- node: A function definition the body of which to analyze.
- Returns:
- A CFG object.
- Raises:
- TypeError: If the input is not a function definition.
- """
- if not isinstance(node, gast.FunctionDef):
- raise TypeError('input must be a function definition')
- entry_cfg_node = CfgNode(node.args)
- self.current_leaves = [entry_cfg_node]
- self.visit_statements(node.body)
- exit_cfg_node = CfgNode(None)
- self.set_current_leaves(exit_cfg_node)
- return Cfg(entry_cfg_node, exit_cfg_node)
-
- def visit_statements(self, nodes):
- for node in nodes:
- # Check for control flow
- if isinstance(node, (gast.For, gast.While, gast.If, gast.Try, gast.Break,
- gast.Continue, gast.With)):
- self.visit(node)
- else:
- expr = CfgNode(node)
- self.set_current_leaves(expr)
-
- def generic_visit(self, node):
- raise ValueError('unknown control flow')
-
- def visit_If(self, node):
- # TODO(alexbw): change this to use immutable tuples instead of lists
- # The current head will hold the conditional
- test = CfgNode(node.test)
- self.set_current_leaves(test)
- # Handle the body
- self.visit_statements(node.body)
- body_exit = self.current_leaves
- self.current_leaves = [test]
- # Handle the orelse
- self.visit_statements(node.orelse)
- self.current_leaves.extend(body_exit)
-
- def visit_While(self, node):
- test = CfgNode(node.test)
- self.set_current_leaves(test)
- # Start a new level of nesting
- self.break_.append([])
- self.continue_.append([])
- # Handle the body
- self.visit_statements(node.body)
- body_exit = self.current_leaves
- self.current_leaves.extend(self.continue_.pop())
- self.set_current_leaves(test)
- # Handle the orelse
- self.visit_statements(node.orelse)
- # The break statements and the test go to the next node
- self.current_leaves.extend(self.break_.pop())
- # Body and orelse statements can reach out of the loop
- self.current_leaves.extend(body_exit)
-
- def visit_For(self, node):
- iter_ = CfgNode(node.iter)
- self.set_current_leaves(iter_)
- self.break_.append([])
- self.continue_.append([])
- self.visit_statements(node.body)
- body_exit = self.current_leaves
- self.current_leaves.extend(self.continue_.pop())
- self.set_current_leaves(iter_)
- # Handle the orelse
- self.visit_statements(node.orelse)
- # The break statements and the test go to the next node
- self.current_leaves.extend(self.break_.pop())
- # Body and orelse statements can reach out of the loop
- self.current_leaves.extend(body_exit)
-
- def visit_Break(self, node):
- self.break_[-1].extend(self.current_leaves)
- self.current_leaves[:] = []
-
- def visit_Continue(self, node):
- self.continue_[-1].extend(self.current_leaves)
- self.current_leaves[:] = []
-
- def visit_Try(self, node):
- self.visit_statements(node.body)
- body = self.current_leaves
- handlers = []
- for handler in node.handlers:
- self.current_leaves = body[:]
- self.visit_statements(handler.body)
- handlers.extend(self.current_leaves)
- self.current_leaves = body
- self.visit_statements(node.orelse)
- self.current_leaves = handlers + self.current_leaves
- self.visit_statements(node.finalbody)
-
- def visit_With(self, node):
- for item in node.items:
- self.set_current_leaves(CfgNode(item))
- self.visit_statements(node.body)
-
-
-# TODO(alexbw): once CFG analysis occurs at a block level,
-# this extra class will not be necessary
-class PropagateAnalysis(gast.NodeVisitor):
- """Port analysis annotations from statements to their enclosing blocks."""
-
- def __init__(self, analysis):
- self.transfer_fn = analysis.transfer_fn
- self.in_label = analysis.in_label
- self.out_label = analysis.out_label
- super(PropagateAnalysis, self).__init__()
-
- def visit_If(self, node):
- # Depth-first.
- self.generic_visit(node)
- incoming = anno.getanno(node.body[0], self.in_label)
- incoming |= anno.getanno(node.test, self.in_label)
- outgoing = anno.getanno(node.body[-1], self.out_label)
- outgoing |= anno.getanno(node.test, self.out_label)
- if node.orelse:
- orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
- outgoing = self.transfer_fn(outgoing, orelse_outgoing)
- anno.setanno(node, self.in_label, incoming)
- anno.setanno(node, self.out_label, outgoing)
-
- def visit_For(self, node):
- self.generic_visit(node)
- incoming = set(anno.getanno(node.body[0], self.in_label))
- incoming -= set((anno.getanno(node.target, anno.Basic.QN),))
- outgoing = anno.getanno(node.body[-1], self.out_label)
- if node.orelse:
- orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
- outgoing = self.transfer_fn(outgoing, orelse_outgoing)
- anno.setanno(node, self.in_label, frozenset(incoming))
- anno.setanno(node, self.out_label, outgoing)
-
- def visit_While(self, node):
- self.generic_visit(node)
- incoming = anno.getanno(node.body[0], self.in_label)
- incoming |= anno.getanno(node.test, self.in_label)
- outgoing = anno.getanno(node.body[-1], self.out_label)
- if node.orelse:
- orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
- outgoing = self.transfer_fn(outgoing, orelse_outgoing)
- anno.setanno(node, self.in_label, incoming)
- anno.setanno(node, self.out_label, outgoing)
-
- def visit_With(self, node):
- self.generic_visit(node)
- incoming = anno.getanno(node.body[0], self.in_label)
- for item in node.items:
- incoming |= anno.getanno(item, self.in_label)
- outgoing = anno.getanno(node.body[-1], self.out_label)
- anno.setanno(node, self.in_label, incoming)
- anno.setanno(node, self.out_label, outgoing)
-
-
-# TODO(alexbw): Abstract the CFG walking machinery into a superclass
-# which is parameterized on which fields it selects when walking.
-# TODO(alexbw): Abstract the application of dataflow analysis
-class Forward(object):
- """Forward analysis on CFG.
-
- Args:
- label: A name for this analysis e.g. 'active' for activity analysis. The AST
- nodes in the CFG will be given annotations 'name_in', 'name_out',
- 'name_gen' and 'name_kill' which contain the incoming values, outgoing
- values, values generated by the statement, and values deleted by the
- statement respectively.
- transfer_fn: Either the AND or OR operator. If the AND operator is used it
- turns into forward must analysis (i.e. a value will only be carried
- forward if it appears on all incoming paths). The OR operator means that
- forward may analysis is done (i.e. the union of incoming values will be
- taken).
- """
-
- def __init__(self, label, source_info, transfer_fn=operator.or_):
- self.transfer_fn = transfer_fn
- self.source_info = source_info
- self.out_label = label + '_out'
- self.in_label = label + '_in'
- self.gen_label = label + '_gen'
- self.kill_label = label + '_kill'
-
- # TODO(alexbw): see if we can simplify by visiting breadth-first
- def visit(self, node):
- """Depth-first walking the CFG, applying dataflow info propagation."""
- # node.value is None only for the exit CfgNode.
- if not node.value:
- return
-
- if anno.hasanno(node.value, self.out_label):
- before = hash(anno.getanno(node.value, self.out_label))
- else:
- before = None
- preds = [
- anno.getanno(pred.value, self.out_label)
- for pred in node.prev
- if anno.hasanno(pred.value, self.out_label)
- ]
- if preds:
- incoming = functools.reduce(self.transfer_fn, preds[1:], preds[0])
- else:
- incoming = frozenset()
- anno.setanno(node.value, self.in_label, incoming)
- gen, kill = self.get_gen_kill(node, incoming)
- anno.setanno(node.value, self.gen_label, gen)
- anno.setanno(node.value, self.kill_label, kill)
- anno.setanno(node.value, self.out_label, (incoming - kill) | gen)
-
- if hash(anno.getanno(node.value, self.out_label)) != before:
- for succ in node.next:
- self.visit(succ)
-
- def get_gen_kill(self, cfg_node, incoming):
- """Calculate Gen and Kill properties of a CFG node in dataflow analysis.
-
- A function which takes the CFG node as well as a set of incoming
- values. It must return a set of newly generated values by the statement as
- well as a set of deleted (killed) values.
-
- Args:
- cfg_node: A CfgNode instance.
- incoming:
- """
- raise NotImplementedError()
-
-
-class Backward(Forward):
- """Backward analysis on CFG."""
-
- def visit(self, cfg_node):
- # cfg_node.value is None for the exit node, which will be visited only once
- if not cfg_node.value:
- for pred in cfg_node.prev:
- self.visit(pred)
- return
-
- if anno.hasanno(cfg_node.value, self.in_label):
- before = hash(anno.getanno(cfg_node.value, self.in_label))
- else:
- before = None
- succs = [
- anno.getanno(succ.value, self.in_label)
- for succ in cfg_node.next
- if anno.hasanno(succ.value, self.in_label)
- ]
- if succs:
- incoming = functools.reduce(self.transfer_fn, succs[1:], succs[0])
- else:
- incoming = frozenset()
- anno.setanno(cfg_node.value, self.out_label, incoming)
- gen, kill = self.get_gen_kill(cfg_node, incoming)
- anno.setanno(cfg_node.value, self.gen_label, gen)
- anno.setanno(cfg_node.value, self.kill_label, kill)
- anno.setanno(cfg_node.value, self.in_label, (incoming - kill) | gen)
- if hash(anno.getanno(cfg_node.value, self.in_label)) != before:
- for pred in cfg_node.prev:
- self.visit(pred)
-
-
-def run_analyses(node, analyses):
- """Perform dataflow analysis on all functions within an AST.
-
- Args:
- node: An AST node on which to run dataflow analysis.
- analyses: Either an instance of the Forward or Backward dataflow analysis
- class, or a list or tuple of them.
-
- Returns:
- node: The node, but now with annotations on the AST nodes containing the
- results of the dataflow analyses.
- """
- if not isinstance(analyses, (tuple, list)):
- analyses = (analyses,)
- for analysis in analyses:
- if not isinstance(analysis, (Forward, Backward)):
- raise TypeError('not a valid forward analysis object')
-
- for child_node in gast.walk(node):
- if isinstance(child_node, gast.FunctionDef):
- cfg_obj = CfgBuilder().build_cfg(child_node)
- for analysis in analyses:
- if isinstance(analysis, Backward):
- analysis.visit(cfg_obj.exit)
- elif isinstance(analysis, Forward):
- analysis.visit(cfg_obj.entry)
- for analysis in analyses:
- PropagateAnalysis(analysis).visit(node)
- return node
-
-
-class Liveness(Backward):
- """Perform a liveness analysis.
-
- Each statement is annotated with a set of variables that may be used
- later in the program.
- """
-
- def __init__(self, source_info):
- super(Liveness, self).__init__('live', source_info)
-
- def get_gen_kill(self, node, _):
- # A variable's parents are live if it is live
- # e.g. x is live if x.y is live. This means gen needs to return
- # all parents of a variable (if it's an Attribute or Subscript).
- # This doesn't apply to kill (e.g. del x.y doesn't affect liveness of x)
- gen = activity.get_read(node.value, self.source_info)
- gen = functools.reduce(lambda left, right: left | right.support_set, gen,
- gen)
- kill = activity.get_updated(node.value, self.source_info)
- return gen, kill
-
-
-class ReachingDefinitions(Forward):
- """Perform reaching definition analysis.
-
- Each statement is annotated with a set of (variable, definition) pairs.
- """
-
- def __init__(self, source_info):
- super(ReachingDefinitions, self).__init__('definitions', source_info)
-
- def get_gen_kill(self, node, incoming):
- definitions = activity.get_updated(node.value, self.source_info)
- gen = frozenset((id_, node.value) for id_ in definitions)
- kill = frozenset(def_ for def_ in incoming if def_[0] in definitions)
- return gen, kill
-
-
-class Defined(Forward):
- """Perform defined variable analysis.
-
- Each statement is annotated with a set of variables which are guaranteed to
- be defined at that point.
- """
-
- def __init__(self, source_info):
- super(Defined, self).__init__(
- 'defined', source_info, transfer_fn=operator.and_)
-
- def get_gen_kill(self, node, _):
- gen = activity.get_updated(node.value, self.source_info)
- return gen, frozenset()
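
The removed Forward class above explains that the choice of transfer_fn decides
between may-analysis (OR: a fact holds if it holds on some incoming path) and
must-analysis (AND: the fact must hold on every incoming path, as in Defined).
A tiny, self-contained illustration of that merge difference; the sets below
are made-up facts and do not use the deleted CFG classes.

import operator
from functools import reduce

branch_a = frozenset({'x', 'y'})  # facts flowing in from one predecessor
branch_b = frozenset({'x'})       # facts flowing in from the other

may_facts = reduce(operator.or_, [branch_a, branch_b])    # union: some path
must_facts = reduce(operator.and_, [branch_a, branch_b])  # intersection: all paths

print(sorted(may_facts))   # ['x', 'y']
print(sorted(must_facts))  # ['x']
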
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/cfg_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/cfg_test.py
deleted file mode 100644
index 428ebbedca..0000000000
--- a/tensorflow/contrib/autograph/pyct/static_analysis/cfg_test.py
+++ /dev/null
@@ -1,303 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Tests for cfg module."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import functools
-
-import gast
-
-from tensorflow.contrib.autograph.pyct import anno
-from tensorflow.contrib.autograph.pyct import parser
-from tensorflow.contrib.autograph.pyct import qual_names
-from tensorflow.contrib.autograph.pyct import transformer
-from tensorflow.contrib.autograph.pyct.static_analysis import cfg
-from tensorflow.python.platform import test
-
-
-class CFGTest(test.TestCase):
-
- def _parse_and_analyze(self, test_fn):
- node, source = parser.parse_entity(test_fn)
- entity_info = transformer.EntityInfo(
- source_code=source,
- source_file=None,
- namespace={},
- arg_values=None,
- arg_types=None,
- owner_type=None)
- node = qual_names.resolve(node)
- return node, entity_info
-
- def _check_anno_matches(self, node, anno_name, var_names):
- if isinstance(var_names, str):
- var_names = (var_names,)
- qual_vars = set()
- for var_name in var_names:
- if isinstance(var_name, str):
- if '[' in var_name or ']' in var_name:
- raise ValueError('Annotation matching not supported with subscript.')
- if '.' not in var_name:
- qual_vars.add(qual_names.QN(var_name))
- else:
- attrs = var_name.split('.')
- this_qn = functools.reduce(qual_names.QN, attrs[1:],
- qual_names.QN(attrs[0]))
- qual_vars.add(this_qn)
- self.assertEqual(anno.getanno(node, anno_name), qual_vars)
-
- def test_reaching(self):
-
- def f(x):
- print(x)
- while True:
- x = x
- x = x
- return x
-
- node, ctx = self._parse_and_analyze(f)
- cfg.run_analyses(node, cfg.ReachingDefinitions(ctx))
- body = node.body[0].body
- # Only the argument reaches the expression
- def_in = anno.getanno(body[0], 'definitions_in')
- # One element, x, from arguments
- self.assertEqual(set(type(d[1]) for d in def_in), set((gast.arguments,)))
-
- while_body = body[1].body
- def_in = anno.getanno(while_body[0], 'definitions_in')
- # One definition, two possible sources.
- # - One from an assignment (if the loop is entered)
- # - The other from the arguments (if loop is not entered)
- self.assertEqual(
- set(type(d[1]) for d in def_in), set((gast.arguments, gast.Assign)))
-
- def_in = anno.getanno(while_body[1], 'definitions_in')
- # If we've reached this line, the only reaching definition of x is the
- # Assign node in previous line
- self.assertEqual(set(type(d[1]) for d in def_in), set((gast.Assign,)))
-
- def_in = anno.getanno(body[2], 'definitions_in')
- # Same situation as while_body[0]
- self.assertEqual(
- set(type(d[1]) for d in def_in), set((gast.arguments, gast.Assign)))
-
- def test_defined(self):
-
- def f(x):
- if x:
- y = 2 # pylint: disable=unused-variable
- return x
-
- node, ctx = self._parse_and_analyze(f)
- cfg.run_analyses(node, cfg.Defined(ctx))
- body = node.body[0].body
- # only x is for sure defined at the end
- self._check_anno_matches(body[1], 'defined_in', 'x')
- # at the end of the if body both x and y are defined
- if_body = body[0].body
- self._check_anno_matches(if_body[0], 'defined_out', ('x', 'y'))
-
- def _get_live_annotated_fnbody(self, f):
- node, ctx = self._parse_and_analyze(f)
- cfg.run_analyses(node, cfg.Liveness(ctx))
- body = node.body[0].body
- return body
-
- def test_live_straightline(self):
-
- def f1(x):
- a = g(x) # pylint: disable=undefined-variable
- b = h(a) # pylint: disable=undefined-variable, unused-variable
- return x
-
- body = self._get_live_annotated_fnbody(f1)
- self._check_anno_matches(body[1], 'live_in', ('a', 'h', 'x'))
- self._check_anno_matches(body[2], 'live_in', ('x'))
- self._check_anno_matches(body[0], 'live_in', ('g', 'h', 'x'))
- self._check_anno_matches(body[2], 'live_out', ())
-
- def test_live_stacked_conds_with_else(self):
-
- def f2(x, a): # pylint: disable=unused-argument
- if a > 0: # x should not be live
- x = 0
- if a > 1:
- x = 1
- else:
- x = 2
-
- body = self._get_live_annotated_fnbody(f2)
- self._check_anno_matches(body[0], 'live_in', ('a'))
- self._check_anno_matches(body[1], 'live_in', ('a'))
-
- def test_live_stacked_conds(self):
-
- def f3(x, a):
- if a > 0: # x and a should be live
- x = 0
- if a > 1: # x and a should be live_in
- x = 1
- return x # x should be live
-
- body = self._get_live_annotated_fnbody(f3)
- self._check_anno_matches(body[0], 'live_in', ('a', 'x'))
- self._check_anno_matches(body[1], 'live_in', ('a', 'x'))
- self._check_anno_matches(body[2], 'live_in', ('x'))
-
- def test_live_possibly_unused_cond(self):
-
- def f4(x, a):
- if a > 0: # x should be live
- x = 0
- x += 1
-
- body = self._get_live_annotated_fnbody(f4)
- self._check_anno_matches(body[0], 'live_in', ('x', 'a'))
- self._check_anno_matches(body[1], 'live_in', ('x'))
-
- def test_live_attribute_in_cond(self):
-
- def f5(x, a):
- if a > 0: # x.y should be live
- x.y = 0
- return x.y
-
- body = self._get_live_annotated_fnbody(f5)
- self._check_anno_matches(body[0], 'live_in', ('x', 'x.y', 'a'))
-
- def test_live_noop(self):
-
- def f6(x):
- return x # should this cause x.* to be live?
-
- body = self._get_live_annotated_fnbody(f6)
- self._check_anno_matches(body[0], 'live_in', ('x'))
-
- def test_live_loop(self):
-
- def f7(x, n):
- for i in range(n):
- x += i
- return x
-
- body = self._get_live_annotated_fnbody(f7)
- self._check_anno_matches(body[0], 'live_in', ('x', 'n', 'range'))
- self._check_anno_matches(body[1], 'live_in', ('x'))
-
- def test_live_context_manager(self):
-
- def f8(x, f):
- with f:
- x += 1
-
- body = self._get_live_annotated_fnbody(f8)
- self._check_anno_matches(body[0], 'live_in', ('f', 'x'))
-
- def test_node_equality(self):
- node_a = gast.parse('y = x').body[0]
- node_b = gast.parse('y = x').body[0]
- self.assertNotEqual(node_a, node_b)
-
- def test_nested_functions_defined(self):
-
- def f(x):
- y = x * 2
-
- def g(z):
- return z + y
-
- return g(x)
-
- node, ctx = self._parse_and_analyze(f)
- cfg.run_analyses(node, cfg.Defined(ctx))
-
- body = node.body[0].body
- self.assertEqual(
- anno.getanno(body[2], 'defined_in'),
- frozenset(map(qual_names.QN, ('g', 'x', 'y'))))
-
- # TODO(alexbw): CFG analysis doesn't currently cross FunctionDef boundaries.
- # NOTE: 'z' is easy to find, but 'y' is not identified as
- # defined, because CFG analysis is applied with each function separately.
- # fndef_body = body[1].body
- # self.assertEqual(
- # anno.getanno(fndef_body[0], 'defined_in'),
- # frozenset(map(qual_names.QN, ('z', 'y'))))
-
- def test_nested_functions_dont_leak_definitions(self):
-
- def f(x):
- print(x)
-
- def g():
- y = 2
- return y
-
- return g() # y is not defined here
-
- node, ctx = self._parse_and_analyze(f)
- cfg.run_analyses(node, cfg.Defined(ctx))
- body = node.body[0].body
- self.assertEqual(
- anno.getanno(body[2], 'defined_in'),
- frozenset(map(qual_names.QN, ('x', 'g'))))
-
- def test_loop_else(self):
-
- # Disabling useless-else-on-loop error, because 'break' and 'continue'
- # canonicalization are a separate analysis pass, and here we test
- # the CFG analysis in isolation.
- def for_orelse(x):
- y = 0
- for i in range(len(x)):
- x += i
- else: # pylint: disable=useless-else-on-loop
- y = 1
- return x, y
-
- def while_orelse(x, i):
- y = 0
- while x < 10:
- x += i
- else: # pylint: disable=useless-else-on-loop
- y = 1
- return x, y
-
- for f in (for_orelse, while_orelse):
- node, ctx = self._parse_and_analyze(f)
- cfg.run_analyses(node, cfg.ReachingDefinitions(ctx))
- body = node.body[0].body
- return_node = body[-1]
- reaching_defs = anno.getanno(return_node, 'definitions_in')
-
- # Y could be defined by Assign(Num(0)) or Assign(Num(1))
- # X could be defined as an argument or an AugAssign.
- y_defs = [node for var, node in reaching_defs if str(var) == 'y']
- x_defs = [node for var, node in reaching_defs if str(var) == 'x']
-
- self.assertEqual(set((gast.Assign,)), set(type(def_) for def_ in y_defs))
- self.assertEqual(set((0, 1)), set(def_.value.n for def_ in y_defs))
- self.assertEqual(len(y_defs), 2)
- self.assertEqual(
- set((gast.arguments, gast.AugAssign)),
- set(type(def_) for def_ in x_defs))
- self.assertEqual(len(x_defs), 2)
-
-
-if __name__ == '__main__':
- test.main()
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/live_values.py b/tensorflow/contrib/autograph/pyct/static_analysis/live_values.py
index 9ccb98f79a..2d8f922a45 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/live_values.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/live_values.py
@@ -16,7 +16,7 @@
Live values are extracted from the known execution context.
-Requires activity analysis annotations.
+Requires activity and reaching definitions analyses.
"""
from __future__ import absolute_import
@@ -45,14 +45,12 @@ class LiveValueResolver(transformer.Base):
def visit_Name(self, node):
self.generic_visit(node)
if isinstance(node.ctx, gast.Load):
- assert anno.hasanno(node, NodeAnno.IS_LOCAL), node
- symbol_is_local = anno.getanno(node, NodeAnno.IS_LOCAL)
- assert anno.hasanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY), node
- symbol_is_modified = anno.getanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY)
- assert anno.hasanno(node, NodeAnno.IS_PARAM), node
- symbol_is_param = anno.getanno(node, NodeAnno.IS_PARAM)
-
- if not symbol_is_local and not symbol_is_param:
+ defs = anno.getanno(node, anno.Static.DEFINITIONS, ())
+
+ is_defined = bool(defs)
+ has_single_def = len(defs) == 1
+
+ if not is_defined:
if node.id in self.literals:
anno.setanno(node, 'live_val', self.literals[node.id])
elif node.id in self.entity_info.namespace:
@@ -79,11 +77,13 @@ class LiveValueResolver(transformer.Base):
# TODO(mdan): Attempt to trace its value through the local chain.
# TODO(mdan): Use type annotations as fallback.
- if not symbol_is_modified:
- if node.id in self.entity_info.arg_values:
- obj = self.entity_info.arg_values[node.id]
- anno.setanno(node, 'live_val', obj)
- anno.setanno(node, 'fqn', (obj.__class__.__name__,))
+ if has_single_def:
+ def_, = defs
+ if def_.param_of is self.enclosing_entities[0]:
+ if node.id in self.entity_info.arg_values:
+ obj = self.entity_info.arg_values[node.id]
+ anno.setanno(node, 'live_val', obj)
+ anno.setanno(node, 'fqn', (obj.__class__.__name__,))
return node
def visit_Attribute(self, node):
@@ -91,12 +91,20 @@ class LiveValueResolver(transformer.Base):
if anno.hasanno(node.value, 'live_val'):
assert anno.hasanno(node.value, 'fqn')
parent_object = anno.getanno(node.value, 'live_val')
- if not hasattr(parent_object, node.attr):
- raise AttributeError('%s has no attribute %s' % (parent_object,
- node.attr))
+
anno.setanno(node, 'parent_type', type(parent_object))
- anno.setanno(node, 'live_val', getattr(parent_object, node.attr))
anno.setanno(node, 'fqn', anno.getanno(node.value, 'fqn') + (node.attr,))
+ if hasattr(parent_object, node.attr):
+ # This can happen when the attribute's creation and use depend on the
+ # same static condition, for example:
+ #
+ # if cond:
+ # foo.bar = baz
+ # if cond:
+ # x = foo.bar
+ #
+ anno.setanno(node, 'live_val', getattr(parent_object, node.attr))
+
# TODO(mdan): Investigate the role built-in annotations can play here.
elif anno.hasanno(node.value, 'type'):
parent_type = anno.getanno(node.value, 'type')
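
To summarize the new rule in visit_Name above: a loaded name with no reaching
definitions is resolved from literals or the entity's namespace, and argument
values are substituted only when the name has exactly one definition and that
definition is a parameter of the enclosing function. A rough stand-alone
sketch of this decision rule; namespace, arg_values and resolve_name are
illustrative names, and the parameter check is simplified relative to the
real param_of test.

namespace = {'tf': object()}  # module-level names visible to the function
arg_values = {'x': 42}        # example value supplied for parameter 'x'


def resolve_name(name, definitions, param_names):
  """Returns a live value for `name`, or None if it cannot be resolved."""
  if not definitions:
    # Never assigned locally: look it up in the enclosing namespace.
    return namespace.get(name)
  if len(definitions) == 1 and name in param_names:
    # Single reaching definition, and it is a function parameter.
    return arg_values.get(name)
  # Multiple definitions, or a non-parameter definition: leave unresolved.
  return None


print(resolve_name('tf', definitions=(), param_names={'x'}))             # namespace object
print(resolve_name('x', definitions=('param_def',), param_names={'x'}))  # 42
print(resolve_name('x', definitions=('d1', 'd2'), param_names={'x'}))    # None
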
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py
index 38af792777..fe3051179c 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/live_values_test.py
@@ -21,11 +21,13 @@ from __future__ import print_function
import six
from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis import activity
from tensorflow.contrib.autograph.pyct.static_analysis import live_values
+from tensorflow.contrib.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.contrib.autograph.pyct.static_analysis import type_info
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
@@ -48,7 +50,10 @@ class LiveValuesResolverTest(test.TestCase):
arg_types=arg_types,
owner_type=None)
node = qual_names.resolve(node)
+ graphs = cfg.build(node)
node = activity.resolve(node, entity_info)
+ node = reaching_definitions.resolve(node, entity_info, graphs,
+ reaching_definitions.Definition)
node = live_values.resolve(node, entity_info, literals)
node = type_info.resolve(node, entity_info)
node = live_values.resolve(node, entity_info, literals)
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/liveness.py b/tensorflow/contrib/autograph/pyct/static_analysis/liveness.py
new file mode 100644
index 0000000000..bf29d868a2
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/liveness.py
@@ -0,0 +1,200 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Live variable analysis.
+
+This analysis attaches to each control flow statement the set of symbols
+that are live at its exit.
+
+Requires activity analysis.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gast
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import annos
+
+
+class Analyzer(cfg.GraphVisitor):
+ """CFG visitor that performs liveness analysis at statement level."""
+
+ def __init__(self, graph):
+ super(Analyzer, self).__init__(graph)
+ # This allows communicating that nodes generate extra symbols,
+ # e.g. those that a function definition closes over.
+ self.extra_gen = {}
+
+ def init_state(self, _):
+ return set()
+
+ def visit_node(self, node):
+ prev_live_in = self.in_[node]
+
+ if anno.hasanno(node.ast_node, anno.Static.SCOPE):
+ node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
+
+ gen = node_scope.used | self.extra_gen.get(node.ast_node, frozenset())
+ # TODO(mdan): verify whether composites' parents need to be added.
+ # E.g. whether x needs to be added when x.y is live. Theoretically the
+ # activity analysis should track both, so that wouldn't be needed.
+ kill = node_scope.modified
+
+ live_out = set()
+ for n in node.next:
+ live_out |= self.in_[n]
+ live_in = gen | (live_out - kill)
+
+ else:
+ # Nodes that don't have a scope annotation are assumed not to touch any
+ # symbols.
+ # This Name node below is a literal name, e.g. False
+ assert isinstance(node.ast_node,
+ (gast.Name, gast.Continue, gast.Break)), type(
+ node.ast_node)
+ live_in = prev_live_in
+ live_out = live_in
+
+ self.in_[node] = live_in
+ self.out[node] = live_out
+
+ # TODO(mdan): Move this to the superclass?
+ return prev_live_in != live_in
+
+
+class WholeTreeAnalyzer(transformer.Base):
+ """Runs liveness analysis on each of the functions defined in the AST.
+
+ If a function defines other local functions, those will have separate CFGs.
+ However, dataflow analysis needs to tie up these CFGs to properly emulate the
+ effect of closures. In the case of liveness, the parent function's live
+ variables must account for the variables that are live at the entry of each
+ subfunction. For example:
+
+ def foo():
+ # baz is live here
+ def bar():
+ print(baz)
+
+ This analyzer runs liveness analysis on each individual function, accounting
+ for the effect above.
+ """
+
+ def __init__(self, source_info, graphs):
+ super(WholeTreeAnalyzer, self).__init__(source_info)
+ self.graphs = graphs
+ self.current_analyzer = None
+ self.analyzers = {}
+
+ def visit_FunctionDef(self, node):
+ parent_analyzer = self.current_analyzer
+ subgraph = self.graphs[node]
+
+ # Postorder tree processing makes this a bit complicated:
+ # 1. construct an analyzer object and put it on stack
+ # 2. recursively walk the subtree; this will initialize the analyzer's
+ # in_ state properly (done in a block below)
+ # 3. run the final analysis
+ analyzer = Analyzer(subgraph)
+ self.current_analyzer = analyzer
+ node = self.generic_visit(node)
+ analyzer.visit_reverse()
+
+ if parent_analyzer is not None:
+ # Wire the state between the two subgraphs' analyzers.
+ child_in_state = analyzer.in_[subgraph.entry]
+ # Exception: symbols modified in the child function are local to it
+ body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
+ for qn in body_scope.modified:
+ # Note: a function modifying the symbol doesn't make that symbol
+ # live at the function's entry. In fact when that happens it is
+ # probably a case of undefined assignment, like this:
+ #
+ # bar = 0
+ # def foo():
+ # print(bar) # bar is undefined here!
+ # bar = 1
+ #
+ # Hence we use discard and not remove below.
+ child_in_state.discard(qn)
+ parent_analyzer.extra_gen[node] = frozenset(child_in_state,)
+
+ self.analyzers[node] = analyzer
+ self.current_analyzer = parent_analyzer
+ return node
+
+ def visit_nonlocal(self, node):
+ raise NotImplementedError()
+
+ def visit_global(self, node):
+ raise NotImplementedError()
+
+
+class Annotator(transformer.Base):
+ """AST visitor that annotates each control flow block with live symbols."""
+
+ # Note: additional nodes may be added as needed.
+
+ def __init__(self, source_info, cross_function_analyzer):
+ super(Annotator, self).__init__(source_info)
+ self.cross_function_analyzer = cross_function_analyzer
+ self.current_analyzer = None
+
+ def visit_FunctionDef(self, node):
+ parent_analyzer = self.current_analyzer
+ self.current_analyzer = self.cross_function_analyzer.analyzers[node]
+
+ node = self.generic_visit(node)
+ self.current_analyzer = parent_analyzer
+ return node
+
+ def _aggregate_successors_live_in(self, node):
+ successors = self.current_analyzer.graph.stmt_next[node]
+ node_live_out = set()
+ for s in successors:
+ node_live_out.update(self.current_analyzer.in_[s])
+ anno.setanno(node, anno.Static.LIVE_VARS_OUT, frozenset(node_live_out))
+ node = self.generic_visit(node)
+ return node
+
+ def visit_If(self, node):
+ return self._aggregate_successors_live_in(node)
+
+ def visit_For(self, node):
+ return self._aggregate_successors_live_in(node)
+
+ def visit_While(self, node):
+ return self._aggregate_successors_live_in(node)
+
+
+def resolve(node, source_info, graphs):
+ """Resolves the live symbols at the exit of control flow statements.
+
+ Args:
+ node: ast.AST
+ source_info: transformer.SourceInfo
+ graphs: Dict[ast.FunctionDef, cfg.Graph]
+ Returns:
+ ast.AST
+ """
+ cross_function_analyzer = WholeTreeAnalyzer(source_info, graphs)
+ node = cross_function_analyzer.visit(node)
+ visitor = Annotator(source_info, cross_function_analyzer)
+ node = visitor.visit(node)
+ return node
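
The per-node update in Analyzer.visit_node above is the classical backward
liveness equation, live_in = gen | (live_out - kill), iterated to a fixed
point by visit_reverse. A hand computation on a two-statement body, using
plain sets instead of the cfg/anno machinery (statement gen/kill sets are
hard-coded for illustration):

stmts = [
    {'gen': {'x'}, 'kill': {'y'}},  # y = x + 1: reads x, writes y
    {'gen': {'y'}, 'kill': set()},  # return y: reads y
]

live_out = set()  # nothing is live after the final statement
for stmt in reversed(stmts):
  stmt['live_out'] = set(live_out)
  stmt['live_in'] = stmt['gen'] | (live_out - stmt['kill'])
  live_out = stmt['live_in']  # feeds the statement before it

print(sorted(stmts[0]['live_in']))  # ['x']: only x is live at function entry
print(sorted(stmts[1]['live_in']))  # ['y']
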
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/liveness_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/liveness_test.py
new file mode 100644
index 0000000000..d53adb28af
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/liveness_test.py
@@ -0,0 +1,149 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for liveness module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.contrib.autograph.pyct import qual_names
+from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import activity
+from tensorflow.contrib.autograph.pyct.static_analysis import liveness
+from tensorflow.python.platform import test
+
+
+class LivenessTest(test.TestCase):
+
+ def _parse_and_analyze(self, test_fn):
+ node, source = parser.parse_entity(test_fn)
+ entity_info = transformer.EntityInfo(
+ source_code=source,
+ source_file=None,
+ namespace={},
+ arg_values=None,
+ arg_types=None,
+ owner_type=None)
+ node = qual_names.resolve(node)
+ node = activity.resolve(node, entity_info)
+ graphs = cfg.build(node)
+ liveness.resolve(node, entity_info, graphs)
+ return node
+
+ def assertHasLiveOut(self, node, expected):
+ live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
+ live_out_str = set(str(v) for v in live_out)
+ if not expected:
+ expected = ()
+ if not isinstance(expected, tuple):
+ expected = (expected,)
+ self.assertSetEqual(live_out_str, set(expected))
+
+ def test_stacked_if(self):
+
+ def test_fn(x, a):
+ if a > 0:
+ x = 0
+ if a > 1:
+ x = 1
+ return x
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasLiveOut(fn_body[0], ('a', 'x'))
+ self.assertHasLiveOut(fn_body[1], 'x')
+
+ def test_stacked_if_else(self):
+
+ def test_fn(x, a):
+ if a > 0:
+ x = 0
+ if a > 1:
+ x = 1
+ else:
+ x = 2
+ return x
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasLiveOut(fn_body[0], 'a')
+ self.assertHasLiveOut(fn_body[1], 'x')
+
+ def test_for_basic(self):
+
+ def test_fn(x, a):
+ for i in range(a):
+ x += i
+ return x
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasLiveOut(fn_body[0], 'x')
+
+ def test_attributes(self):
+
+ def test_fn(x, a):
+ if a > 0:
+ x.y = 0
+ return x.y
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasLiveOut(fn_body[0], ('x.y', 'x'))
+
+ def test_nested_functions(self):
+
+ def test_fn(a, b):
+ if b:
+ a = []
+
+ def foo():
+ return a
+
+ foo()
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasLiveOut(fn_body[0], 'a')
+
+ def test_nested_functions_isolation(self):
+
+ def test_fn(b):
+ if b:
+ a = 0 # pylint:disable=unused-variable
+
+ def child():
+ max(a) # pylint:disable=used-before-assignment
+ a = 1
+ return a
+
+ child()
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasLiveOut(fn_body[0], 'max')
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py
new file mode 100644
index 0000000000..9a84f1231c
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions.py
@@ -0,0 +1,301 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Reaching definition analysis.
+
+This analysis attaches a set of Definition objects to each symbol, one
+for each distinct definition that may reach it. The Definition objects are
+mutable and may be used by subsequent analyses to further annotate data like
+static type and value information.
+The analysis also attaches the set of symbols defined at the entry of
+control flow statements.
+
+Requires activity analysis.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gast
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import annos
+
+
+class Definition(object):
+ """Definition objects describe a unique definition of a variable.
+
+ Subclasses of this may be used by passing an appropriate factory function to
+ resolve.
+
+ Attributes:
+ param_of: Optional[ast.AST]
+ """
+
+ def __init__(self):
+ self.param_of = None
+
+ def __repr__(self):
+ return '%s[%d]' % (self.__class__.__name__, id(self))
+
+
+class _NodeState(object):
+ """Abstraction for the state of the CFG walk for reaching definition analysis.
+
+ This is a value type. Only implements the strictly necessary operators.
+
+ Attributes:
+ value: Dict[qual_names.QN, Set[Definition, ...]], the defined symbols and
+ their possible definitions
+ """
+
+ def __init__(self, init_from=None):
+ if init_from:
+ if isinstance(init_from, _NodeState):
+ self.value = {
+ s: set(other_infos) for s, other_infos in init_from.value.items()
+ }
+ elif isinstance(init_from, dict):
+ self.value = {s: set((init_from[s],)) for s in init_from}
+ else:
+ assert False, init_from
+ else:
+ self.value = {}
+
+ def __eq__(self, other):
+ if frozenset(self.value.keys()) != frozenset(other.value.keys()):
+ return False
+ ret = all(self.value[s] == other.value[s] for s in self.value)
+ return ret
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __or__(self, other):
+ assert isinstance(other, _NodeState)
+ result = _NodeState(self)
+ for s, other_infos in other.value.items():
+ if s in result.value:
+ result.value[s].update(other_infos)
+ else:
+ result.value[s] = set(other_infos)
+ return result
+
+ def __sub__(self, other):
+ assert isinstance(other, set)
+ result = _NodeState(self)
+ for s in other:
+ result.value.pop(s, None)
+ return result
+
+ def __repr__(self):
+ return 'NodeState[%s]=%s' % (id(self), repr(self.value))
+
+
+class Analyzer(cfg.GraphVisitor):
+ """CFG visitor that determines reaching definitions at statement level."""
+
+ def __init__(self, graph, definition_factory):
+ self._definition_factory = definition_factory
+ super(Analyzer, self).__init__(graph)
+ # This allows communicating that nodes have extra reaching definitions,
+ # e.g. those that a function closes over.
+ self.extra_in = {}
+
+ self.gen_map = {}
+
+ def init_state(self, _):
+ return _NodeState()
+
+ def visit_node(self, node):
+ prev_defs_out = self.out[node]
+
+ defs_in = _NodeState(self.extra_in.get(node.ast_node, None))
+ for n in node.prev:
+ defs_in |= self.out[n]
+
+ if anno.hasanno(node.ast_node, anno.Static.SCOPE):
+ node_scope = anno.getanno(node.ast_node, anno.Static.SCOPE)
+ # The definition objects created by each node must be singletons because
+ # their ids are used in equality checks.
+ if node not in self.gen_map:
+ node_symbols = {}
+ for s in node_scope.modified:
+ def_ = self._definition_factory()
+ if s in node_scope.params:
+ def_.param_of = node_scope.params[s]
+ node_symbols[s] = def_
+ self.gen_map[node] = _NodeState(node_symbols)
+
+ gen = self.gen_map[node]
+ kill = node_scope.modified
+ defs_out = gen | (defs_in - kill)
+
+ else:
+ # Nodes that don't have a scope annotation are assumed not to touch any
+ # symbols.
+ # This Name node below is a literal name, e.g. False
+ # This can also happen if activity.py forgot to annotate the node with a
+ # scope object.
+ assert isinstance(
+ node.ast_node,
+ (gast.Name, gast.Break, gast.Continue, gast.Raise)), (node.ast_node,
+ node)
+ defs_out = defs_in
+
+ self.in_[node] = defs_in
+ self.out[node] = defs_out
+
+ # TODO(mdan): Move this to the superclass?
+ return prev_defs_out != defs_out
+
+
+class TreeAnnotator(transformer.Base):
+ """AST visitor that annotates each symbol name with its reaching definitions.
+
+ Simultaneously, the visitor runs the dataflow analysis on each function node,
+ accounting for the effect of closures. For example:
+
+ def foo():
+ bar = 1
+ def baz():
+ # bar = 1 reaches here
+ """
+
+ def __init__(self, source_info, graphs, definition_factory):
+ super(TreeAnnotator, self).__init__(source_info)
+ self.definition_factory = definition_factory
+ self.graphs = graphs
+ self.current_analyzer = None
+ self.current_cfg_node = None
+
+ def visit_FunctionDef(self, node):
+ parent_analyzer = self.current_analyzer
+ subgraph = self.graphs[node]
+
+ # Preorder tree processing:
+ # 1. if this is a child function, the parent was already analyzed and it
+ # has the proper state value for the subgraph's entry
+ # 2. analyze the current function body
+ # 3. recursively walk the subtree; child functions will be processed
+ analyzer = Analyzer(subgraph, self.definition_factory)
+ if parent_analyzer is not None:
+ # Wire the state between the two subgraphs' analyzers.
+ parent_out_state = parent_analyzer.out[parent_analyzer.graph.index[node]]
+ # Exception: symbols modified in the child function are local to it
+ body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
+ parent_out_state -= body_scope.modified
+ analyzer.extra_in[node.args] = parent_out_state
+
+ # Complete the analysis for the local function and annotate its body.
+ analyzer.visit_forward()
+
+ # Recursively process any remaining subfunctions.
+ self.current_analyzer = analyzer
+ # Note: not visiting name, decorator_list and returns because they don't
+ # apply to this analysis.
+ # TODO(mdan): Should we still process the function name?
+ node.args = self.visit(node.args)
+ node.body = self.visit_block(node.body)
+ self.current_analyzer = parent_analyzer
+
+ return node
+
+ def visit_nonlocal(self, node):
+ raise NotImplementedError()
+
+ def visit_global(self, node):
+ raise NotImplementedError()
+
+ def visit_Name(self, node):
+ if self.current_analyzer is None:
+ # Names may appear outside function defs - for example in class
+ # definitions.
+ return node
+
+ analyzer = self.current_analyzer
+ cfg_node = self.current_cfg_node
+
+ assert cfg_node is not None, 'name node outside of any statement?'
+
+ qn = anno.getanno(node, anno.Basic.QN)
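+    # For reads (Load), the reaching definitions are those coming into the
+    # node; for writes, the definition created at this node is itself included,
+    # so the node's out state is used instead.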
+ if isinstance(node.ctx, gast.Load):
+ anno.setanno(node, anno.Static.DEFINITIONS,
+ tuple(analyzer.in_[cfg_node].value.get(qn, ())))
+ else:
+ anno.setanno(node, anno.Static.DEFINITIONS,
+ tuple(analyzer.out[cfg_node].value.get(qn, ())))
+
+ return node
+
+ def _aggregate_predecessors_defined_in(self, node):
+ preds = self.current_analyzer.graph.stmt_prev[node]
+ node_defined_in = set()
+ for p in preds:
+ node_defined_in |= set(self.current_analyzer.out[p].value.keys())
+ anno.setanno(node, anno.Static.DEFINED_VARS_IN, frozenset(node_defined_in))
+
+ def visit_If(self, node):
+ self._aggregate_predecessors_defined_in(node)
+ return self.generic_visit(node)
+
+ def visit_For(self, node):
+ self._aggregate_predecessors_defined_in(node)
+
+ # Manually accounting for the shortcoming described in
+ # cfg.AstToCfg.visit_For.
+ parent = self.current_cfg_node
+ self.current_cfg_node = self.current_analyzer.graph.index[node.iter]
+ node.target = self.visit(node.target)
+ self.current_cfg_node = parent
+
+ node.iter = self.visit(node.iter)
+ node.body = self.visit_block(node.body)
+ node.orelse = self.visit_block(node.orelse)
+
+ return node
+
+ def visit_While(self, node):
+ self._aggregate_predecessors_defined_in(node)
+ return self.generic_visit(node)
+
+ def visit(self, node):
+ parent = self.current_cfg_node
+
+ if (self.current_analyzer is not None and
+ node in self.current_analyzer.graph.index):
+ self.current_cfg_node = self.current_analyzer.graph.index[node]
+ node = super(TreeAnnotator, self).visit(node)
+
+ self.current_cfg_node = parent
+ return node
+
+
+def resolve(node, source_info, graphs, definition_factory):
+ """Resolves reaching definitions for each symbol.
+
+ Args:
+ node: ast.AST
+ source_info: transformer.SourceInfo
+ graphs: Dict[ast.FunctionDef, cfg.Graph]
+ definition_factory: Callable[[], Definition]
+ Returns:
+ ast.AST
+ """
+ visitor = TreeAnnotator(source_info, graphs, definition_factory)
+ node = visitor.visit(node)
+ return node
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions_test.py
new file mode 100644
index 0000000000..243fe804b2
--- /dev/null
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/reaching_definitions_test.py
@@ -0,0 +1,263 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for reaching_definitions module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
+from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.contrib.autograph.pyct import qual_names
+from tensorflow.contrib.autograph.pyct import transformer
+from tensorflow.contrib.autograph.pyct.static_analysis import activity
+from tensorflow.contrib.autograph.pyct.static_analysis import reaching_definitions
+from tensorflow.python.platform import test
+
+
+class DefinitionInfoTest(test.TestCase):
+
+ def _parse_and_analyze(self, test_fn):
+ node, source = parser.parse_entity(test_fn)
+ entity_info = transformer.EntityInfo(
+ source_code=source,
+ source_file=None,
+ namespace={},
+ arg_values=None,
+ arg_types=None,
+ owner_type=None)
+ node = qual_names.resolve(node)
+ node = activity.resolve(node, entity_info)
+ graphs = cfg.build(node)
+ node = reaching_definitions.resolve(node, entity_info, graphs,
+ reaching_definitions.Definition)
+ return node
+
+ def assertHasDefs(self, node, num):
+ defs = anno.getanno(node, anno.Static.DEFINITIONS)
+ self.assertEqual(len(defs), num)
+ for r in defs:
+ self.assertIsInstance(r, reaching_definitions.Definition)
+
+ def assertHasDefinedIn(self, node, expected):
+ defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
+ defined_in_str = set(str(v) for v in defined_in)
+ if not expected:
+ expected = ()
+ if not isinstance(expected, tuple):
+ expected = (expected,)
+ self.assertSetEqual(defined_in_str, set(expected))
+
+ def assertSameDef(self, first, second):
+ self.assertHasDefs(first, 1)
+ self.assertHasDefs(second, 1)
+ self.assertIs(
+ anno.getanno(first, anno.Static.DEFINITIONS)[0],
+ anno.getanno(second, anno.Static.DEFINITIONS)[0])
+
+ def assertNotSameDef(self, first, second):
+ self.assertHasDefs(first, 1)
+ self.assertHasDefs(second, 1)
+ self.assertIsNot(
+ anno.getanno(first, anno.Static.DEFINITIONS)[0],
+ anno.getanno(second, anno.Static.DEFINITIONS)[0])
+
+ def test_conditional(self):
+
+ def test_fn(a, b):
+ a = []
+ if b:
+ a = []
+ return a
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasDefs(fn_body[0].targets[0], 1)
+ self.assertHasDefs(fn_body[1].test, 1)
+ self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
+ self.assertHasDefs(fn_body[2].value, 2)
+
+ self.assertHasDefinedIn(fn_body[1], ('a', 'b'))
+
+ def test_while(self):
+
+ def test_fn(a):
+ max(a)
+ while True:
+ a = a
+ a = a
+ return a
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasDefs(fn_body[0].value.args[0], 1)
+ self.assertHasDefs(fn_body[1].body[0].targets[0], 1)
+ self.assertHasDefs(fn_body[1].body[1].targets[0], 1)
+ self.assertHasDefs(fn_body[1].body[1].value, 1)
+ # The loop does have an invariant test, but the CFG doesn't know that.
+ self.assertHasDefs(fn_body[1].body[0].value, 2)
+ self.assertHasDefs(fn_body[2].value, 2)
+
+ def test_while_else(self):
+
+ def test_fn(x, i):
+ y = 0
+ while x:
+ x += i
+ if i:
+ break
+ else:
+ y = 1
+ return x, y
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasDefs(fn_body[0].targets[0], 1)
+ self.assertHasDefs(fn_body[1].test, 2)
+ self.assertHasDefs(fn_body[1].body[0].target, 1)
+ self.assertHasDefs(fn_body[1].body[1].test, 1)
+ self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
+ self.assertHasDefs(fn_body[2].value.elts[0], 2)
+ self.assertHasDefs(fn_body[2].value.elts[1], 2)
+
+ def test_for_else(self):
+
+ def test_fn(x, i):
+ y = 0
+ for i in x:
+ x += i
+ if i:
+ break
+ else:
+ continue
+ else:
+ y = 1
+ return x, y
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasDefs(fn_body[0].targets[0], 1)
+ self.assertHasDefs(fn_body[1].target, 1)
+ self.assertHasDefs(fn_body[1].body[0].target, 1)
+ self.assertHasDefs(fn_body[1].body[1].test, 1)
+ self.assertHasDefs(fn_body[1].orelse[0].targets[0], 1)
+ self.assertHasDefs(fn_body[2].value.elts[0], 2)
+ self.assertHasDefs(fn_body[2].value.elts[1], 2)
+
+ def test_nested_functions(self):
+
+ def test_fn(a, b):
+ a = []
+ if b:
+ a = []
+
+ def foo():
+ return a
+
+ foo()
+
+ return a
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+ def_of_a_in_if = fn_body[1].body[0].targets[0]
+
+ self.assertHasDefs(fn_body[0].targets[0], 1)
+ self.assertHasDefs(fn_body[1].test, 1)
+ self.assertHasDefs(def_of_a_in_if, 1)
+ self.assertHasDefs(fn_body[2].value, 2)
+
+ inner_fn_body = fn_body[1].body[1].body
+ self.assertSameDef(inner_fn_body[0].value, def_of_a_in_if)
+
+ def test_nested_functions_isolation(self):
+
+ def test_fn(a):
+ a = 0
+
+ def child():
+ a = 1
+ return a
+
+ child()
+ return a
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ parent_return = fn_body[3]
+ child_return = fn_body[1].body[1]
+ # The assignment `a = 1` makes `a` local to `child`.
+ self.assertNotSameDef(parent_return.value, child_return.value)
+
+ def test_function_call_in_with(self):
+
+ def foo(_):
+ pass
+
+ def test_fn(a):
+ with foo(a):
+ return a
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ self.assertHasDefs(fn_body[0].items[0].context_expr.func, 0)
+ self.assertHasDefs(fn_body[0].items[0].context_expr.args[0], 1)
+
+ def test_mutation_subscript(self):
+
+ def test_fn(a):
+ l = []
+ l[0] = a
+ return l
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ creation = fn_body[0].targets[0]
+ mutation = fn_body[1].targets[0].value
+ use = fn_body[2].value
+ self.assertSameDef(creation, mutation)
+ self.assertSameDef(creation, use)
+
+ def test_replacement(self):
+
+ def foo(a):
+ return a
+
+ def test_fn(a):
+ a = foo(a)
+ return a
+
+ node = self._parse_and_analyze(test_fn)
+ fn_body = node.body[0].body
+
+ param = node.body[0].args.args[0]
+ source = fn_body[0].value.args[0]
+ target = fn_body[0].targets[0]
+ retval = fn_body[1].value
+ self.assertSameDef(param, source)
+ self.assertNotSameDef(source, target)
+ self.assertSameDef(target, retval)
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/type_info.py b/tensorflow/contrib/autograph/pyct/static_analysis/type_info.py
index a229c288a8..835d5199fa 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/type_info.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/type_info.py
@@ -43,9 +43,8 @@ from __future__ import print_function
import gast
-from tensorflow.contrib.autograph import utils
from tensorflow.contrib.autograph.pyct import anno
-from tensorflow.contrib.autograph.pyct import parser
+from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.python.util import tf_inspect
@@ -166,7 +165,6 @@ class TypeInfoResolver(transformer.Base):
definition = self.scope.getval(qn)
anno.copyanno(definition, node, 'type')
anno.copyanno(definition, node, 'type_fqn')
- anno.setanno(node, 'definition', definition)
# TODO(mdan): Remove this when the directives module is in.
anno.copyanno(definition, node, 'element_type')
@@ -198,52 +196,18 @@ class TypeInfoResolver(transformer.Base):
def visit_With(self, node):
for item in node.items:
if item.optional_vars is not None:
- self.apply_to_single_assignments((item.optional_vars,),
- item.context_expr,
- self._process_variable_assignment)
+ ast_util.apply_to_single_assignments((item.optional_vars,),
+ item.context_expr,
+ self._process_variable_assignment)
self.generic_visit(node)
return node
def visit_Assign(self, node):
self.generic_visit(node)
- self.apply_to_single_assignments(
- node.targets, node.value, self._process_variable_assignment)
+ ast_util.apply_to_single_assignments(node.targets, node.value,
+ self._process_variable_assignment)
return node
- # TODO(mdan): Remove as soon as the new directives module is ready.
- def visit_Call(self, node):
- if anno.hasanno(node.func, 'live_val'):
- # Symbols targeted by the "set_type" marker function are assigned the data
- # type that it specified.
- if anno.getanno(node.func, 'live_val') is utils.set_element_type:
-
- if len(node.args) < 2 or len(node.args) > 3:
- raise ValueError('"%s" must have either two or three parameters'
- % self.context.type_annotation_func)
- if len(node.args) == 2:
- target_arg, type_arg = node.args
- shape_arg = parser.parse_expression('None')
- else:
- target_arg, type_arg, shape_arg = node.args
- if not anno.hasanno(target_arg, anno.Basic.QN):
- raise ValueError('the first argument of "%s" must by a symbol' %
- utils.set_element_type)
- # TODO(mdan): This is vulnerable to symbol renaming.
- element_type = type_arg
- element_shape = shape_arg
-
- target_symbol = anno.getanno(target_arg, anno.Basic.QN)
- # Find the definition of this symbol and annotate it with the given
- # data type. That in turn will cause future uses of the symbol
- # to receive the same type annotation.
- definition = self.scope.getval(target_symbol)
- anno.setanno(node, 'element_type', element_type)
- anno.setanno(node, 'element_shape', element_shape)
- anno.setanno(definition, 'element_type', element_type)
- anno.setanno(definition, 'element_shape', element_shape)
- # TODO(mdan): Should we update references between definition and here?
- return self.generic_visit(node)
-
def resolve(node, context):
return TypeInfoResolver(context).visit(node)
diff --git a/tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py b/tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py
index 32b1148ab2..404311ba24 100644
--- a/tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py
+++ b/tensorflow/contrib/autograph/pyct/static_analysis/type_info_test.py
@@ -19,11 +19,13 @@ from __future__ import division
from __future__ import print_function
from tensorflow.contrib.autograph.pyct import anno
+from tensorflow.contrib.autograph.pyct import cfg
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import qual_names
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis import activity
from tensorflow.contrib.autograph.pyct.static_analysis import live_values
+from tensorflow.contrib.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.contrib.autograph.pyct.static_analysis import type_info
from tensorflow.python.client import session
from tensorflow.python.platform import test
@@ -69,7 +71,10 @@ class TypeInfoResolverTest(test.TestCase):
arg_types=arg_types,
owner_type=None)
node = qual_names.resolve(node)
+ graphs = cfg.build(node)
node = activity.resolve(node, entity_info)
+ node = reaching_definitions.resolve(node, entity_info, graphs,
+ reaching_definitions.Definition)
node = live_values.resolve(node, entity_info, {})
node = type_info.resolve(node, entity_info)
node = live_values.resolve(node, entity_info, {})
diff --git a/tensorflow/contrib/autograph/pyct/templates.py b/tensorflow/contrib/autograph/pyct/templates.py
index 9c479ebc2f..72d1d3b269 100644
--- a/tensorflow/contrib/autograph/pyct/templates.py
+++ b/tensorflow/contrib/autograph/pyct/templates.py
@@ -26,6 +26,7 @@ import textwrap
import gast
+from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import qual_names
@@ -43,39 +44,65 @@ class ReplaceTransformer(gast.NodeTransformer):
"""
self.replacements = replacements
self.in_replacements = False
+ self.preserved_annos = {
+ anno.Basic.ORIGIN,
+ anno.Basic.SKIP_PROCESSING,
+ anno.Static.ORIG_DEFINITIONS,
+ }
+
+ def _prepare_replacement(self, replaced, key):
+ """Prepares a replacement AST that's safe to swap in for a node.
+
+ Args:
+ replaced: ast.AST, the node being replaced
+ key: Hashable, the key of the replacement AST
+ Returns:
+ ast.AST, the replacement AST
+ """
+ repl = self.replacements[key]
+
+ new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos)
+ if isinstance(new_nodes, gast.AST):
+ new_nodes = [new_nodes]
+
+ return new_nodes
def visit_Expr(self, node):
- if (isinstance(node.value, gast.Name) and
- node.value.id in self.replacements):
- return self.visit(node.value)
- self.generic_visit(node)
- return node
+ # When replacing a placeholder with an entire statement, the replacement
+ # must stand on its own and not be wrapped in an Expr.
+ new_value = self.visit(node.value)
+ if new_value is node.value:
+ return node
+ return new_value
def visit_keyword(self, node):
- if node.arg in self.replacements:
- repl = self.replacements[node.arg]
- if isinstance(repl, gast.keyword):
- return repl
- elif (isinstance(repl, (list, tuple)) and repl and
- all(isinstance(r, gast.keyword) for r in repl)):
- return repl
- # TODO(mdan): We may allow replacing with a string as well.
- # For example, if one wanted to replace foo with bar in foo=baz, then
- # we could allow changing just node arg, so that we end up with bar=baz.
- raise ValueError(
- 'a keyword argument may only be replaced by another keyword or a '
- 'non-empty list of keywords. Found: %s' % repl)
- return self.generic_visit(node)
+ if node.arg not in self.replacements:
+ return self.generic_visit(node)
+
+ repl = self._prepare_replacement(node, node.arg)
+ if isinstance(repl, gast.keyword):
+ return repl
+ elif (repl and isinstance(repl, (list, tuple)) and
+ all(isinstance(r, gast.keyword) for r in repl)):
+ return repl
+ # TODO(mdan): We may allow replacing with a string as well.
+ # For example, if one wanted to replace foo with bar in foo=baz, then
+ # we could allow changing just node arg, so that we end up with bar=baz.
+ raise ValueError(
+ 'a keyword argument may only be replaced by another keyword or a '
+ 'non-empty list of keywords. Found: %s' % repl)
def visit_FunctionDef(self, node):
node = self.generic_visit(node)
- if node.name in self.replacements:
- repl = self.replacements[node.name]
- if not isinstance(repl, (gast.Name, ast.Name)):
- raise ValueError(
- 'a function name can only be replaced by a Name node. Found: %s' %
- repl)
- node.name = repl.id
+ if node.name not in self.replacements:
+ return node
+
+ repl = self.replacements[node.name]
+ if not isinstance(repl, (gast.Name, ast.Name)):
+ raise ValueError(
+ 'a function name can only be replaced by a Name node. Found: %s' %
+ repl)
+ node.name = repl.id
return node
def _check_has_context(self, node):
@@ -148,6 +175,7 @@ class ReplaceTransformer(gast.NodeTransformer):
node = self.generic_visit(node)
if node.attr not in self.replacements:
return node
+
repl = self.replacements[node.attr]
if not isinstance(repl, gast.Name):
raise ValueError(
@@ -159,9 +187,7 @@ class ReplaceTransformer(gast.NodeTransformer):
if node.id not in self.replacements:
return node
- new_nodes = ast_util.copy_clean(self.replacements[node.id])
- if isinstance(new_nodes, gast.AST):
- new_nodes = [new_nodes]
+ new_nodes = self._prepare_replacement(node, node.id)
# Preserve the target context.
for n in new_nodes:
@@ -182,7 +208,7 @@ class ReplaceTransformer(gast.NodeTransformer):
def _convert_to_ast(n):
- """Convert from a known data type to AST."""
+ """Converts from a known data type to AST."""
if isinstance(n, str):
# Note: the node will receive the ctx value from the template, see
# ReplaceTransformer.visit_Name.
@@ -197,7 +223,7 @@ def _convert_to_ast(n):
def replace(template, **replacements):
- """Replace placeholders in a Python template.
+ """Replaces placeholders in a Python template.
AST Name and Tuple nodes always receive the context that inferred from
the template. However, when replacing more complex nodes (that can potentially
diff --git a/tensorflow/contrib/autograph/pyct/templates_test.py b/tensorflow/contrib/autograph/pyct/templates_test.py
index a01f8bf04c..a8bbc5a4de 100644
--- a/tensorflow/contrib/autograph/pyct/templates_test.py
+++ b/tensorflow/contrib/autograph/pyct/templates_test.py
@@ -151,17 +151,13 @@ class TemplatesTest(test.TestCase):
self.assertEqual(node.func.id, 'bar')
self.assertEqual(node.func.args[0].id, 'baz')
- def replace_as_expression_restrictions(self):
+ def test_replace_as_expression_restrictions(self):
template = """
foo(a)
bar(b)
"""
with self.assertRaises(ValueError):
templates.replace_as_expression(template)
- with self.assertRaises(ValueError):
- templates.replace('')
- with self.assertRaises(ValueError):
- templates.replace('a = b')
if __name__ == '__main__':
diff --git a/tensorflow/contrib/autograph/pyct/transformer.py b/tensorflow/contrib/autograph/pyct/transformer.py
index 7655811830..3e8906823e 100644
--- a/tensorflow/contrib/autograph/pyct/transformer.py
+++ b/tensorflow/contrib/autograph/pyct/transformer.py
@@ -59,6 +59,103 @@ class EntityInfo(object):
self.owner_type = owner_type
+class _StateStack(object):
+ """Typed stack abstraction.
+
+ This class provides syntactic sugar for a stack of objects of known
+  type. It allows accessing attributes of the object at the top of the stack
+  directly on this object, which makes for very terse syntax.
+
+ For example, this code:
+
+ stack = _StateStack(Foo)
+ stack.enter()
+ stack.bar
+
+ Is equivalent to:
+
+ stack = []
+ stack.append(Foo())
+ foo = stack[-1]
+ foo.bar
+
+ See _State for more on how this is used.
+
+ Attributes:
+ type: Any, the type of objects that this stack holds
+ level: int, the current stack depth
+ value: Any, the instance of the object at the top of the stack
+ """
+
+ def __init__(self, type_):
+ # Because we override __setattr__, we need to attach these attributes using
+ # the superclass' setattr.
+ object.__setattr__(self, 'type', type_)
+ object.__setattr__(self, '_stack', [])
+ self.enter()
+
+ def enter(self):
+ self._stack.append(self.type())
+
+ def exit(self):
+ return self._stack.pop()
+
+ @property
+ def level(self):
+ return len(self._stack)
+
+ @property
+ def value(self):
+ return self._stack[-1]
+
+ def __getattr__(self, key):
+ return getattr(self._stack[-1], key)
+
+ def __setattr__(self, key, value):
+ setattr(self._stack[-1], key, value)
+
+
+class _State(object):
+ """Supporting class for nested scope variable space for converter.Base.
+
+ This structure offers syntactic sugar over a dict of stacks of objects
+ of known type. These structures are useful to keep state during AST walks.
+ Multiple different scopes can be tracked in parallel. For example:
+
+ s = _State()
+
+ s[foo].enter()
+ s[bar].enter() # this will not affect s[foo]
+
+ Element access has special semantics:
+ * keys are a data type
+ * element values are _StateStack(type=key) objects
+ * missing elements are automatically added, similarly to defaultdict
+
+  For example, the following block:
+
+    s = _State()
+ s[Foo]
+
+ Is equivalent to:
+
+ s = {}
+ if Foo not in s:
+ s[Foo] = Foo()
+ s[Foo]
+
+ See Base for how it's used.
+ """
+
+ def __init__(self):
+ self._value = {}
+
+ def __getitem__(self, key):
+ if key not in self._value:
+ self._value[key] = _StateStack(key)
+ return self._value[key]
+
+
class Base(gast.NodeTransformer):
"""Base class for general-purpose code transformers transformers.
@@ -71,6 +168,27 @@ class Base(gast.NodeTransformer):
(possibly nested) scopes, use enter/exit_local_scope and set/get_local.
You must call enter/exit_local_scope manually, but the transformer detects
when they are not properly paired.
+
+ The transformer allows keeping state across calls to visit_* that is local to
+ arbitrary nodes and their descendants, using the self.state attribute.
+ Multiple independent scopes are allowed and automatically constructed.
+
+ For example, to keep track of the If node that encloses any Name node, one can
+ write:
+
+ class FooType(object):
+
+ def __init__(self):
+ self.foo_property = None
+
+ class DummyTransformer(Base):
+
+ def visit_If(self, node):
+ self.state[FooType].enter()
+ self.state[FooType].foo_property = node
+
+ def visit_Name(self, node):
+ self.state[FooType].foo_property # will hold the innermost enclosing if
"""
# TODO(mdan): Document all extra features.
@@ -92,6 +210,12 @@ class Base(gast.NodeTransformer):
self._local_scope_state = []
self.enter_local_scope()
+ # Allows scoping of local variables to keep state across calls to visit_*
+    # methods. Multiple scope hierarchies may exist and are keyed by tag. A
+    # scope is valid at one or more nodes and all its children. Scopes created
+    # in child nodes supersede their parent. Scopes are isolated from one
+    # another.
+ self.state = _State()
+
@property
def enclosing_entities(self):
return tuple(self._enclosing_entities)
@@ -101,7 +225,9 @@ class Base(gast.NodeTransformer):
return len(self._local_scope_state)
def enter_local_scope(self, inherit=None):
- """Marks entry into a new local scope.
+ """Deprecated. Use self.state instead.
+
+ Marks entry into a new local scope.
Args:
inherit: Optional enumerable of variable names to copy from the
@@ -116,7 +242,9 @@ class Base(gast.NodeTransformer):
self._local_scope_state.append(scope_entered)
def exit_local_scope(self, keep=None):
- """Marks exit from the current local scope.
+ """Deprecated. Use self.state instead.
+
+ Marks exit from the current local scope.
Args:
keep: Optional enumerable of variable names to copy into the
@@ -133,9 +261,11 @@ class Base(gast.NodeTransformer):
return scope_left
def set_local(self, name, value):
+ """Deprecated. Use self.state instead."""
self._local_scope_state[-1][name] = value
def get_local(self, name, default=None):
+ """Deprecated. Use self.state instead."""
return self._local_scope_state[-1].get(name, default)
def debug_print(self, node):
@@ -216,7 +346,7 @@ class Base(gast.NodeTransformer):
node_destination = new_destination
return results
- # TODO(mdan): Once we have error tracing, we may be able to just go to SSA.
+ # TODO(mdan): Remove.
def apply_to_single_assignments(self, targets, values, apply_fn):
"""Applies a function to each individual assignment.
@@ -266,11 +396,27 @@ class Base(gast.NodeTransformer):
def _get_source(self, node):
try:
- return compiler.ast_to_source(node)
- except AssertionError:
+ source, _ = compiler.ast_to_source(node)
+ return source
+ # pylint: disable=broad-except
+ # This function is used for error reporting. If an exception occurs here,
+ # it should be suppressed, in favor of emitting as informative a message
+ # about the original error as possible.
+ except Exception:
return '<could not convert AST to source>'
def visit(self, node):
+ if not isinstance(node, gast.AST):
+ # This is not that uncommon a mistake: various node bodies are lists, for
+ # example, posing a land mine for transformers that need to recursively
+ # call `visit`. The error needs to be raised before the exception handler
+ # below is installed, because said handler will mess up if `node` is not,
+ # in fact, a node.
+ msg = (
+ 'invalid value for "node": expected "ast.AST", got "{}"; to'
+ ' visit lists of nodes, use "visit_block" instead').format(type(node))
+ raise ValueError(msg)
+
source_code = self.entity_info.source_code
source_file = self.entity_info.source_file
did_enter_function = False
diff --git a/tensorflow/contrib/autograph/pyct/transformer_test.py b/tensorflow/contrib/autograph/pyct/transformer_test.py
index baf04653ae..a37e922a1d 100644
--- a/tensorflow/contrib/autograph/pyct/transformer_test.py
+++ b/tensorflow/contrib/autograph/pyct/transformer_test.py
@@ -93,6 +93,83 @@ class TransformerTest(test.TestCase):
inner_function, lambda_node),
anno.getanno(lambda_expr, 'enclosing_entities'))
+ def assertSameAnno(self, first, second, key):
+ self.assertIs(anno.getanno(first, key), anno.getanno(second, key))
+
+ def assertDifferentAnno(self, first, second, key):
+ self.assertIsNot(anno.getanno(first, key), anno.getanno(second, key))
+
+ def test_state_tracking(self):
+
+ class LoopState(object):
+ pass
+
+ class CondState(object):
+ pass
+
+ class TestTransformer(transformer.Base):
+
+ def visit(self, node):
+ anno.setanno(node, 'loop_state', self.state[LoopState].value)
+ anno.setanno(node, 'cond_state', self.state[CondState].value)
+ return super(TestTransformer, self).visit(node)
+
+ def visit_While(self, node):
+ self.state[LoopState].enter()
+ node = self.generic_visit(node)
+ self.state[LoopState].exit()
+ return node
+
+ def visit_If(self, node):
+ self.state[CondState].enter()
+ node = self.generic_visit(node)
+ self.state[CondState].exit()
+ return node
+
+ tr = TestTransformer(self._simple_source_info())
+
+ def test_function(a):
+ a = 1
+ while a:
+ _ = 'a'
+ if a > 2:
+ _ = 'b'
+ while True:
+ raise '1'
+ if a > 3:
+ _ = 'c'
+ while True:
+ raise '1'
+
+ node, _ = parser.parse_entity(test_function)
+ node = tr.visit(node)
+
+ fn_body = node.body[0].body
+ outer_while_body = fn_body[1].body
+ self.assertSameAnno(fn_body[0], outer_while_body[0], 'cond_state')
+ self.assertDifferentAnno(fn_body[0], outer_while_body[0], 'loop_state')
+
+ first_if_body = outer_while_body[1].body
+ self.assertDifferentAnno(outer_while_body[0], first_if_body[0],
+ 'cond_state')
+ self.assertSameAnno(outer_while_body[0], first_if_body[0], 'loop_state')
+
+ first_inner_while_body = first_if_body[1].body
+ self.assertSameAnno(first_if_body[0], first_inner_while_body[0],
+ 'cond_state')
+ self.assertDifferentAnno(first_if_body[0], first_inner_while_body[0],
+ 'loop_state')
+
+ second_if_body = outer_while_body[2].body
+ self.assertDifferentAnno(first_if_body[0], second_if_body[0], 'cond_state')
+ self.assertSameAnno(first_if_body[0], second_if_body[0], 'loop_state')
+
+ second_inner_while_body = second_if_body[1].body
+ self.assertDifferentAnno(first_inner_while_body[0],
+ second_inner_while_body[0], 'cond_state')
+ self.assertDifferentAnno(first_inner_while_body[0],
+ second_inner_while_body[0], 'loop_state')
+
def test_local_scope_info_stack(self):
class TestTransformer(transformer.Base):
@@ -205,6 +282,88 @@ class TransformerTest(test.TestCase):
self.assertTrue(isinstance(node.body[1].body[0], gast.Assign))
self.assertTrue(isinstance(node.body[1].body[1], gast.Return))
+ def test_robust_error_on_list_visit(self):
+
+ class BrokenTransformer(transformer.Base):
+
+ def visit_If(self, node):
+ # This is broken because visit expects a single node, not a list, and
+ # the body of an if is a list.
+ # Importantly, the default error handling in visit also expects a single
+ # node. Therefore, mistakes like this need to trigger a type error
+ # before the visit called here installs its error handler.
+ # That type error can then be caught by the enclosing call to visit,
+ # and correctly blame the If node.
+ self.visit(node.body)
+ return node
+
+ def test_function(x):
+ if x > 0:
+ return x
+
+ tr = BrokenTransformer(self._simple_source_info())
+
+ node, _ = parser.parse_entity(test_function)
+ with self.assertRaises(transformer.AutographParseError) as cm:
+ node = tr.visit(node)
+ obtained_message = str(cm.exception)
+ expected_message = r'expected "ast.AST", got "\<(type|class) \'list\'\>"'
+ self.assertRegexpMatches(obtained_message, expected_message)
+ # The exception should point at the if statement, not any place else. Could
+ # also check the stack trace.
+ self.assertTrue(
+ 'Occurred at node:\nIf' in obtained_message, obtained_message)
+ self.assertTrue(
+ 'Occurred at node:\nFunctionDef' not in obtained_message,
+ obtained_message)
+ self.assertTrue(
+ 'Occurred at node:\nReturn' not in obtained_message, obtained_message)
+
+ def test_robust_error_on_ast_corruption(self):
+ # A child class should not be able to be so broken that it causes the error
+ # handling in `transformer.Base` to raise an exception. Why not? Because
+ # then the original error location is dropped, and an error handler higher
+ # up in the call stack gives misleading information.
+
+ # Here we test that the error handling in `visit` completes, and blames the
+ # correct original exception, even if the AST gets corrupted.
+
+ class NotANode(object):
+ pass
+
+ class BrokenTransformer(transformer.Base):
+
+ def visit_If(self, node):
+ node.body = NotANode()
+ raise ValueError('I blew up')
+
+ def test_function(x):
+ if x > 0:
+ return x
+
+ tr = BrokenTransformer(self._simple_source_info())
+
+ node, _ = parser.parse_entity(test_function)
+ with self.assertRaises(transformer.AutographParseError) as cm:
+ node = tr.visit(node)
+ obtained_message = str(cm.exception)
+ # The message should reference the exception actually raised, not anything
+ # from the exception handler.
+ expected_substring = 'I blew up'
+ self.assertTrue(expected_substring in obtained_message, obtained_message)
+ # Expect the exception to have failed to parse the corrupted AST
+ self.assertTrue(
+ '<could not convert AST to source>' in obtained_message,
+ obtained_message)
+ # The exception should point at the if statement, not any place else. Could
+ # also check the stack trace.
+ self.assertTrue(
+ 'Occurred at node:\nIf' in obtained_message, obtained_message)
+ self.assertTrue(
+ 'Occurred at node:\nFunctionDef' not in obtained_message,
+ obtained_message)
+ self.assertTrue(
+ 'Occurred at node:\nReturn' not in obtained_message, obtained_message)
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/autograph/utils/BUILD b/tensorflow/contrib/autograph/utils/BUILD
index d82c17bf2a..d2b399f19b 100644
--- a/tensorflow/contrib/autograph/utils/BUILD
+++ b/tensorflow/contrib/autograph/utils/BUILD
@@ -28,7 +28,6 @@ py_library(
"tensor_list.py",
"testing.py",
"type_check.py",
- "type_hints.py",
],
srcs_version = "PY2AND3",
visibility = ["//tensorflow:__subpackages__"],
diff --git a/tensorflow/contrib/autograph/utils/__init__.py b/tensorflow/contrib/autograph/utils/__init__.py
index 817d4126d1..57b5f74741 100644
--- a/tensorflow/contrib/autograph/utils/__init__.py
+++ b/tensorflow/contrib/autograph/utils/__init__.py
@@ -30,4 +30,3 @@ from tensorflow.contrib.autograph.utils.py_func import wrap_py_func
from tensorflow.contrib.autograph.utils.tensor_list import dynamic_list_append
from tensorflow.contrib.autograph.utils.testing import fake_tf
from tensorflow.contrib.autograph.utils.type_check import is_tensor
-from tensorflow.contrib.autograph.utils.type_hints import set_element_type
diff --git a/tensorflow/contrib/autograph/utils/builtins.py b/tensorflow/contrib/autograph/utils/builtins.py
index 998087e056..71079cfdc0 100644
--- a/tensorflow/contrib/autograph/utils/builtins.py
+++ b/tensorflow/contrib/autograph/utils/builtins.py
@@ -52,7 +52,7 @@ def dynamic_len(list_or_tensor):
"""Implementation of len using dynamic dispatch."""
if tensor_util.is_tensor(list_or_tensor):
shape = list_or_tensor.shape
- if not shape:
+ if not shape.ndims:
raise ValueError(
'len requires non-zero rank for tensor "%s"' % list_or_tensor)
return array_ops.shape(list_or_tensor)[0]
diff --git a/tensorflow/contrib/autograph/utils/builtins_test.py b/tensorflow/contrib/autograph/utils/builtins_test.py
index 0c2312178a..b4821f36fc 100644
--- a/tensorflow/contrib/autograph/utils/builtins_test.py
+++ b/tensorflow/contrib/autograph/utils/builtins_test.py
@@ -33,7 +33,8 @@ class BuiltinsTest(test.TestCase):
def test_dynamic_len_tf_scalar(self):
a = constant_op.constant(1)
- with self.assertRaises(ValueError):
+ with self.assertRaisesRegexp(ValueError,
+ 'len requires non-zero rank for tensor.*'):
with self.test_session() as sess:
sess.run(builtins.dynamic_builtin(len, a))
diff --git a/tensorflow/contrib/bigtable/README.md b/tensorflow/contrib/bigtable/README.md
index ef3c60069e..d7c71a20ed 100644
--- a/tensorflow/contrib/bigtable/README.md
+++ b/tensorflow/contrib/bigtable/README.md
@@ -1,10 +1,344 @@
# Bigtable #
-[Google Cloud Bigtable](https://cloud.google.com/bigtable/) is a high
+[Cloud Bigtable](https://cloud.google.com/bigtable/) is a high
performance storage system that can store and serve training data. This contrib
package contains an experimental integration with TensorFlow.
> **Status: Highly experimental.** The current implementation is very much in
> flux. Please use at your own risk! :-)
-<!-- TODO(saeta): Document usage / methods / etc. -->
+The TensorFlow integration with Cloud Bigtable is optimized for common
+TensorFlow usage and workloads. It currently focuses on reading from Cloud
+Bigtable at high speed, in particular to feed modern accelerators. For
+general-purpose Cloud Bigtable
+APIs, see the [official Cloud Bigtable client library documentation][clientdoc].
+
+[clientdoc]: https://cloud.google.com/bigtable/docs/reference/libraries
+
+## Sample Use
+
+There are three main reading styles supported by the `BigtableTable` class:
+
+ 1. **Reading keys**: Read only the row keys in a table. Keys are returned in
+ sorted order from the table. Most key reading operations retrieve all keys
+    in a contiguous range; the `sample_keys` operation, however, skips keys and
+    operates on the whole table (not a contiguous subset).
+ 2. **Retrieving a row's values**: Given a row key, look up the data associated
+ with a defined set of columns. This operation takes advantage of Cloud
+ Bigtable's low-latency and excellent support for random access.
+ 3. **Scanning ranges**: Given a contiguous range of rows, retrieve both the row
+ key and the data associated with a fixed set of columns. This operation
+ takes advantage of Cloud Bigtable's high throughput scans, and is the most
+ efficient way to read data.
+
+When using the Cloud Bigtable API, the workflow is:
+
+ 1. Create a `BigtableClient` object.
+ 2. Use the `BigtableClient` to create `BigtableTable` objects corresponding to
+ each table in the Cloud Bigtable instance you would like to access.
+ 3. Call methods on the `BigtableTable` object to create `tf.data.Dataset`s to
+ retrieve data.
+
+The following is an example for how to read all row keys with the prefix
+`train-`.
+
+```python
+import tensorflow as tf
+
+GCP_PROJECT_ID = '<FILL_ME_IN>'
+BIGTABLE_INSTANCE_ID = '<FILL_ME_IN>'
+BIGTABLE_TABLE_NAME = '<FILL_ME_IN>'
+PREFIX = 'train-'
+
+def main():
+ client = tf.contrib.cloud.BigtableClient(GCP_PROJECT_ID, BIGTABLE_INSTANCE_ID)
+ table = client.table(BIGTABLE_TABLE_NAME)
+ dataset = table.keys_by_prefix_dataset(PREFIX)
+ iterator = dataset.make_initializable_iterator()
+ get_next_op = iterator.get_next()
+
+ with tf.Session() as sess:
+ print('Initializing the iterator.')
+ sess.run(iterator.initializer)
+ print('Retrieving rows:')
+ row_index = 0
+ while True:
+ try:
+ row_key = sess.run(get_next_op)
+ print('Row key %d: %s' % (row_index, row_key))
+ row_index += 1
+ except tf.errors.OutOfRangeError:
+ print('Finished reading data!')
+ break
+
+if __name__ == '__main__':
+ main()
+
+```
+
+### Reading row keys
+
+Read only the row keys in a table. Keys are returned in sorted order from the
+table. Most key reading operations retrieve all keys in a contiguous range;
+the `sample_keys` operation, however, skips keys and operates on the whole
+table (not a contiguous subset).
+
+There are 3 methods to retrieve row keys:
+
+ - `table.keys_by_range_dataset(start, end)`: Retrieves row keys starting at
+   `start` and ending at `end`. The range is "half-open", and thus it
+ includes `start` if `start` is present in the table. It does not include
+ `end`.
+ - `table.keys_by_prefix_dataset(prefix)`: Retrieves all row keys that start
+ with `prefix`. It includes the row key `prefix` if present in the table.
+ - `table.sample_keys()`: Retrieves a sampling of keys from the underlying
+ table. This is often useful in conjunction with parallel scans.
+
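+For example, a minimal sketch (assuming `table` is a `BigtableTable` created as
+in the sample above, and that the row keys shown are purely illustrative):
+
+```python
+# Row keys in the half-open range [b'train-00000', b'train-90000'), returned in
+# sorted order.
+range_keys = table.keys_by_range_dataset('train-00000', 'train-90000')
+
+# A sparse sampling of row keys from across the whole table; often useful for
+# splitting a large scan into sub-ranges.
+sampled_keys = table.sample_keys()
+```
+
+Both calls produce `tf.data.Dataset` objects of row keys, which can be consumed
+exactly like the prefix example above.
+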
+### Reading cell values given a row key
+
+Given a dataset producing row keys, you can use the `table.lookup_columns`
+transformation to retrieve values. Example:
+
+```python
+key_dataset = tf.data.Dataset.from_tensor_slices([
+ 'row_key_1',
+ 'other_row_key',
+ 'final_row_key',
+])
+values_dataset = key_dataset.apply(
+ table.lookup_columns(('my_column_family', 'column_name'),
+ ('other_cf', 'col')))
+training_data = values_dataset.map(my_parsing_function) # ...
+```
+
+### Scanning ranges
+Given a contiguous range of rows, retrieve both the row key and the data
+associated with a fixed set of columns. Scanning is the most efficient way to
+retrieve data from Cloud Bigtable and is thus a very common API for high
+performance data pipelines. To construct a scanning `tf.data.Dataset` from a
+`BigtableTable` object, call one of the following methods:
+
+ - `table.scan_prefix(prefix, ...)`
+ - `table.scan_range(start, end, ...)`
+ - `table.parallel_scan_prefix(prefix, ...)`
+ - `table.parallel_scan_range(start, end, ...)`
+
+Aside from the specification of the contiguous range of rows, they all take the
+following arguments:
+
+ - `probability`: (Optional.) A float between 0 (exclusive) and 1 (inclusive).
+   A value less than 1 causes rows to be sampled probabilistically with the
+   provided probability.
+ - `columns`: The columns to read. (See below.)
+ - `**kwargs`: The columns to read, specified as keyword arguments. (See below.)
+
+In addition, the two parallel operations accept the following optional argument:
+`num_parallel_scans`, which configures the number of parallel Cloud Bigtable scan
+operations to run. A reasonable default is automatically chosen for small
+Cloud Bigtable clusters. If you have a large cluster, or an extremely demanding
+workload, you can tune this value to optimize performance.
+
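+As a rough sketch (assuming `table` is a `BigtableTable` as above; the prefix,
+the column names, and the argument values below are placeholders chosen only
+for illustration):
+
+```python
+columns = [('cf1', 'col1'), ('cf1', 'col2')]
+ds = table.parallel_scan_prefix('train-', columns=columns,
+                                probability=0.5, num_parallel_scans=8)
+# As in the scan_range example below, each element is a tuple of tf.string
+# tensors: the row key followed by one value per requested column.
+training_data = ds.map(my_parsing_function)  # my_parsing_function: placeholder
+```
+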
+#### Specifying columns to read when scanning
+
+All of the scan operations allow you to specify the column family and columns
+in the same ways.
+
+##### Using `columns`
+
+The first way to specify the data to read is via the `columns` parameter. The
+value should be a tuple (or list of tuples) of strings. The first string in the
+tuple is the column family, and the second string in the tuple is the column
+qualifier.
+
+##### Using `**kwargs`
+
+The second way to specify the data to read is via the `**kwargs` parameter,
+which you can use to specify keyword arguments corresponding to the columns that
+you want to read. The keyword to use is the column family name, and the argument
+value should be either a string, or a tuple of strings, specifying the column
+qualifiers (column names).
+
+Although using `**kwargs` has the advantage of requiring less typing, it is not
+future-proof in all cases. (If we add a new parameter to the scan functions that
+has the same name as your column family, your code will break.)
+
+##### Examples
+
+Below are two equivalent snippets for how to specify which columns to read:
+
+```python
+ds1 = table.scan_range("row_start", "row_end", columns=[("cfa", "c1"),
+ ("cfa", "c2"),
+ ("cfb", "c3")])
+ds2 = table.scan_range("row_start", "row_end", cfa=["c1", "c2"], cfb="c3")
+```
+
+In this example, we are reading 3 columns from a total of 2 column families.
+From the `cfa` column family, we are reading columns `c1`, and `c2`. From the
+second column family (`cfb`), we are reading `c3`. Both `ds1` and `ds2` will
+output elements of the following types (`tf.string`, `tf.string`, `tf.string`,
+`tf.string`). The first `tf.string` is the row key, the second `tf.string` is
+the latest data in cell `cfa:c1`, the third corresponds to `cfa:c2`, and the
+final one is `cfb:c3`.
+
+#### Determinism when scanning
+
+While the non-parallel scan operations are fully deterministic, the parallel
+scan operations are not. If you would like to scan in parallel without losing
+determinism, you can build up the `parallel_interleave` yourself. For example,
+to scan all rows between `training_data_00000` and `training_data_90000`, we
+can use the following code snippet:
+
+```python
+table = # ...
+columns = [('cf1', 'col1'), ('cf1', 'col2')]
+NUM_PARALLEL_READS = # ...
+ds = tf.data.Dataset.range(9).shuffle(10)
+def interleave_fn(index):
+ # Given a starting index, create 2 strings to be the start and end
+ start_idx = index
+ end_idx = index + 1
+ start_idx_str = tf.as_string(start_idx * 10000, width=5, fill='0')
+ end_idx_str = tf.as_string(end_idx * 10000, width=5, fill='0')
+ start = tf.string_join(['training_data_', start_idx_str])
+ end = tf.string_join(['training_data_', end_idx_str])
+  return table.scan_range(start, end, columns=columns)
+ds = ds.apply(tf.contrib.data.parallel_interleave(
+ interleave_fn, cycle_length=NUM_PARALLEL_READS, prefetch_input_elements=1))
+```
+
+> Note: you should divide up the key range into more sub-ranges for increased
+> parallelism.
+
+## Writing to Cloud Bigtable
+
+In order to simplify getting started, this package provides basic support for
+writing data into Cloud Bigtable.
+
+> Note: The implementation is not optimized for performance! Please consider
+> using alternative frameworks such as Apache Beam / Cloud Dataflow for
+> production workloads.
+
+Below is an example for how to write a trivial dataset into Cloud Bigtable.
+
+```python
+import tensorflow as tf
+
+GCP_PROJECT_ID = '<FILL_ME_IN>'
+BIGTABLE_INSTANCE_ID = '<FILL_ME_IN>'
+BIGTABLE_TABLE_NAME = '<FILL_ME_IN>'
+COLUMN_FAMILY = '<FILL_ME_IN>'
+COLUMN_QUALIFIER = '<FILL_ME_IN>'
+
+def make_dataset():
+ """Makes a dataset to write to Cloud Bigtable."""
+ return tf.data.Dataset.from_tensor_slices([
+ 'training_data_1',
+ 'training_data_2',
+ 'training_data_3',
+ ])
+
+def make_row_key_dataset():
+ """Makes a dataset of strings used for row keys.
+
+ The strings are of the form: `fake-data-` followed by a sequential counter.
+ For example, this dataset would contain the following elements:
+
+ - fake-data-00000001
+ - fake-data-00000002
+ - ...
+ - fake-data-23498103
+ """
+ counter_dataset = tf.contrib.data.Counter()
+ width = 8
+ row_key_prefix = 'fake-data-'
+ ds = counter_dataset.map(lambda index: tf.as_string(index,
+ width=width,
+ fill='0'))
+ ds = ds.map(lambda idx_str: tf.string_join([row_key_prefix, idx_str]))
+ return ds
+
+
+def main():
+ client = tf.contrib.cloud.BigtableClient(GCP_PROJECT_ID, BIGTABLE_INSTANCE_ID)
+ table = client.table(BIGTABLE_TABLE_NAME)
+ dataset = make_dataset()
+ index_dataset = make_row_key_dataset()
+ aggregate_dataset = tf.data.Dataset.zip((index_dataset, dataset))
+ write_op = table.write(aggregate_dataset, column_families=[COLUMN_FAMILY],
+ columns=[COLUMN_QUALIFIER])
+
+ with tf.Session() as sess:
+ print('Starting transfer.')
+ sess.run(write_op)
+ print('Transfer complete.')
+
+if __name__ == '__main__':
+ main()
+```
+
+## Sample applications and architectures
+
+While most machine learning applications are well served by a high performance
+distributed file system, there are certain applications where using Cloud
+Bigtable works extremely well.
+
+### Perfect Shuffling
+
+Normally, training data is stored in flat files, and a combination of
+(1) `tf.data.Dataset.interleave` (or `parallel_interleave`), (2)
+`tf.data.Dataset.shuffle`, and (3) writing the data in an unsorted order in the
+data files in the first place, provides enough randomization to ensure models
+train efficiently. However, if you would like perfect shuffling, you can use
+Cloud Bigtable's low-latency random access capabilities. Create a
+`tf.data.Dataset` that generates the keys in a perfectly random order (or read
+all the keys into memory and use a shuffle buffer sized to fit all of them for a
+perfect random shuffle using `tf.data.Dataset.shuffle`), and then use
+`lookup_columns` to retrieve the training data.
+
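+A minimal sketch of the in-memory variant (assuming the full key list fits in
+memory; `all_row_keys`, the column family `features`, and the column `data` are
+placeholders):
+
+```python
+# Placeholder: in practice, collect every row key ahead of time, e.g. by
+# iterating one of the key datasets described above.
+all_row_keys = ['example-%08d' % i for i in range(10000)]
+key_ds = tf.data.Dataset.from_tensor_slices(all_row_keys)
+# A shuffle buffer as large as the key set yields a perfectly random order.
+key_ds = key_ds.shuffle(buffer_size=len(all_row_keys))
+training_ds = key_ds.apply(table.lookup_columns(('features', 'data')))
+```
+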
+### Distributed Reinforcement Learning
+
+Sophisticated reinforcement learning algorithms are commonly trained across a
+distributed cluster. (See [IMPALA by DeepMind][impala].) One part of the cluster
+runs self-play, while the other part of the cluster learns a new version of the
+model based on the training data generated by self-play. The new model version
+is then distributed to the self-play half of the cluster, and new training data
+is generated to continue the cycle.
+
+In such a configuration, because there is value in training on the freshest
+examples, a storage service like Cloud Bigtable can be used to store and
+serve the generated training data. When using Cloud Bigtable, there is no need
+to aggregate the examples into large batch files, but the examples can instead
+be written as soon as they are generated, and then retrieved at high speed.
+
+[impala]: https://arxiv.org/abs/1802.01561
+
+## Common Gotchas!
+
+### gRPC Certificates
+
+If you encounter a log line that includes the following:
+
+```
+"description":"Failed to load file", [...],
+"filename":"/usr/share/grpc/roots.pem"
+```
+
+you likely need to copy the [gRPC roots.pem file][grpcPem] to
+`/usr/share/grpc/roots.pem` on your local machine.
+
+[grpcPem]: https://github.com/grpc/grpc/blob/master/etc/roots.pem
+
+### Permission denied errors
+
+The TensorFlow Cloud Bigtable client will search for credentials to use in the
+process's environment. It will use the first credentials it finds if multiple
+are available.
+
+ - **Compute Engine**: When running on Compute Engine, the client will often use
+ the service account from the virtual machine's metadata service. Be sure to
+ authorize your Compute Engine VM to have access to the Cloud Bigtable service
+ when creating your VM.
+ - **Cloud TPU**: Your Cloud TPUs run with the designated Cloud TPU service
+ account dedicated to your GCP project. Ensure the service account has been
+ authorized via the Cloud Console to access your Cloud Bigtable instances.
diff --git a/tensorflow/contrib/bigtable/__init__.py b/tensorflow/contrib/bigtable/__init__.py
index 7df054637c..b7d89c9842 100644
--- a/tensorflow/contrib/bigtable/__init__.py
+++ b/tensorflow/contrib/bigtable/__init__.py
@@ -18,7 +18,7 @@ This contrib package allows TensorFlow to interface directly with Cloud Bigtable
for high-speed data loading.
@@BigtableClient
-@@BigTable
+@@BigtableTable
"""
@@ -26,14 +26,14 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigTable
from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableClient
+from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableTable
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
- 'BigTable',
'BigtableClient',
+ 'BigtableTable',
]
remove_undocumented(__name__, _allowed_symbols)
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc b/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
index 70923e6287..a6755a3496 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_kernels.cc
@@ -276,7 +276,7 @@ class ToBigtableOp : public AsyncOpKernel {
}
OP_REQUIRES_ASYNC(
ctx, failures.empty() && mutation_status.ok(),
- errors::Unknown("Failure while writing to BigTable: ",
+ errors::Unknown("Failure while writing to Cloud Bigtable: ",
mutation_status.error_code(), " - ",
mutation_status.error_message(), " (",
mutation_status.error_details(),
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_lib.cc b/tensorflow/contrib/bigtable/kernels/bigtable_lib.cc
index 2514575f30..67bf14c176 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_lib.cc
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_lib.cc
@@ -27,10 +27,10 @@ Status GrpcStatusToTfStatus(const ::grpc::Status& status) {
status.error_code() == ::grpc::StatusCode::OUT_OF_RANGE) {
grpc_code = ::grpc::StatusCode::INTERNAL;
}
- return Status(
- static_cast<::tensorflow::error::Code>(status.error_code()),
- strings::StrCat("Error reading from BigTable: ", status.error_message(),
- " (Details: ", status.error_details(), ")"));
+ return Status(static_cast<::tensorflow::error::Code>(status.error_code()),
+ strings::StrCat("Error reading from Cloud Bigtable: ",
+ status.error_message(),
+ " (Details: ", status.error_details(), ")"));
}
string RegexFromStringSet(const std::vector<string>& strs) {
diff --git a/tensorflow/contrib/bigtable/kernels/bigtable_lib.h b/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
index 12d8256dea..a2a5df1037 100644
--- a/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
+++ b/tensorflow/contrib/bigtable/kernels/bigtable_lib.h
@@ -58,7 +58,8 @@ class BigtableTableResource : public ResourceBase {
BigtableTableResource(BigtableClientResource* client, string table_name)
: client_(client),
table_name_(std::move(table_name)),
- table_(client->get_client(), table_name_) {
+ table_(client->get_client(), table_name_,
+ google::cloud::bigtable::AlwaysRetryMutationPolicy()) {
client_->Ref();
}
diff --git a/tensorflow/contrib/bigtable/python/kernel_tests/bigtable_ops_test.py b/tensorflow/contrib/bigtable/python/kernel_tests/bigtable_ops_test.py
index 2f20064619..e36f7f32c6 100644
--- a/tensorflow/contrib/bigtable/python/kernel_tests/bigtable_ops_test.py
+++ b/tensorflow/contrib/bigtable/python/kernel_tests/bigtable_ops_test.py
@@ -44,7 +44,7 @@ class BigtableOpsTest(test.TestCase):
def setUp(self):
self._client = gen_bigtable_test_ops.bigtable_test_client()
table = gen_bigtable_ops.bigtable_table(self._client, "testtable")
- self._table = bigtable.BigTable("testtable", None, table)
+ self._table = bigtable.BigtableTable("testtable", None, table)
def _makeSimpleDataset(self):
output_rows = dataset_ops.Dataset.from_tensor_slices(self.COMMON_ROW_KEYS)
diff --git a/tensorflow/contrib/bigtable/python/ops/bigtable_api.py b/tensorflow/contrib/bigtable/python/ops/bigtable_api.py
index 9f73b7223c..fd30aa8bbb 100644
--- a/tensorflow/contrib/bigtable/python/ops/bigtable_api.py
+++ b/tensorflow/contrib/bigtable/python/ops/bigtable_api.py
@@ -94,7 +94,7 @@ class BigtableClient(object):
project_id, instance_id, connection_pool_size, max_receive_message_size)
def table(self, name, snapshot=None):
- """Opens a table and returns a `BigTable` object.
+ """Opens a table and returns a `BigtableTable` object.
Args:
name: A `tf.string` `tf.Tensor` name of the table to open.
@@ -102,19 +102,20 @@ class BigtableClient(object):
request the creation of a snapshot. (Note: currently unimplemented.)
Returns:
- A `BigTable` python object representing the operations available on the
- table.
+ A `BigtableTable` python object representing the operations available on
+ the table.
"""
# TODO(saeta): Implement snapshot functionality.
table = gen_bigtable_ops.bigtable_table(self._resource, name)
- return BigTable(name, snapshot, table)
+ return BigtableTable(name, snapshot, table)
-class BigTable(object):
- """BigTable is the entrypoint for reading and writing data in Cloud Bigtable.
+class BigtableTable(object):
+ """BigtableTable is the entrypoint for reading and writing data in Cloud
+ Bigtable.
- This BigTable class is the python representation of the Cloud Bigtable table
- within TensorFlow. Methods on this class allow data to be read from and
+ This BigtableTable class is the Python representation of the Cloud Bigtable
+ table within TensorFlow. Methods on this class allow data to be read from and
written to the Cloud Bigtable service in flexible and high performance
manners.
"""
diff --git a/tensorflow/contrib/boosted_trees/estimator_batch/custom_export_strategy.py b/tensorflow/contrib/boosted_trees/estimator_batch/custom_export_strategy.py
index 62f1f4122b..78232fa0a6 100644
--- a/tensorflow/contrib/boosted_trees/estimator_batch/custom_export_strategy.py
+++ b/tensorflow/contrib/boosted_trees/estimator_batch/custom_export_strategy.py
@@ -32,6 +32,7 @@ from tensorflow.python.framework import ops
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import loader as saved_model_loader
from tensorflow.python.saved_model import tag_constants
+from tensorflow.python.util import compat
_SPARSE_FLOAT_FEATURE_NAME_TEMPLATE = "%s_%d"
@@ -88,10 +89,12 @@ def make_custom_export_strategy(name,
len(sparse_float_indices), len(sparse_int_indices))
sorted_by_importance = sorted(
feature_importances.items(), key=lambda x: -x[1])
- assets_dir = os.path.join(result_dir, "assets.extra")
+ assets_dir = os.path.join(
+ compat.as_bytes(result_dir), compat.as_bytes("assets.extra"))
gfile.MakeDirs(assets_dir)
- with gfile.GFile(os.path.join(assets_dir, "feature_importances"),
- "w") as f:
+ with gfile.GFile(os.path.join(
+ compat.as_bytes(assets_dir),
+ compat.as_bytes("feature_importances")), "w") as f:
f.write("\n".join("%s, %f" % (k, v) for k, v in sorted_by_importance))
return result_dir
diff --git a/tensorflow/contrib/boosted_trees/estimator_batch/estimator.py b/tensorflow/contrib/boosted_trees/estimator_batch/estimator.py
index 59a78515c6..38fa8c3834 100644
--- a/tensorflow/contrib/boosted_trees/estimator_batch/estimator.py
+++ b/tensorflow/contrib/boosted_trees/estimator_batch/estimator.py
@@ -22,6 +22,7 @@ from tensorflow.contrib.boosted_trees.estimator_batch import model
from tensorflow.contrib.boosted_trees.python.utils import losses
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
+from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.ops import math_ops
@@ -354,3 +355,45 @@ class GradientBoostedDecisionTreeRanker(estimator.Estimator):
model_dir=model_dir,
config=config,
feature_engineering_fn=feature_engineering_fn)
+
+
+class CoreGradientBoostedDecisionTreeEstimator(core_estimator.Estimator):
+ """An estimator using gradient boosted decision trees."""
+
+ def __init__(self,
+ learner_config,
+ examples_per_layer,
+ head,
+ num_trees=None,
+ feature_columns=None,
+ weight_column_name=None,
+ model_dir=None,
+ config=None,
+ label_keys=None,
+ feature_engineering_fn=None,
+ logits_modifier_function=None,
+ center_bias=True,
+ output_leaf_index=False):
+
+ def _model_fn(features, labels, mode, config):
+ return model.model_builder(
+ features=features,
+ labels=labels,
+ mode=mode,
+ config=config,
+ params={
+ 'head': head,
+ 'feature_columns': feature_columns,
+ 'learner_config': learner_config,
+ 'num_trees': num_trees,
+ 'weight_column_name': weight_column_name,
+ 'examples_per_layer': examples_per_layer,
+ 'center_bias': center_bias,
+ 'logits_modifier_function': logits_modifier_function,
+ 'use_core_libs': True,
+ 'output_leaf_index': output_leaf_index,
+ },
+ output_type=model.ModelBuilderOutputType.ESTIMATOR_SPEC)
+
+ super(CoreGradientBoostedDecisionTreeEstimator, self).__init__(
+ model_fn=_model_fn, model_dir=model_dir, config=config)
diff --git a/tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py b/tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py
index 2c2dcb039d..f787d3cdb8 100644
--- a/tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py
+++ b/tensorflow/contrib/boosted_trees/estimator_batch/estimator_test.py
@@ -182,7 +182,7 @@ class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
config = run_config.RunConfig()
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
- loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
+ loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
model = estimator.GradientBoostedDecisionTreeRanker(
head=head_fn,
@@ -203,5 +203,32 @@ class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
model.predict(input_fn=_infer_ranking_train_input_fn)
+class CoreGradientBoostedDecisionTreeEstimator(test_util.TensorFlowTestCase):
+
+ def testTrainEvaluateInferDoesNotThrowError(self):
+ head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
+ loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
+
+ learner_config = learner_pb2.LearnerConfig()
+ learner_config.num_classes = 2
+ learner_config.constraints.max_tree_depth = 1
+ model_dir = tempfile.mkdtemp()
+ config = run_config.RunConfig()
+
+ est = estimator.CoreGradientBoostedDecisionTreeEstimator(
+ head=head_fn,
+ learner_config=learner_config,
+ num_trees=1,
+ examples_per_layer=3,
+ model_dir=model_dir,
+ config=config,
+ feature_columns=[core_feature_column.numeric_column("x")])
+
+ # Train for a few steps.
+ est.train(input_fn=_train_input_fn, steps=1000)
+ est.evaluate(input_fn=_eval_input_fn, steps=1)
+ est.predict(input_fn=_eval_input_fn)
+
+
if __name__ == "__main__":
googletest.main()
diff --git a/tensorflow/contrib/boosted_trees/estimator_batch/model.py b/tensorflow/contrib/boosted_trees/estimator_batch/model.py
index 0e8a56e6e9..2fbe72951a 100644
--- a/tensorflow/contrib/boosted_trees/estimator_batch/model.py
+++ b/tensorflow/contrib/boosted_trees/estimator_batch/model.py
@@ -29,7 +29,17 @@ from tensorflow.python.framework import ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import training_util
-def model_builder(features, labels, mode, params, config):
+class ModelBuilderOutputType(object):
+ MODEL_FN_OPS = 0
+ ESTIMATOR_SPEC = 1
+
+
+def model_builder(features,
+ labels,
+ mode,
+ params,
+ config,
+ output_type=ModelBuilderOutputType.MODEL_FN_OPS):
"""Multi-machine batch gradient descent tree model.
Args:
@@ -115,31 +125,53 @@ def model_builder(features, labels, mode, params, config):
return update_op
create_estimator_spec_op = getattr(head, "create_estimator_spec", None)
- if use_core_libs and callable(create_estimator_spec_op):
- model_fn_ops = head.create_estimator_spec(
- features=features,
- mode=mode,
- labels=labels,
- train_op_fn=_train_op_fn,
- logits=logits)
- model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(model_fn_ops)
- else:
- model_fn_ops = head.create_model_fn_ops(
- features=features,
- mode=mode,
- labels=labels,
- train_op_fn=_train_op_fn,
- logits=logits)
- if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
- model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
- gbdt_batch.LEAF_INDEX]
+
if num_trees:
if center_bias:
num_trees += 1
finalized_trees, attempted_trees = gbdt_model.get_number_of_trees_tensor()
- model_fn_ops.training_hooks.append(
+ training_hooks = [
trainer_hooks.StopAfterNTrees(num_trees, attempted_trees,
- finalized_trees))
+ finalized_trees)
+ ]
+
+ if output_type == ModelBuilderOutputType.MODEL_FN_OPS:
+ if use_core_libs and callable(create_estimator_spec_op):
+ model_fn_ops = head.create_estimator_spec(
+ features=features,
+ mode=mode,
+ labels=labels,
+ train_op_fn=_train_op_fn,
+ logits=logits)
+ model_fn_ops = estimator_utils.estimator_spec_to_model_fn_ops(
+ model_fn_ops)
+ else:
+ model_fn_ops = head.create_model_fn_ops(
+ features=features,
+ mode=mode,
+ labels=labels,
+ train_op_fn=_train_op_fn,
+ logits=logits)
+
+ if output_leaf_index and gbdt_batch.LEAF_INDEX in predictions_dict:
+ model_fn_ops.predictions[gbdt_batch.LEAF_INDEX] = predictions_dict[
+ gbdt_batch.LEAF_INDEX]
+
+ model_fn_ops.training_hooks.extend(training_hooks)
+ return model_fn_ops
+ elif output_type == ModelBuilderOutputType.ESTIMATOR_SPEC:
+ assert callable(create_estimator_spec_op)
+ estimator_spec = head.create_estimator_spec(
+ features=features,
+ mode=mode,
+ labels=labels,
+ train_op_fn=_train_op_fn,
+ logits=logits)
+
+ estimator_spec = estimator_spec._replace(
+ training_hooks=training_hooks + list(estimator_spec.training_hooks))
+ return estimator_spec
+
return model_fn_ops
diff --git a/tensorflow/contrib/boosted_trees/examples/boston.py b/tensorflow/contrib/boosted_trees/examples/boston.py
index e9dbdb0fd7..54c4ff059e 100644
--- a/tensorflow/contrib/boosted_trees/examples/boston.py
+++ b/tensorflow/contrib/boosted_trees/examples/boston.py
@@ -45,6 +45,7 @@ from tensorflow.contrib.boosted_trees.estimator_batch.estimator import GradientB
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn import learn_runner
+from tensorflow.python.util import compat
_BOSTON_NUM_FEATURES = 13
@@ -79,7 +80,8 @@ def _convert_fn(dtec, sorted_feature_names, num_dense, num_sparse_float,
num_sparse_int, export_dir, unused_eval_result):
universal_format = custom_export_strategy.convert_to_universal_format(
dtec, sorted_feature_names, num_dense, num_sparse_float, num_sparse_int)
- with tf.gfile.GFile(os.path.join(export_dir, "tree_proto"), "w") as f:
+ with tf.gfile.GFile(os.path.join(
+ compat.as_bytes(export_dir), compat.as_bytes("tree_proto")), "w") as f:
f.write(str(universal_format))
diff --git a/tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc b/tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc
index 35b059f349..4fab2b0b7d 100644
--- a/tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc
+++ b/tensorflow/contrib/boosted_trees/lib/utils/batch_features.cc
@@ -16,6 +16,7 @@
#include "tensorflow/contrib/boosted_trees/lib/utils/batch_features.h"
#include "tensorflow/contrib/boosted_trees/lib/utils/macros.h"
#include "tensorflow/contrib/boosted_trees/lib/utils/tensor_utils.h"
+#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace boosted_trees {
@@ -96,9 +97,11 @@ Status BatchFeatures::Initialize(
"Sparse float feature shape incompatible with batch size."));
auto tensor_shape = TensorShape({shape_flat(0), shape_flat(1)});
auto order_dims = sparse::SparseTensor::VarDimArray({0, 1});
- sparse_float_feature_columns_.emplace_back(sparse_float_feature_indices,
- sparse_float_feature_values,
- tensor_shape, order_dims);
+ sparse::SparseTensor sparse_tensor;
+ TF_RETURN_IF_ERROR(sparse::SparseTensor::Create(
+ sparse_float_feature_indices, sparse_float_feature_values, tensor_shape,
+ order_dims, &sparse_tensor));
+ sparse_float_feature_columns_.push_back(std::move(sparse_tensor));
}
// Read sparse int features.
@@ -136,9 +139,11 @@ Status BatchFeatures::Initialize(
"Sparse int feature shape incompatible with batch size."));
auto tensor_shape = TensorShape({shape_flat(0), shape_flat(1)});
auto order_dims = sparse::SparseTensor::VarDimArray({0, 1});
- sparse_int_feature_columns_.emplace_back(sparse_int_feature_indices,
- sparse_int_feature_values,
- tensor_shape, order_dims);
+ sparse::SparseTensor sparse_tensor;
+ TF_RETURN_IF_ERROR(sparse::SparseTensor::Create(
+ sparse_int_feature_indices, sparse_int_feature_values, tensor_shape,
+ order_dims, &sparse_tensor));
+ sparse_int_feature_columns_.push_back(std::move(sparse_tensor));
}
return Status::OK();
}
diff --git a/tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc b/tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc
index d8a6088648..30c37435fe 100644
--- a/tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc
+++ b/tensorflow/contrib/boosted_trees/lib/utils/examples_iterable_test.cc
@@ -43,27 +43,35 @@ TEST_F(ExamplesIterableTest, Iterate) {
test::AsTensor<int64>({0, 0, 2, 0, 3, 0, 4, 0}, {4, 2});
auto sparse_float_values1 = test::AsTensor<float>({-3.0f, 0.0f, 5.0f, 0.0f});
auto sparse_float_shape1 = TensorShape({8, 1});
- sparse::SparseTensor sparse_float_tensor1(
- sparse_float_indices1, sparse_float_values1, sparse_float_shape1);
+ sparse::SparseTensor sparse_float_tensor1;
+ TF_ASSERT_OK(
+ sparse::SparseTensor::Create(sparse_float_indices1, sparse_float_values1,
+ sparse_float_shape1, &sparse_float_tensor1));
auto sparse_float_indices2 = test::AsTensor<int64>(
{0, 1, 1, 0, 2, 1, 3, 0, 4, 1, 5, 0, 5, 1, 7, 0}, {8, 2});
auto sparse_float_values2 =
test::AsTensor<float>({1.f, 4.0f, 3.f, 7.0f, 4.3f, 9.0f, 0.8f, -4.0f});
auto sparse_float_shape2 = TensorShape({8, 2});
- sparse::SparseTensor sparse_float_tensor2(
- sparse_float_indices2, sparse_float_values2, sparse_float_shape2);
+ sparse::SparseTensor sparse_float_tensor2;
+ TF_ASSERT_OK(
+ sparse::SparseTensor::Create(sparse_float_indices2, sparse_float_values2,
+ sparse_float_shape2, &sparse_float_tensor2));
auto sparse_int_indices1 =
test::AsTensor<int64>({0, 0, 0, 1, 1, 0, 3, 0, 3, 1, 7, 0}, {6, 2});
auto sparse_int_values1 = test::AsTensor<int64>({1, 8, 0, 2, 0, 5});
auto sparse_int_shape1 = TensorShape({8, 2});
- sparse::SparseTensor sparse_int_tensor1(
- sparse_int_indices1, sparse_int_values1, sparse_int_shape1);
+ sparse::SparseTensor sparse_int_tensor1;
+ TF_ASSERT_OK(
+ sparse::SparseTensor::Create(sparse_int_indices1, sparse_int_values1,
+ sparse_int_shape1, &sparse_int_tensor1));
auto sparse_int_indices2 =
test::AsTensor<int64>({1, 0, 2, 0, 3, 0, 4, 0}, {4, 2});
auto sparse_int_values2 = test::AsTensor<int64>({7, 13, 4, 0});
auto sparse_int_shape2 = TensorShape({8, 1});
- sparse::SparseTensor sparse_int_tensor2(
- sparse_int_indices2, sparse_int_values2, sparse_int_shape2);
+ sparse::SparseTensor sparse_int_tensor2;
+ TF_ASSERT_OK(
+ sparse::SparseTensor::Create(sparse_int_indices2, sparse_int_values2,
+ sparse_int_shape2, &sparse_int_tensor2));
auto validate_example_features = [](int64 example_idx,
const Example& example) {
diff --git a/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py b/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
index 1ee7f2395e..643d8d2498 100644
--- a/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
+++ b/tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
@@ -287,7 +287,8 @@ class GradientBoostedDecisionTreeModel(object):
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS,
feature_columns=None,
use_core_columns=False,
- output_leaf_index=False):
+ output_leaf_index=False,
+ output_leaf_index_modes=None):
"""Construct a new GradientBoostedDecisionTreeModel function.
Args:
@@ -307,6 +308,9 @@ class GradientBoostedDecisionTreeModel(object):
used.
output_leaf_index: A boolean variable indicating whether to output leaf
index into predictions dictionary.
+ output_leaf_index_modes: A list of modes from (TRAIN, EVAL, INFER) which
+ dictates when leaf indices will be outputted. By default, leaf indices
+ are only outputted in INFER mode.
Raises:
ValueError: if inputs are not valid.
@@ -404,7 +408,16 @@ class GradientBoostedDecisionTreeModel(object):
self._learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.TREE_PER_CLASS and
learner_config.num_classes == 2)
+
+ if output_leaf_index_modes is None:
+ output_leaf_index_modes = [learn.ModeKeys.INFER]
+ elif not all(
+ mode in (learn.ModeKeys.TRAIN, learn.ModeKeys.EVAL,
+ learn.ModeKeys.INFER) for mode in output_leaf_index_modes):
+ raise ValueError("output_leaf_index_modes should only contain ModeKeys.")
+
self._output_leaf_index = output_leaf_index
+ self._output_leaf_index_modes = output_leaf_index_modes
def _predict_and_return_dict(self, ensemble_handle, ensemble_stamp, mode):
"""Runs prediction and returns a dictionary of the prediction results.
@@ -435,8 +448,7 @@ class GradientBoostedDecisionTreeModel(object):
# the right stamp.
with ops.control_dependencies(ensemble_stats):
leaf_index = None
- # Only used in infer (predict), not used in train and eval.
- if self._output_leaf_index and mode == learn.ModeKeys.INFER:
+ if self._output_leaf_index and mode in self._output_leaf_index_modes:
predictions, _, leaf_index = (
prediction_ops).gradient_trees_prediction_verbose(
ensemble_handle,
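The new output_leaf_index_modes argument generalizes the old INFER-only behavior. Below is a standalone sketch of the validation and gating pattern added above, with plain strings standing in for learn.ModeKeys (hypothetical stand-ins, not the TF constants):

# Sketch: "train"/"eval"/"infer" stand in for learn.ModeKeys.TRAIN/EVAL/INFER.
VALID_MODES = ("train", "eval", "infer")

def resolve_leaf_index_modes(output_leaf_index_modes=None):
  if output_leaf_index_modes is None:
    return ["infer"]  # default: leaf indices only during inference
  if not all(mode in VALID_MODES for mode in output_leaf_index_modes):
    raise ValueError("output_leaf_index_modes should only contain ModeKeys.")
  return output_leaf_index_modes

def should_output_leaf_index(output_leaf_index, modes, current_mode):
  # Replaces the old "only in INFER" check with membership in the allowed modes.
  return output_leaf_index and current_mode in modes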
diff --git a/tensorflow/contrib/checkpoint/python/containers.py b/tensorflow/contrib/checkpoint/python/containers.py
index 4d3d531299..242c1e8ba4 100644
--- a/tensorflow/contrib/checkpoint/python/containers.py
+++ b/tensorflow/contrib/checkpoint/python/containers.py
@@ -35,9 +35,9 @@ class UniqueNameTracker(data_structures.CheckpointableDataStructure):
self.slotdeps = tf.contrib.checkpoint.UniqueNameTracker()
slotdeps = self.slotdeps
slots = []
- slots.append(slotdeps.track(tfe.Variable(3.), "x")) # Named "x"
- slots.append(slotdeps.track(tfe.Variable(4.), "y"))
- slots.append(slotdeps.track(tfe.Variable(5.), "x")) # Named "x_1"
+ slots.append(slotdeps.track(tf.Variable(3.), "x")) # Named "x"
+ slots.append(slotdeps.track(tf.Variable(4.), "y"))
+ slots.append(slotdeps.track(tf.Variable(5.), "x")) # Named "x_1"
```
"""
diff --git a/tensorflow/contrib/cloud/README.md b/tensorflow/contrib/cloud/README.md
index 134ce057f4..a80d8965f3 100644
--- a/tensorflow/contrib/cloud/README.md
+++ b/tensorflow/contrib/cloud/README.md
@@ -1,8 +1,8 @@
# Cloud #
-## BigTable ##
+## Cloud Bigtable ##
-[Google Cloud BigTable](https://cloud.google.com/bigtable/) is a high
+[Google Cloud Bigtable](https://cloud.google.com/bigtable/) is a high
performance storage system that can store and serve training data. This contrib
package contains an experimental integration with TensorFlow.
diff --git a/tensorflow/contrib/cloud/__init__.py b/tensorflow/contrib/cloud/__init__.py
index af81106a68..8efd259946 100644
--- a/tensorflow/contrib/cloud/__init__.py
+++ b/tensorflow/contrib/cloud/__init__.py
@@ -25,8 +25,8 @@ from tensorflow.contrib.cloud.python.ops.bigquery_reader_ops import *
from tensorflow.contrib.cloud.python.ops.gcs_config_ops import *
if os.name != 'nt':
- from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigTable
from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableClient
+ from tensorflow.contrib.bigtable.python.ops.bigtable_api import BigtableTable
del os
@@ -34,8 +34,8 @@ from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'BigQueryReader',
- 'BigTable',
'BigtableClient',
+ 'BigtableTable',
'BlockCacheParams',
'configure_colab_session',
'configure_gcs',
diff --git a/tensorflow/contrib/cmake/CMakeLists.txt b/tensorflow/contrib/cmake/CMakeLists.txt
index a0a5b0e00c..6c93487e0d 100644
--- a/tensorflow/contrib/cmake/CMakeLists.txt
+++ b/tensorflow/contrib/cmake/CMakeLists.txt
@@ -145,26 +145,41 @@ if(WIN32)
# temporary fix for #18241
add_definitions(-DEIGEN_DEFAULT_DENSE_INDEX_TYPE=std::int64_t)
endif()
- add_definitions(-DNOMINMAX -D_WIN32_WINNT=0x0A00 -DLANG_CXX11)
- add_definitions(-DWIN32 -DOS_WIN -D_MBCS -DWIN32_LEAN_AND_MEAN -DNOGDI -DPLATFORM_WINDOWS)
+ add_definitions(-DNOMINMAX -D_WIN32_WINNT=0x0A00)
+ add_definitions(-DWIN32_LEAN_AND_MEAN -DNOGDI -DPLATFORM_WINDOWS)
add_definitions(-DTENSORFLOW_USE_EIGEN_THREADPOOL -DEIGEN_HAS_C99_MATH)
add_definitions(-DTF_COMPILE_LIBRARY)
- add_definitions(/bigobj /nologo /EHsc /GF /MP /Gm-)
+ add_compile_options(/bigobj /GF /MP /Gm-)
# Suppress warnings to reduce build log size.
- add_definitions(/wd4267 /wd4244 /wd4800 /wd4503 /wd4554 /wd4996 /wd4348 /wd4018)
- add_definitions(/wd4099 /wd4146 /wd4267 /wd4305 /wd4307)
- add_definitions(/wd4715 /wd4722 /wd4723 /wd4838 /wd4309 /wd4334)
- add_definitions(/wd4003 /wd4244 /wd4267 /wd4503 /wd4506 /wd4800 /wd4996)
+ add_compile_options(/wd4267 /wd4244 /wd4800 /wd4503 /wd4554 /wd4996 /wd4348 /wd4018)
+ add_compile_options(/wd4099 /wd4146 /wd4267 /wd4305 /wd4307)
+ add_compile_options(/wd4715 /wd4722 /wd4723 /wd4838 /wd4309 /wd4334)
+ add_compile_options(/wd4003 /wd4244 /wd4267 /wd4503 /wd4506 /wd4800 /wd4996)
# Suppress linker warnings.
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /ignore:4049 /ignore:4197 /ignore:4217 /ignore:4221")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} /ignore:4049 /ignore:4197 /ignore:4217 /ignore:4221")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /ignore:4049 /ignore:4197 /ignore:4217 /ignore:4221")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
set(CMAKE_CXX_FLAGS_DEBUG "/D_DEBUG /MDd /Ob2")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /D_ITERATOR_DEBUG_LEVEL=0")
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /D_ITERATOR_DEBUG_LEVEL=0")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /D_ITERATOR_DEBUG_LEVEL=0")
+ set(compiler_flags
+ CMAKE_CXX_FLAGS
+ CMAKE_CXX_FLAGS_DEBUG
+ CMAKE_CXX_FLAGS_RELEASE
+ CMAKE_C_FLAGS
+ CMAKE_C_FLAGS_DEBUG
+ CMAKE_C_FLAGS_RELEASE
+ )
+ # Disable C++ exception handling (replace /EHsc with /EHs-c-)
+ foreach(flag ${compiler_flags})
+ string(REPLACE "/EHsc" "/EHs-c-" ${flag} "${${flag}}")
+ endforeach()
+ add_definitions(/D_HAS_EXCEPTIONS=0)
+ # Suppress 'noexcept used with no exception handling mode specified' warning
+ add_compile_options(/wd4577)
+
# Try to avoid flaky failures due to failed generation of generate.stamp files.
set(CMAKE_SUPPRESS_REGENERATION ON)
endif()
@@ -379,16 +394,20 @@ if (tensorflow_ENABLE_GPU)
# by default we assume compute capability 3.5 and 5.2. If you change this, change it in
# CUDA_NVCC_FLAGS and cuda_config.h below
- set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS};-gencode arch=compute_30,code=\"sm_30,compute_30\";-gencode arch=compute_35,code=\"sm_35,compute_35\";-gencode arch=compute_52,code=\"sm_52,compute_52\")
+ set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS};-gencode arch=compute_37,code=\"sm_37,compute_37\")
+ set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS};-gencode arch=compute_52,code=\"sm_52,compute_52\")
+ set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS};-gencode arch=compute_60,code=\"sm_60,compute_60\")
+ set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS};-gencode arch=compute_61,code=\"sm_61,compute_61\")
+ set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS};-gencode arch=compute_70,code=\"sm_70,compute_70\")
set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS};--include-path ${PROJECT_BINARY_DIR}/$\{build_configuration\};--expt-relaxed-constexpr)
set(CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS};-ftz=true) # Flush denormals to zero
set(CUDA_INCLUDE ${CUDA_TOOLKIT_TARGET_DIR} ${CUDA_TOOLKIT_TARGET_DIR}/extras/CUPTI/include)
include_directories(${CUDA_INCLUDE})
if (WIN32)
- add_definitions(-DGOOGLE_CUDA=1 -DTF_EXTRA_CUDA_CAPABILITIES=3.0,3.5,5.2)
+ add_definitions(-DGOOGLE_CUDA=1 -DTF_EXTRA_CUDA_CAPABILITIES=3.7,5.2,6.0,6.1,7.0)
else (WIN32)
- # Without these double quotes, cmake in Linux makes it "-DTF_EXTRA_CUDA_CAPABILITIES=3.0, -D3.5, -D5.2" for cc, which incurs build breaks
- add_definitions(-DGOOGLE_CUDA=1 -D"TF_EXTRA_CUDA_CAPABILITIES=3.0,3.5,5.2")
+ # Without these double quotes, cmake in Linux makes it "-DTF_EXTRA_CUDA_CAPABILITIES=3.7, -D5.2, ..." for cc, which incurs build breaks
+ add_definitions(-DGOOGLE_CUDA=1 -D"TF_EXTRA_CUDA_CAPABILITIES=3.7,5.2,6.0,6.1,7.0")
endif (WIN32)
if (WIN32)
@@ -437,7 +456,7 @@ if (tensorflow_ENABLE_GPU)
FILE(WRITE ${tensorflow_source_dir}/third_party/gpus/cuda/cuda_config.h
"#ifndef CUDA_CUDA_CONFIG_H_\n"
"#define CUDA_CUDA_CONFIG_H_\n"
- "#define TF_CUDA_CAPABILITIES CudaVersion(\"3.0\"),CudaVersion(\"3.5\"),CudaVersion(\"5.2\")\n"
+ "#define TF_CUDA_CAPABILITIES CudaVersion(\"3.7\"),CudaVersion(\"5.2\"),CudaVersion(\"6.0\"),CudaVersion(\"6.1\"),CudaVersion(\"7.0\")\n"
"#define TF_CUDA_VERSION \"64_${short_CUDA_VER}\"\n"
"#define TF_CUDNN_VERSION \"64_${tensorflow_CUDNN_VERSION}\"\n"
"#define TF_CUDA_TOOLKIT_PATH \"${CUDA_TOOLKIT_ROOT_DIR}\"\n"
diff --git a/tensorflow/contrib/cmake/python_modules.txt b/tensorflow/contrib/cmake/python_modules.txt
index 40041d9c88..75e00f3267 100644
--- a/tensorflow/contrib/cmake/python_modules.txt
+++ b/tensorflow/contrib/cmake/python_modules.txt
@@ -62,6 +62,8 @@ tensorflow/python/saved_model
tensorflow/python/summary
tensorflow/python/summary/writer
tensorflow/python/tools
+tensorflow/python/tools/api
+tensorflow/python/tools/api/generator
tensorflow/python/training
tensorflow/python/training/checkpointable
tensorflow/python/user_ops
@@ -69,7 +71,6 @@ tensorflow/python/util
tensorflow/python/util/protobuf
tensorflow/tools
tensorflow/tools/api
-tensorflow/tools/api/generator
tensorflow/tools/graph_transforms
tensorflow/contrib
tensorflow/contrib/all_reduce
diff --git a/tensorflow/contrib/cmake/tf_core_kernels.cmake b/tensorflow/contrib/cmake/tf_core_kernels.cmake
index 844f62649d..7b892ba248 100644
--- a/tensorflow/contrib/cmake/tf_core_kernels.cmake
+++ b/tensorflow/contrib/cmake/tf_core_kernels.cmake
@@ -68,6 +68,7 @@ if(tensorflow_BUILD_CONTRIB_KERNELS)
"${tensorflow_source_dir}/tensorflow/contrib/coder/kernels/range_coder_ops.cc"
"${tensorflow_source_dir}/tensorflow/contrib/coder/kernels/range_coder_ops_util.cc"
"${tensorflow_source_dir}/tensorflow/contrib/coder/ops/coder_ops.cc"
+ "${tensorflow_source_dir}/tensorflow/contrib/data/kernels/assert_next_dataset_op.cc"
"${tensorflow_source_dir}/tensorflow/contrib/data/kernels/csv_dataset_op.cc"
"${tensorflow_source_dir}/tensorflow/contrib/data/kernels/directed_interleave_dataset_op.cc"
"${tensorflow_source_dir}/tensorflow/contrib/data/kernels/ignore_errors_dataset_op.cc"
diff --git a/tensorflow/contrib/cmake/tf_python.cmake b/tensorflow/contrib/cmake/tf_python.cmake
index 8a9172b43c..32b185f07b 100755
--- a/tensorflow/contrib/cmake/tf_python.cmake
+++ b/tensorflow/contrib/cmake/tf_python.cmake
@@ -736,8 +736,8 @@ endif()
# Generate API __init__.py files.
########################################################
-# Parse tensorflow/tools/api/generator/BUILD to get list of generated files.
-FILE(READ ${tensorflow_source_dir}/tensorflow/tools/api/generator/api_gen.bzl api_generator_BUILD_text)
+# Parse tensorflow/python/tools/api/generator/BUILD to get list of generated files.
+FILE(READ ${tensorflow_source_dir}/tensorflow/python/tools/api/generator/api_gen.bzl api_generator_BUILD_text)
STRING(REGEX MATCH "# BEGIN GENERATED FILES.*# END GENERATED FILES" api_init_files_text ${api_generator_BUILD_text})
string(REPLACE "# BEGIN GENERATED FILES" "" api_init_files_text ${api_init_files_text})
string(REPLACE "# END GENERATED FILES" "" api_init_files_text ${api_init_files_text})
@@ -781,7 +781,7 @@ if (tensorflow_ENABLE_MKL_SUPPORT)
# Run create_python_api.py to generate API init files.
COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}/tf_python PATH=${PY_RUNTIME_ENV} ${PYTHON_EXECUTABLE}
- "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/tools/api/generator/create_python_api.py"
+ "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/tools/api/generator/create_python_api.py"
"--root_init_template=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/api_template.__init__.py"
"--apidir=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow"
"--package=tensorflow.python"
@@ -803,7 +803,7 @@ else (tensorflow_ENABLE_MKL_SUPPORT)
# Run create_python_api.py to generate API init files.
COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}/tf_python ${PYTHON_EXECUTABLE}
- "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/tools/api/generator/create_python_api.py"
+ "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/tools/api/generator/create_python_api.py"
"--root_init_template=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/api_template.__init__.py"
"--apidir=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow"
"--package=tensorflow.python"
@@ -824,8 +824,8 @@ add_dependencies(tf_python_api tf_python_ops)
# Generate API __init__.py files for tf.estimator.
########################################################
-# Parse tensorflow/tools/api/generator/BUILD to get list of generated files.
-FILE(READ ${tensorflow_source_dir}/tensorflow/tools/api/generator/api_gen.bzl api_generator_BUILD_text)
+# Parse tensorflow/python/tools/api/generator/BUILD to get list of generated files.
+FILE(READ ${tensorflow_source_dir}/tensorflow/python/tools/api/generator/api_gen.bzl api_generator_BUILD_text)
STRING(REGEX MATCH "# BEGIN GENERATED ESTIMATOR FILES.*# END GENERATED ESTIMATOR FILES" api_init_files_text ${api_generator_BUILD_text})
string(REPLACE "# BEGIN GENERATED ESTIMATOR FILES" "" api_init_files_text ${api_init_files_text})
string(REPLACE "# END GENERATED ESTIMATOR FILES" "" api_init_files_text ${api_init_files_text})
@@ -849,7 +849,7 @@ add_custom_command(
# Run create_python_api.py to generate API init files.
COMMAND ${CMAKE_COMMAND} -E env PYTHONPATH=${CMAKE_CURRENT_BINARY_DIR}/tf_python ${PYTHON_EXECUTABLE}
- "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/tools/api/generator/create_python_api.py"
+ "${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/tools/api/generator/create_python_api.py"
"--apidir=${CMAKE_CURRENT_BINARY_DIR}/tf_python/tensorflow/python/estimator/api"
"--package=tensorflow.python.estimator"
"--apiname=estimator"
diff --git a/tensorflow/contrib/cmake/tf_tests.cmake b/tensorflow/contrib/cmake/tf_tests.cmake
index eb9482dc25..b2330c4e34 100644
--- a/tensorflow/contrib/cmake/tf_tests.cmake
+++ b/tensorflow/contrib/cmake/tf_tests.cmake
@@ -193,6 +193,7 @@ if (tensorflow_BUILD_PYTHON_TESTS)
# flaky test
"${tensorflow_source_dir}/tensorflow/python/profiler/internal/run_metadata_test.py"
"${tensorflow_source_dir}/tensorflow/python/profiler/model_analyzer_test.py"
+ "${tensorflow_source_dir}/tensorflow/python/data/kernel_tests/map_dataset_op_test.py"
# Fails because uses data dependencies with bazel
"${tensorflow_source_dir}/tensorflow/python/saved_model/saved_model_test.py"
"${tensorflow_source_dir}/tensorflow/contrib/image/python/kernel_tests/sparse_image_warp_test.py"
@@ -216,7 +217,8 @@ if (tensorflow_BUILD_PYTHON_TESTS)
${tensorflow_source_dir}/tensorflow/python/kernel_tests/duplicate_op_test.py
${tensorflow_source_dir}/tensorflow/python/kernel_tests/invalid_op_test.py
${tensorflow_source_dir}/tensorflow/python/kernel_tests/ackermann_test.py
-
+ # Tests too large to run.
+ ${tensorflow_source_dir}/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py
)
if (WIN32)
set(tf_test_src_py_exclude
diff --git a/tensorflow/contrib/copy_graph/python/util/copy_elements.py b/tensorflow/contrib/copy_graph/python/util/copy_elements.py
index a0dd3881a8..5931c8a279 100644
--- a/tensorflow/contrib/copy_graph/python/util/copy_elements.py
+++ b/tensorflow/contrib/copy_graph/python/util/copy_elements.py
@@ -18,7 +18,7 @@ These functions allow for recursive copying of elements (ops and variables)
from one graph to another. The copied elements are initialized inside a
user-specified scope in the other graph. There are separate functions to
copy ops and variables.
-There is also a function to retrive the copied version of an op from the
+There is also a function to retrieve the copied version of an op from the
first graph inside a scope in the second graph.
@@copy_op_to_graph
@@ -77,7 +77,7 @@ def copy_variable_to_graph(org_instance, to_graph, scope=''):
else:
collections.append(scope + '/' + name)
- #See if its trainable.
+ #See if it's trainable.
trainable = (
org_instance in org_instance.graph.get_collection(
ops.GraphKeys.TRAINABLE_VARIABLES))
@@ -162,7 +162,7 @@ def copy_op_to_graph(org_instance, to_graph, variables, scope=''):
if isinstance(org_instance, ops.Tensor):
- #If its a Tensor, it is one of the outputs of the underlying
+ #If it's a Tensor, it is one of the outputs of the underlying
#op. Therefore, copy the op itself and return the appropriate
#output.
op = org_instance.op
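A brief usage sketch for the two functions whose docstrings are corrected above, using the signatures visible in the hunk headers (copy variables first, then the ops that consume them); the graphs, names, and scope are illustrative:

import tensorflow as tf
from tensorflow.contrib.copy_graph.python.util import copy_elements

g1 = tf.Graph()
with g1.as_default():
  v = tf.Variable(1.0, name="v")
  out = tf.add(v, 2.0, name="out")

g2 = tf.Graph()
# Copy the variable first, then the op that depends on it.
copied_v = copy_elements.copy_variable_to_graph(v, g2, scope="copied")
copied_out = copy_elements.copy_op_to_graph(out, g2, [copied_v], scope="copied")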
diff --git a/tensorflow/contrib/crf/__init__.py b/tensorflow/contrib/crf/__init__.py
index 046c509626..615e62b16f 100644
--- a/tensorflow/contrib/crf/__init__.py
+++ b/tensorflow/contrib/crf/__init__.py
@@ -20,6 +20,7 @@ See the @{$python/contrib.crf} guide.
@@crf_decode
@@crf_log_likelihood
@@crf_log_norm
+@@crf_multitag_sequence_score
@@crf_sequence_score
@@crf_unary_score
@@CrfDecodeBackwardRnnCell
@@ -36,6 +37,7 @@ from tensorflow.contrib.crf.python.ops.crf import crf_binary_score
from tensorflow.contrib.crf.python.ops.crf import crf_decode
from tensorflow.contrib.crf.python.ops.crf import crf_log_likelihood
from tensorflow.contrib.crf.python.ops.crf import crf_log_norm
+from tensorflow.contrib.crf.python.ops.crf import crf_multitag_sequence_score
from tensorflow.contrib.crf.python.ops.crf import crf_sequence_score
from tensorflow.contrib.crf.python.ops.crf import crf_unary_score
from tensorflow.contrib.crf.python.ops.crf import CrfDecodeBackwardRnnCell
diff --git a/tensorflow/contrib/crf/python/kernel_tests/crf_test.py b/tensorflow/contrib/crf/python/kernel_tests/crf_test.py
index 74f2ec22ff..f56a973f6f 100644
--- a/tensorflow/contrib/crf/python/kernel_tests/crf_test.py
+++ b/tensorflow/contrib/crf/python/kernel_tests/crf_test.py
@@ -31,6 +31,15 @@ from tensorflow.python.platform import test
class CrfTest(test.TestCase):
+ def calculateSequenceScore(self, inputs, transition_params, tag_indices,
+ sequence_lengths):
+ expected_unary_score = sum(
+ inputs[i][tag_indices[i]] for i in range(sequence_lengths))
+ expected_binary_score = sum(
+ transition_params[tag_indices[i], tag_indices[i + 1]]
+ for i in range(sequence_lengths - 1))
+ return expected_unary_score + expected_binary_score
+
def testCrfSequenceScore(self):
transition_params = np.array(
[[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
@@ -60,14 +69,55 @@ class CrfTest(test.TestCase):
transition_params=constant_op.constant(transition_params))
sequence_score = array_ops.squeeze(sequence_score, [0])
tf_sequence_score = sess.run(sequence_score)
- expected_unary_score = sum(inputs[i][tag_indices[i]]
- for i in range(sequence_lengths))
- expected_binary_score = sum(
- transition_params[tag_indices[i], tag_indices[i + 1]]
- for i in range(sequence_lengths - 1))
- expected_sequence_score = expected_unary_score + expected_binary_score
+ expected_sequence_score = self.calculateSequenceScore(
+ inputs, transition_params, tag_indices, sequence_lengths)
self.assertAllClose(tf_sequence_score, expected_sequence_score)
+ def testCrfMultiTagSequenceScore(self):
+ transition_params = np.array(
+ [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
+ # Test both the length-1 and regular cases.
+ sequence_lengths_list = [
+ np.array(3, dtype=np.int32),
+ np.array(1, dtype=np.int32)
+ ]
+ inputs_list = [
+ np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]],
+ dtype=np.float32),
+ np.array([[4, 5, -3]],
+ dtype=np.float32),
+ ]
+ tag_bitmap_list = [
+ np.array(
+ [[True, True, False], [True, False, True], [False, True, True],
+ [True, False, True]],
+ dtype=np.bool),
+ np.array([[True, True, False]], dtype=np.bool)
+ ]
+ for sequence_lengths, inputs, tag_bitmap in zip(
+ sequence_lengths_list, inputs_list, tag_bitmap_list):
+ with self.test_session() as sess:
+ sequence_score = crf.crf_multitag_sequence_score(
+ inputs=array_ops.expand_dims(inputs, 0),
+ tag_bitmap=array_ops.expand_dims(tag_bitmap, 0),
+ sequence_lengths=array_ops.expand_dims(sequence_lengths, 0),
+ transition_params=constant_op.constant(transition_params))
+ sequence_score = array_ops.squeeze(sequence_score, [0])
+ tf_sum_sequence_score = sess.run(sequence_score)
+ all_indices_list = [
+ single_index_bitmap.nonzero()[0]
+ for single_index_bitmap in tag_bitmap[:sequence_lengths]
+ ]
+ expected_sequence_scores = [
+ self.calculateSequenceScore(inputs, transition_params, indices,
+ sequence_lengths)
+ for indices in itertools.product(*all_indices_list)
+ ]
+ expected_log_sum_exp_sequence_scores = np.logaddexp.reduce(
+ expected_sequence_scores)
+ self.assertAllClose(tf_sum_sequence_score,
+ expected_log_sum_exp_sequence_scores)
+
def testCrfUnaryScore(self):
inputs = np.array(
[[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
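A worked instance of the quantity the new calculateSequenceScore helper computes (unary potentials along the chosen tags plus pairwise transition scores), using the same unary and transition arrays as the test; the tag choice below is an illustrative example:

import numpy as np

inputs = np.array(
    [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32)
transition_params = np.array(
    [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32)
tag_indices = [1, 2, 1]
seq_len = 3

unary = sum(inputs[i][tag_indices[i]] for i in range(seq_len))      # 5 + 3 + 2 = 10
binary = sum(transition_params[tag_indices[i], tag_indices[i + 1]]
             for i in range(seq_len - 1))                           # 1 + 2 = 3
score = unary + binary                                              # 13.0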
diff --git a/tensorflow/contrib/crf/python/ops/crf.py b/tensorflow/contrib/crf/python/ops/crf.py
index 2d2cbdc199..8a7ff61bc8 100644
--- a/tensorflow/contrib/crf/python/ops/crf.py
+++ b/tensorflow/contrib/crf/python/ops/crf.py
@@ -67,7 +67,7 @@ __all__ = [
"crf_sequence_score", "crf_log_norm", "crf_log_likelihood",
"crf_unary_score", "crf_binary_score", "CrfForwardRnnCell",
"viterbi_decode", "crf_decode", "CrfDecodeForwardRnnCell",
- "CrfDecodeBackwardRnnCell"
+ "CrfDecodeBackwardRnnCell", "crf_multitag_sequence_score"
]
@@ -114,6 +114,56 @@ def crf_sequence_score(inputs, tag_indices, sequence_lengths,
false_fn=_multi_seq_fn)
+def crf_multitag_sequence_score(inputs, tag_bitmap, sequence_lengths,
+ transition_params):
+ """Computes the unnormalized score of all tag sequences matching tag_bitmap.
+
+ tag_bitmap enables more than one tag to be considered correct at each time
+ step. This is useful when an observed output at a given time step is
+ consistent with more than one tag, and thus the log likelihood of that
+ observation must take into account all possible consistent tags.
+
+ Using one-hot vectors in tag_bitmap gives results identical to
+ crf_sequence_score.
+
+ Args:
+ inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
+ to use as input to the CRF layer.
+ tag_bitmap: A [batch_size, max_seq_len, num_tags] boolean tensor
+ representing all active tags at each index for which to calculate the
+ unnormalized score.
+ sequence_lengths: A [batch_size] vector of true sequence lengths.
+ transition_params: A [num_tags, num_tags] transition matrix.
+ Returns:
+ sequence_scores: A [batch_size] vector of unnormalized sequence scores.
+ """
+
+ # If max_seq_len is 1, we skip the score calculation and simply gather the
+ # unary potentials of all active tags.
+ def _single_seq_fn():
+ filtered_inputs = array_ops.where(
+ tag_bitmap, inputs,
+ array_ops.fill(array_ops.shape(inputs), float("-inf")))
+ return math_ops.reduce_logsumexp(
+ filtered_inputs, axis=[1, 2], keepdims=False)
+
+ def _multi_seq_fn():
+ # Compute the logsumexp of all scores of sequences matching the given tags.
+ filtered_inputs = array_ops.where(
+ tag_bitmap, inputs,
+ array_ops.fill(array_ops.shape(inputs), float("-inf")))
+ return crf_log_norm(
+ inputs=filtered_inputs,
+ sequence_lengths=sequence_lengths,
+ transition_params=transition_params)
+
+ return utils.smart_cond(
+ pred=math_ops.equal(inputs.shape[1].value or array_ops.shape(inputs)[1],
+ 1),
+ true_fn=_single_seq_fn,
+ false_fn=_multi_seq_fn)
+
+
def crf_log_norm(inputs, sequence_lengths, transition_params):
"""Computes the normalization for a CRF.
diff --git a/tensorflow/contrib/data/__init__.py b/tensorflow/contrib/data/__init__.py
index 675330716b..7878e46e88 100644
--- a/tensorflow/contrib/data/__init__.py
+++ b/tensorflow/contrib/data/__init__.py
@@ -52,6 +52,7 @@ See @{$guide/datasets$Importing Data} for an overview.
@@prefetch_to_device
@@read_batch_features
@@rejection_resample
+@@reduce_dataset
@@sample_from_datasets
@@scan
@@shuffle_and_repeat
@@ -77,6 +78,7 @@ from tensorflow.contrib.data.python.ops.counter import Counter
from tensorflow.contrib.data.python.ops.enumerate_ops import enumerate_dataset
from tensorflow.contrib.data.python.ops.error_ops import ignore_errors
from tensorflow.contrib.data.python.ops.get_single_element import get_single_element
+from tensorflow.contrib.data.python.ops.get_single_element import reduce_dataset
from tensorflow.contrib.data.python.ops.grouping import bucket_by_sequence_length
from tensorflow.contrib.data.python.ops.grouping import group_by_reducer
from tensorflow.contrib.data.python.ops.grouping import group_by_window
diff --git a/tensorflow/contrib/data/kernels/BUILD b/tensorflow/contrib/data/kernels/BUILD
index 7b69e10441..566cbb246a 100644
--- a/tensorflow/contrib/data/kernels/BUILD
+++ b/tensorflow/contrib/data/kernels/BUILD
@@ -71,8 +71,19 @@ cc_library(
)
cc_library(
+ name = "assert_next_dataset_op",
+ srcs = ["assert_next_dataset_op.cc"],
+ deps = [
+ "//tensorflow/core:framework_headers_lib",
+ "//third_party/eigen3",
+ "@protobuf_archive//:protobuf_headers",
+ ],
+)
+
+cc_library(
name = "dataset_kernels",
deps = [
+ ":assert_next_dataset_op",
":csv_dataset_op",
":directed_interleave_dataset_op",
":ignore_errors_dataset_op",
diff --git a/tensorflow/contrib/data/kernels/assert_next_dataset_op.cc b/tensorflow/contrib/data/kernels/assert_next_dataset_op.cc
new file mode 100644
index 0000000000..95b8e1f7fd
--- /dev/null
+++ b/tensorflow/contrib/data/kernels/assert_next_dataset_op.cc
@@ -0,0 +1,152 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <map>
+
+#include "tensorflow/core/framework/dataset.h"
+#include "tensorflow/core/framework/partial_tensor_shape.h"
+#include "tensorflow/core/framework/tensor.h"
+
+namespace tensorflow {
+namespace {
+
+// See documentation in ../ops/dataset_ops.cc for a high-level
+// description of the following op.
+class AssertNextDatasetOp : public UnaryDatasetOpKernel {
+ public:
+ explicit AssertNextDatasetOp(OpKernelConstruction* ctx)
+ : UnaryDatasetOpKernel(ctx) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("output_types", &output_types_));
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("output_shapes", &output_shapes_));
+ }
+
+ protected:
+ void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
+ DatasetBase** output) override {
+ std::vector<string> transformations;
+ OP_REQUIRES_OK(ctx, ParseVectorArgument<string>(ctx, "transformations",
+ &transformations));
+ *output =
+ new Dataset(ctx, input, transformations, output_types_, output_shapes_);
+ }
+
+ private:
+ class Dataset : public GraphDatasetBase {
+ public:
+ Dataset(OpKernelContext* ctx, const DatasetBase* input,
+ const std::vector<string>& transformations,
+ const DataTypeVector& output_types,
+ const std::vector<PartialTensorShape>& output_shapes)
+ : GraphDatasetBase(ctx),
+ input_(input),
+ transformations_(transformations),
+ output_types_(output_types),
+ output_shapes_(output_shapes) {
+ input_->Ref();
+ }
+
+ ~Dataset() override { input_->Unref(); }
+
+ std::unique_ptr<IteratorBase> MakeIteratorInternal(
+ const string& prefix) const override {
+ return std::unique_ptr<IteratorBase>(
+ new Iterator({this, strings::StrCat(prefix, "::Assert")}));
+ }
+
+ const DataTypeVector& output_dtypes() const override {
+ return output_types_;
+ }
+ const std::vector<PartialTensorShape>& output_shapes() const override {
+ return output_shapes_;
+ }
+
+ string DebugString() const override {
+ return "AssertNextDatasetOp::Dataset";
+ }
+
+ protected:
+ Status AsGraphDefInternal(OpKernelContext* ctx, DatasetGraphDefBuilder* b,
+ Node** output) const override {
+ Node* input_graph_node = nullptr;
+ TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node));
+ Node* transformations_node = nullptr;
+ TF_RETURN_IF_ERROR(b->AddVector(transformations_, &transformations_node));
+ TF_RETURN_IF_ERROR(b->AddDataset(
+ this, {input_graph_node, transformations_node}, output));
+ return Status::OK();
+ }
+
+ private:
+ class Iterator : public DatasetIterator<Dataset> {
+ public:
+ explicit Iterator(const Params& params)
+ : DatasetIterator<Dataset>(params) {}
+
+ Status Initialize(IteratorContext* ctx) override {
+ std::vector<string> tokens =
+ str_util::Split(prefix(), ':', str_util::SkipEmpty());
+ if (dataset()->transformations_.size() > tokens.size() - 2) {
+ return errors::InvalidArgument(
+ "Asserted next ", dataset()->transformations_.size(),
+ " transformations but encountered only ", tokens.size() - 2, ".");
+ }
+ int n = tokens.size();
+ for (size_t i = 0; i < dataset()->transformations_.size(); ++i) {
+ if (dataset()->transformations_[i] != tokens[n - 2 - i]) {
+ return errors::InvalidArgument(
+ "Asserted ", dataset()->transformations_[i],
+ " transformation at offset ", i, " but encountered ",
+ tokens[n - 2 - i], " transformation instead.");
+ }
+ }
+ return dataset()->input_->MakeIterator(ctx, prefix(), &input_impl_);
+ }
+
+ Status GetNextInternal(IteratorContext* ctx,
+ std::vector<Tensor>* out_tensors,
+ bool* end_of_sequence) override {
+ return input_impl_->GetNext(ctx, out_tensors, end_of_sequence);
+ }
+
+ protected:
+ Status SaveInternal(IteratorStateWriter* writer) override {
+ TF_RETURN_IF_ERROR(SaveParent(writer, input_impl_));
+ return Status::OK();
+ }
+
+ Status RestoreInternal(IteratorContext* ctx,
+ IteratorStateReader* reader) override {
+ TF_RETURN_IF_ERROR(RestoreParent(ctx, reader, input_impl_));
+ return Status::OK();
+ }
+
+ private:
+ std::unique_ptr<IteratorBase> input_impl_;
+ };
+
+ const DatasetBase* input_;
+ const std::vector<string> transformations_;
+ const DataTypeVector output_types_;
+ const std::vector<PartialTensorShape> output_shapes_;
+ };
+
+ DataTypeVector output_types_;
+ std::vector<PartialTensorShape> output_shapes_;
+};
+
+REGISTER_KERNEL_BUILDER(Name("AssertNextDataset").Device(DEVICE_CPU),
+ AssertNextDatasetOp);
+
+} // namespace
+} // namespace tensorflow
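The Initialize method above compares the asserted transformation names against the trailing components of the iterator prefix. A plain-Python sketch of that matching logic, with a hypothetical prefix string and transformation names:

def check_asserted_transformations(prefix, transformations):
  # e.g. prefix = "Iterator::Map::Batch::Assert", transformations = ["Batch", "Map"]
  tokens = [t for t in prefix.split(":") if t]  # split on ':' and skip empties
  if len(transformations) > len(tokens) - 2:
    raise ValueError("Asserted %d transformations but encountered only %d."
                     % (len(transformations), len(tokens) - 2))
  n = len(tokens)
  for i, name in enumerate(transformations):
    # Walk backwards from the token just before the Assert dataset itself.
    if name != tokens[n - 2 - i]:
      raise ValueError("Asserted %s transformation at offset %d but encountered "
                       "%s transformation instead." % (name, i, tokens[n - 2 - i]))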
diff --git a/tensorflow/contrib/data/kernels/csv_dataset_op.cc b/tensorflow/contrib/data/kernels/csv_dataset_op.cc
index 4657807785..f7e3ed886c 100644
--- a/tensorflow/contrib/data/kernels/csv_dataset_op.cc
+++ b/tensorflow/contrib/data/kernels/csv_dataset_op.cc
@@ -18,7 +18,10 @@ limitations under the License.
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
+#include "tensorflow/core/lib/io/inputstream_interface.h"
#include "tensorflow/core/lib/io/random_inputstream.h"
+#include "tensorflow/core/lib/io/zlib_compression_options.h"
+#include "tensorflow/core/lib/io/zlib_inputstream.h"
namespace tensorflow {
namespace {
@@ -37,6 +40,10 @@ class CSVDatasetOp : public DatasetOpKernel {
ctx, filenames_tensor->dims() <= 1,
errors::InvalidArgument("`filenames` must be a scalar or a vector."));
+ string compression_type;
+ OP_REQUIRES_OK(ctx, ParseScalarArgument<string>(ctx, "compression_type",
+ &compression_type));
+
OpInputList record_defaults_list;
OP_REQUIRES_OK(ctx,
ctx->input_list("record_defaults", &record_defaults_list));
@@ -86,6 +93,19 @@ class CSVDatasetOp : public DatasetOpKernel {
filenames.push_back(filenames_tensor->flat<string>()(i));
}
+ io::ZlibCompressionOptions zlib_compression_options =
+ io::ZlibCompressionOptions::DEFAULT();
+ if (compression_type == "ZLIB") {
+ zlib_compression_options = io::ZlibCompressionOptions::DEFAULT();
+ } else if (compression_type == "GZIP") {
+ zlib_compression_options = io::ZlibCompressionOptions::GZIP();
+ } else {
+ OP_REQUIRES(ctx, compression_type.empty(),
+ errors::InvalidArgument(
+ "Unsupported compression_type: ", compression_type, "."));
+ }
+ zlib_compression_options.input_buffer_size = buffer_size;
+
std::vector<int64> select_cols;
select_cols.reserve(select_cols_tensor->NumElements());
for (int i = 0; i < select_cols_tensor->NumElements(); ++i) {
@@ -103,7 +123,8 @@ class CSVDatasetOp : public DatasetOpKernel {
ctx, select_cols.empty() || select_cols.front() >= 0,
errors::InvalidArgument("select_cols should be non-negative indices"));
- *output = new Dataset(ctx, std::move(filenames), header, buffer_size,
+ *output = new Dataset(ctx, std::move(filenames), header,
+ std::move(compression_type), zlib_compression_options,
output_types_, output_shapes_,
std::move(record_defaults), std::move(select_cols),
use_quote_delim, delim[0], std::move(na_value));
@@ -113,21 +134,24 @@ class CSVDatasetOp : public DatasetOpKernel {
class Dataset : public GraphDatasetBase {
public:
Dataset(OpKernelContext* ctx, std::vector<string> filenames, bool header,
- int64 buffer_size, const DataTypeVector& output_types,
+ string compression_type, io::ZlibCompressionOptions options,
+ const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes,
std::vector<Tensor> record_defaults, std::vector<int64> select_cols,
bool use_quote_delim, char delim, string na_value)
: GraphDatasetBase(ctx),
filenames_(std::move(filenames)),
header_(header),
- buffer_size_(buffer_size),
out_type_(output_types),
output_shapes_(output_shapes),
record_defaults_(std::move(record_defaults)),
select_cols_(std::move(select_cols)),
use_quote_delim_(use_quote_delim),
delim_(delim),
- na_value_(std::move(na_value)) {}
+ na_value_(std::move(na_value)),
+ use_compression_(!compression_type.empty()),
+ compression_type_(std::move(compression_type)),
+ options_(options) {}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
@@ -146,10 +170,45 @@ class CSVDatasetOp : public DatasetOpKernel {
protected:
Status AsGraphDefInternal(DatasetGraphDefBuilder* b,
Node** output) const override {
- // TODO(rachelim): Implement this
- std::vector<Node*> input_tensors;
- TF_RETURN_IF_ERROR(b->AddDataset(this, input_tensors, output));
- return errors::Unimplemented("CSVDataset: AsGraphDefInternal");
+ Node* filenames = nullptr;
+ Node* compression_type = nullptr;
+ Node* buffer_size = nullptr;
+ Node* header = nullptr;
+ Node* delim = nullptr;
+ Node* use_quote_delim = nullptr;
+ Node* na_value = nullptr;
+ Node* select_cols = nullptr;
+
+ std::vector<Node*> record_defaults;
+ record_defaults.reserve(record_defaults_.size());
+ for (const Tensor& t : record_defaults_) {
+ Node* node;
+ TF_RETURN_IF_ERROR(b->AddTensor(t, &node));
+ record_defaults.emplace_back(node);
+ }
+
+ TF_RETURN_IF_ERROR(b->AddVector(filenames_, &filenames));
+ TF_RETURN_IF_ERROR(b->AddScalar(compression_type_, &compression_type));
+ TF_RETURN_IF_ERROR(
+ b->AddScalar(options_.input_buffer_size, &buffer_size));
+ TF_RETURN_IF_ERROR(b->AddScalar(header_, &header));
+
+ string delim_string(1, delim_);
+ TF_RETURN_IF_ERROR(b->AddScalar(delim_string, &delim));
+ TF_RETURN_IF_ERROR(b->AddScalar(use_quote_delim_, &use_quote_delim));
+ TF_RETURN_IF_ERROR(b->AddScalar(na_value_, &na_value));
+ TF_RETURN_IF_ERROR(b->AddVector(select_cols_, &select_cols));
+
+ TF_RETURN_IF_ERROR(b->AddDataset(
+ this,
+ {std::make_pair(0, filenames), std::make_pair(1, compression_type),
+ std::make_pair(2, buffer_size), std::make_pair(3, header),
+ std::make_pair(4, delim), std::make_pair(5, use_quote_delim),
+ std::make_pair(6, na_value),
+ std::make_pair(7, select_cols)}, // Single tensor inputs
+ {std::make_pair(8, record_defaults)}, // Tensor list inputs
+ {}, output));
+ return Status::OK();
}
private:
@@ -201,14 +260,58 @@ class CSVDatasetOp : public DatasetOpKernel {
protected:
Status SaveInternal(IteratorStateWriter* writer) override {
mutex_lock l(mu_);
- // TODO(rachelim): Implement save
- return errors::Unimplemented("CSVDataset: SaveInternal");
+ TF_RETURN_IF_ERROR(writer->WriteScalar(full_name("current_file_index"),
+ current_file_index_));
+ // `input_stream_` is empty if
+ // 1. GetNext has not been called even once.
+ // 2. All files have been read and the iterator has been exhausted.
+ if (input_stream_ && num_buffer_reads_ > 0) {
+ TF_RETURN_IF_ERROR(writer->WriteScalar(full_name("pos"), pos_));
+ // If num_buffer_reads_ == 0, the buffer hasn't been filled even once.
+ TF_RETURN_IF_ERROR(writer->WriteScalar(full_name("num_buffer_reads"),
+ num_buffer_reads_));
+ }
+ return Status::OK();
}
+
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
- // TODO(rachelim): Implement restore
- return errors::Unimplemented("CSVDataset: RestoreInternal");
+ ResetStreamsLocked();
+ int64 current_file_index;
+ TF_RETURN_IF_ERROR(reader->ReadScalar(full_name("current_file_index"),
+ &current_file_index));
+ current_file_index_ = size_t(current_file_index);
+ // The keys "pos" and "num_buffer_reads" are written only if
+ // the iterator was saved with an open, partially read file.
+ if (reader->Contains(full_name("pos"))) {
+ int64 pos, num_buffer_reads;
+ TF_RETURN_IF_ERROR(reader->ReadScalar(full_name("pos"), &pos));
+ TF_RETURN_IF_ERROR(reader->ReadScalar(full_name("num_buffer_reads"),
+ &num_buffer_reads));
+
+ TF_RETURN_IF_ERROR(SetupStreamsLocked(ctx->env()));
+
+ num_buffer_reads_ = size_t(num_buffer_reads - 1);
+
+ // Restores the most recently held buffer
+ Status s = input_stream_->SkipNBytes(
+ num_buffer_reads_ * dataset()->options_.input_buffer_size);
+ if (!s.ok() && !errors::IsOutOfRange(s)) {
+ // We might get out of range error here if the size of the file
+ // is not an exact multiple of the buffer size, and the last buffer
+ // read is < buffer_size. This is valid and we do not surface the
+ // error.
+ return s;
+ }
+
+ Status s2 = FillBuffer(&buffer_);
+ if (!s2.ok() && !errors::IsOutOfRange(s2)) {
+ return s2;
+ }
+ pos_ = size_t(pos);
+ }
+ return Status::OK();
}
private:
@@ -510,7 +613,9 @@ class CSVDatasetOp : public DatasetOpKernel {
Status FillBuffer(string* result) EXCLUSIVE_LOCKS_REQUIRED(mu_) {
result->clear();
- Status s = input_stream_->ReadNBytes(dataset()->buffer_size_, result);
+ ++num_buffer_reads_;
+ Status s = input_stream_->ReadNBytes(
+ dataset()->options_.input_buffer_size, result);
if (errors::IsOutOfRange(s) && !result->empty()) {
// Ignore OutOfRange error when ReadNBytes read < N bytes.
@@ -675,10 +780,20 @@ class CSVDatasetOp : public DatasetOpKernel {
// Actually move on to next file.
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(
dataset()->filenames_[current_file_index_], &file_));
- input_stream_.reset(
- new io::RandomAccessInputStream(file_.get(), false));
+ random_access_input_stream_ =
+ std::make_shared<io::RandomAccessInputStream>(file_.get(), false);
+
+ if (dataset()->use_compression_) {
+ input_stream_ = std::make_shared<io::ZlibInputStream>(
+ random_access_input_stream_.get(),
+ dataset()->options_.input_buffer_size,
+ dataset()->options_.input_buffer_size, dataset()->options_);
+ } else {
+ input_stream_ = random_access_input_stream_;
+ }
buffer_.clear();
pos_ = 0;
+ num_buffer_reads_ = 0;
if (dataset()->header_) {
// Read one line, but don't include it. Pass nullptrs as dummy
// pointers to objects that shouldn't be invoked anyway
@@ -704,8 +819,10 @@ class CSVDatasetOp : public DatasetOpKernel {
string buffer_ GUARDED_BY(mu_); // Maintain our own buffer
size_t pos_ GUARDED_BY(
mu_); // Index into the buffer must be maintained between iters
- std::unique_ptr<io::RandomAccessInputStream> input_stream_
+ size_t num_buffer_reads_ GUARDED_BY(mu_);
+ std::shared_ptr<io::RandomAccessInputStream> random_access_input_stream_
GUARDED_BY(mu_);
+ std::shared_ptr<io::InputStreamInterface> input_stream_ GUARDED_BY(mu_);
size_t current_file_index_ GUARDED_BY(mu_) = 0;
std::unique_ptr<RandomAccessFile> file_
GUARDED_BY(mu_); // must outlive input_stream_
@@ -713,7 +830,6 @@ class CSVDatasetOp : public DatasetOpKernel {
const std::vector<string> filenames_;
const bool header_;
- const int64 buffer_size_;
const DataTypeVector out_type_;
const std::vector<PartialTensorShape> output_shapes_;
const std::vector<Tensor> record_defaults_;
@@ -721,6 +837,9 @@ class CSVDatasetOp : public DatasetOpKernel {
const bool use_quote_delim_;
const char delim_;
const string na_value_;
+ const bool use_compression_;
+ const string compression_type_;
+ const io::ZlibCompressionOptions options_;
}; // class Dataset
DataTypeVector output_types_;
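With the kernel now routing a compression_type through to a ZlibInputStream, compressed CSV files can be read directly. A usage sketch, assuming the Python contrib CsvDataset wrapper exposes the new compression_type argument added by this change; the file name and column defaults are illustrative:

import tensorflow as tf

# Hypothetical gzip-compressed CSV with a header row and two columns.
dataset = tf.contrib.data.CsvDataset(
    ["data.csv.gz"],
    record_defaults=[tf.float32, tf.int64],
    compression_type="GZIP",  # "" (default), "ZLIB", or "GZIP" per the kernel above
    header=True)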
diff --git a/tensorflow/contrib/data/kernels/prefetching_kernels.cc b/tensorflow/contrib/data/kernels/prefetching_kernels.cc
index b3d464d716..6edc61b2c2 100644
--- a/tensorflow/contrib/data/kernels/prefetching_kernels.cc
+++ b/tensorflow/contrib/data/kernels/prefetching_kernels.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include <deque>
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
+#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_op_kernel.h"
@@ -23,6 +24,7 @@ limitations under the License.
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
+namespace {
struct BufferElement {
// The producer sets `status` if getting the input element fails.
@@ -473,4 +475,465 @@ class IteratorGetDeviceOp : public OpKernel {
REGISTER_KERNEL_BUILDER(Name("IteratorGetDevice").Device(DEVICE_CPU),
IteratorGetDeviceOp);
+Status VerifyTypesMatch(const DataTypeVector& expected,
+ const DataTypeVector& received) {
+ if (expected.size() != received.size()) {
+ return errors::InvalidArgument(
+ "Number of components does not match: expected ", expected.size(),
+ " types but got ", received.size(), ".");
+ }
+ for (size_t i = 0; i < expected.size(); ++i) {
+ if (expected[i] != received[i]) {
+ return errors::InvalidArgument("Data type mismatch at component ", i,
+ ": expected ", DataTypeString(expected[i]),
+ " but got ", DataTypeString(received[i]),
+ ".");
+ }
+ }
+ return Status::OK();
+}
+
+Status VerifyShapesCompatible(const std::vector<PartialTensorShape>& expected,
+ const std::vector<PartialTensorShape>& received) {
+ if (expected.size() != received.size()) {
+ return errors::InvalidArgument(
+ "Number of components does not match: expected ", expected.size(),
+ " shapes but got ", received.size(), ".");
+ }
+ for (size_t i = 0; i < expected.size(); ++i) {
+ if (!expected[i].IsCompatibleWith(received[i])) {
+ return errors::InvalidArgument("Incompatible shapes at component ", i,
+ ": expected ", expected[i].DebugString(),
+ " but got ", received[i].DebugString(),
+ ".");
+ }
+ }
+
+ return Status::OK();
+}
+
+string SanitizeThreadSuffix(string suffix) {
+ string clean;
+ for (int i = 0; i < suffix.size(); ++i) {
+ const char ch = suffix[i];
+ if ((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') || ch == '_' || ch == '-') {
+ clean += ch;
+ } else {
+ clean += '_';
+ }
+ }
+ return clean;
+}
+
+class MultiDeviceIterator : public ResourceBase {
+ public:
+ MultiDeviceIterator(const DataTypeVector& output_types,
+ const std::vector<PartialTensorShape>& output_shapes,
+ const std::vector<string>& devices,
+ std::unique_ptr<FunctionLibraryDefinition> flib_def,
+ std::unique_ptr<ProcessFunctionLibraryRuntime> pflr,
+ FunctionLibraryRuntime* lib)
+ : output_types_(output_types),
+ output_shapes_(output_shapes),
+ devices_(devices),
+ flib_def_(std::move(flib_def)),
+ pflr_(std::move(pflr)),
+ lib_(lib) {
+ buffer_.resize(devices_.size());
+ }
+
+ string DebugString() override {
+ return strings::StrCat("MultiDeviceIterator");
+ }
+
+ Status Init(std::unique_ptr<IteratorBase> iterator, int64* incarnation_id) {
+ mutex_lock l(mu_);
+ if (iterator) {
+ TF_RETURN_IF_ERROR(
+ VerifyTypesMatch(output_types_, iterator->output_dtypes()));
+ TF_RETURN_IF_ERROR(
+ VerifyShapesCompatible(output_shapes_, iterator->output_shapes()));
+ }
+ host_iterator_.reset(iterator.release());
+ incarnation_id_++;
+ *incarnation_id = incarnation_id_;
+ max_buffer_size_ = 0;
+ num_elements_ = 0;
+ buffer_.clear();
+ buffer_.resize(devices_.size());
+ return Status::OK();
+ }
+
+ Status GetNextFromShard(IteratorContext* ctx, int shard_num,
+ int64 incarnation_id,
+ std::vector<Tensor>* out_tensors,
+ bool* end_of_sequence) {
+ // TODO(rohanj): This might potentially strand elements in other shards.
+ // Opportunity to do smarter locking semantics.
+ mutex_lock l(mu_);
+ // Make sure we're in the right incarnation.
+ if (incarnation_id != incarnation_id_) {
+ return errors::InvalidArgument(
+ "Current incarnation: ", incarnation_id_,
+ "; Supplied incarnation: ", incarnation_id);
+ }
+ // Then look it up in the buffer.
+ if (!buffer_[shard_num].empty()) {
+ const HostBufferElement& elem = buffer_[shard_num].front();
+ *out_tensors = elem.value;
+ *end_of_sequence = elem.end_of_sequence;
+ Status s = elem.status;
+ buffer_[shard_num].pop_front();
+ return s;
+ }
+ std::shared_ptr<IteratorBase> captured_iterator(host_iterator_);
+ if (captured_iterator) {
+ if (lib_ != nullptr) {
+ ctx->set_lib(lib_);
+ }
+ while (true) {
+ HostBufferElement elem;
+ elem.status =
+ captured_iterator->GetNext(ctx, &elem.value, &elem.end_of_sequence);
+ int buffer_index = num_elements_ % devices_.size();
+ num_elements_++;
+ if (buffer_index == shard_num) {
+ out_tensors->swap(elem.value);
+ *end_of_sequence = elem.end_of_sequence;
+ return elem.status;
+ } else {
+ buffer_[buffer_index].push_back(std::move(elem));
+ // TODO(rohanj): Put an upper bound to buffer size.
+ if (buffer_[buffer_index].size() > max_buffer_size_) {
+ max_buffer_size_ = buffer_[buffer_index].size();
+ VLOG(1) << "MultiDeviceIterator: Max buffer size increased to: "
+ << max_buffer_size_;
+ }
+ }
+ }
+ } else {
+ return errors::FailedPrecondition("Iterator not initialized");
+ }
+ return Status::OK();
+ }
+
+ const DataTypeVector& output_types() const { return output_types_; }
+
+ const std::vector<PartialTensorShape>& output_shapes() const {
+ return output_shapes_;
+ }
+
+ std::shared_ptr<const FunctionLibraryDefinition> function_library() {
+ tf_shared_lock l(mu_);
+ return lib_def_;
+ }
+
+ private:
+ struct HostBufferElement {
+ Status status;
+ bool end_of_sequence;
+ std::vector<Tensor> value;
+ };
+
+ mutex mu_;
+ const DataTypeVector output_types_;
+ const std::vector<PartialTensorShape> output_shapes_;
+ const std::vector<string> devices_;
+ int64 num_elements_ GUARDED_BY(mu_) = 0;
+ int64 max_buffer_size_ GUARDED_BY(mu_) = 0;
+ int64 incarnation_id_ GUARDED_BY(mu_) = 0;
+ std::vector<std::deque<HostBufferElement>> buffer_ GUARDED_BY(mu_);
+ std::unique_ptr<FunctionLibraryDefinition> flib_def_;
+ std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
+ FunctionLibraryRuntime* lib_ = nullptr; // not owned.
+ std::shared_ptr<IteratorBase> host_iterator_;
+ std::shared_ptr<const FunctionLibraryDefinition> lib_def_ GUARDED_BY(mu_);
+};
+
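The GetNextFromShard method above hands out the host iterator's elements round-robin: element k goes to shard k % devices_.size(), and a request for one shard buffers any elements that belong to other shards until they are asked for. A minimal Python sketch of that bookkeeping, for illustration only (the class name is invented, and the sketch ignores the locking, incarnation checks, and error propagation the kernel performs):

    from collections import deque

    class RoundRobinShards(object):
      """Round-robin element assignment with per-shard buffering."""

      def __init__(self, iterator, num_shards):
        self._iterator = iterator
        self._num_shards = num_shards
        self._num_elements = 0
        self._buffers = [deque() for _ in range(num_shards)]

      def get_next_from_shard(self, shard_num):
        # Serve a previously buffered element if one is waiting for this shard.
        if self._buffers[shard_num]:
          return self._buffers[shard_num].popleft()
        # Otherwise pull from the host iterator, buffering elements that belong
        # to other shards, until this shard's turn comes around.
        while True:
          value = next(self._iterator)  # raises StopIteration at end of data
          index = self._num_elements % self._num_shards
          self._num_elements += 1
          if index == shard_num:
            return value
          self._buffers[index].append(value)

    shards = RoundRobinShards(iter(range(10)), 2)
    assert shards.get_next_from_shard(0) == 0
    assert shards.get_next_from_shard(1) == 1
    assert shards.get_next_from_shard(0) == 2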
+// Creates a MultiDeviceIterator resource and outputs a handle to it.
+class MultiDeviceIteratorHandleOp : public OpKernel {
+ public:
+ explicit MultiDeviceIteratorHandleOp(OpKernelConstruction* ctx)
+ : OpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("output_types", &output_types_));
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("output_shapes", &output_shapes_));
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("shared_name", &name_));
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("container", &container_));
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("devices", &devices_));
+ }
+
+  // The resource is deleted from the resource manager only when it is private
+  // to the kernel.
+ ~MultiDeviceIteratorHandleOp() override {
+ if (resource_ != nullptr) {
+ resource_->Unref();
+ if (cinfo_.resource_is_private_to_kernel()) {
+ if (!cinfo_.resource_manager()
+ ->template Delete<MultiDeviceIterator>(cinfo_.container(),
+ cinfo_.name())
+ .ok()) {
+          // Do nothing; the resource may have been deleted by session resets.
+ }
+ }
+ }
+ }
+
+ void Compute(OpKernelContext* context) override LOCKS_EXCLUDED(mu_) {
+ {
+ mutex_lock l(mu_);
+ if (resource_ == nullptr) {
+ FunctionLibraryRuntime* lib;
+ std::unique_ptr<FunctionLibraryDefinition> flib_def(nullptr);
+ std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(nullptr);
+ OP_REQUIRES_OK(context, context->function_library()->Clone(
+ &flib_def, &pflr, &lib));
+ ResourceMgr* mgr = context->resource_manager();
+ OP_REQUIRES_OK(context, cinfo_.Init(mgr, def()));
+
+ MultiDeviceIterator* resource;
+ OP_REQUIRES_OK(
+ context,
+ mgr->LookupOrCreate<MultiDeviceIterator>(
+ cinfo_.container(), cinfo_.name(), &resource,
+ [this, lib, &flib_def, &pflr](MultiDeviceIterator** ret)
+ EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ *ret = new MultiDeviceIterator(
+ output_types_, output_shapes_, devices_,
+ std::move(flib_def), std::move(pflr), lib);
+ return Status::OK();
+ }));
+
+ Status s = VerifyResource(resource);
+ if (TF_PREDICT_FALSE(!s.ok())) {
+ resource->Unref();
+ context->SetStatus(s);
+ return;
+ }
+
+ resource_ = resource;
+ }
+ }
+ OP_REQUIRES_OK(context, MakeResourceHandleToOutput(
+ context, 0, cinfo_.container(), cinfo_.name(),
+ MakeTypeIndex<MultiDeviceIterator>()));
+ }
+
+ private:
+  // During the first Compute(), the resource is either created or looked up
+  // using shared_name. In the latter case, the resource that is found must be
+  // verified to be compatible with this op's configuration. Verification can
+  // fail, for example, when two graphs ask for queues with the same shared
+  // name but inconsistent capacities.
+ Status VerifyResource(MultiDeviceIterator* resource) {
+ TF_RETURN_IF_ERROR(
+ VerifyTypesMatch(output_types_, resource->output_types()));
+ TF_RETURN_IF_ERROR(
+ VerifyShapesCompatible(output_shapes_, resource->output_shapes()));
+ return Status::OK();
+ }
+
+ mutex mu_;
+ ContainerInfo cinfo_; // Written once under mu_ then constant afterwards.
+ MultiDeviceIterator* resource_ GUARDED_BY(mu_) = nullptr;
+ DataTypeVector output_types_;
+ std::vector<PartialTensorShape> output_shapes_;
+ const int graph_def_version_;
+ string name_;
+ string container_;
+ std::vector<string> devices_;
+};
+
+REGISTER_KERNEL_BUILDER(Name("MultiDeviceIterator").Device(DEVICE_CPU),
+ MultiDeviceIteratorHandleOp);
+
+// Calls init on the MultiDeviceIterator.
+class MultiDeviceIteratorInitOp : public OpKernel {
+ public:
+ explicit MultiDeviceIteratorInitOp(OpKernelConstruction* ctx)
+ : OpKernel(ctx) {}
+
+ void Compute(OpKernelContext* ctx) override {
+ DatasetBase* dataset;
+ OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &dataset));
+ MultiDeviceIterator* resource;
+ OP_REQUIRES_OK(ctx,
+ LookupResource(ctx, HandleFromInput(ctx, 1), &resource));
+ core::ScopedUnref unref(resource);
+
+ IteratorContext iter_ctx = dataset::MakeIteratorContext(ctx);
+ std::unique_ptr<IteratorBase> iterator;
+ OP_REQUIRES_OK(ctx,
+ dataset->MakeIterator(&iter_ctx, "Iterator", &iterator));
+ int64 incarnation_id;
+ OP_REQUIRES_OK(ctx, resource->Init(std::move(iterator), &incarnation_id));
+ Tensor tensor_incarnation_id(DT_INT64, TensorShape({}));
+ tensor_incarnation_id.scalar<int64>()() = incarnation_id;
+ OP_REQUIRES_OK(ctx,
+ ctx->set_output("incarnation_id", tensor_incarnation_id));
+ }
+};
+
+REGISTER_KERNEL_BUILDER(Name("MultiDeviceIteratorInit").Device(DEVICE_CPU),
+ MultiDeviceIteratorInitOp);
+
+// Calls GetNextFromShard(shard) and returns a vector of Tensors as output.
+// TODO(rohanj): Implement using BackgroundWorker that Derek built?
+class MultiDeviceIteratorGetNextFromShardOp : public AsyncOpKernel {
+ public:
+ explicit MultiDeviceIteratorGetNextFromShardOp(OpKernelConstruction* ctx)
+ : AsyncOpKernel(ctx),
+ thread_pool_(new thread::ThreadPool(
+ ctx->env(), ThreadOptions(),
+ strings::StrCat("multi_device_iterator_get_next_thread_",
+ SanitizeThreadSuffix(name())),
+ 1 /* num_threads */, false /* low_latency_hint */)) {}
+
+ void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {
+ const Tensor* tensor_shard_num;
+ OP_REQUIRES_OK(ctx, ctx->input("shard_num", &tensor_shard_num));
+ int32 shard_num = tensor_shard_num->scalar<int32>()();
+
+ const Tensor* tensor_incarnation_id;
+ OP_REQUIRES_OK(ctx, ctx->input("incarnation_id", &tensor_incarnation_id));
+ int64 incarnation_id = tensor_incarnation_id->scalar<int64>()();
+
+ MultiDeviceIterator* iterator;
+ OP_REQUIRES_OK(ctx,
+ LookupResource(ctx, HandleFromInput(ctx, 0), &iterator));
+ thread_pool_->Schedule(std::bind(
+ [ctx, iterator, shard_num, incarnation_id](DoneCallback done) {
+ std::vector<Tensor> components;
+ bool end_of_sequence = false;
+
+ IteratorContext::Params params;
+ params.env = ctx->env();
+ params.runner = *(ctx->runner());
+ params.function_library = iterator->function_library();
+ DeviceBase* device = ctx->function_library()->device();
+ params.allocator_getter = [device](AllocatorAttributes attrs) {
+ return device->GetAllocator(attrs);
+ };
+ IteratorContext iter_ctx(std::move(params));
+
+ Status s =
+ iterator->GetNextFromShard(&iter_ctx, shard_num, incarnation_id,
+ &components, &end_of_sequence);
+ iterator->Unref();
+
+ if (!s.ok()) {
+ ctx->SetStatus(s);
+ } else if (end_of_sequence) {
+ ctx->SetStatus(errors::OutOfRange("End of sequence"));
+ } else {
+ for (int i = 0; i < components.size(); ++i) {
+ // TODO(mrry): Check that the shapes match the shape attrs.
+ ctx->set_output(i, components[i]);
+ }
+ }
+ done();
+ },
+ std::move(done)));
+ }
+
+ private:
+ std::unique_ptr<thread::ThreadPool> thread_pool_;
+};
+
+REGISTER_KERNEL_BUILDER(
+ Name("MultiDeviceIteratorGetNextFromShard").Device(DEVICE_CPU),
+ MultiDeviceIteratorGetNextFromShardOp);
+
+class MultiDeviceIteratorToStringHandleOp : public OpKernel {
+ public:
+ explicit MultiDeviceIteratorToStringHandleOp(OpKernelConstruction* ctx)
+ : OpKernel(ctx) {}
+
+ void Compute(OpKernelContext* ctx) override {
+ const Tensor& resource_handle_t = ctx->input(0);
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(resource_handle_t.shape()),
+ errors::InvalidArgument("resource_handle must be a scalar"));
+
+ // Validate that the handle corresponds to a real resource, and
+    // that it is a MultiDeviceIterator.
+ MultiDeviceIterator* resource;
+ OP_REQUIRES_OK(ctx,
+ LookupResource(ctx, HandleFromInput(ctx, 0), &resource));
+ resource->Unref();
+
+ Tensor* string_handle_t;
+ OP_REQUIRES_OK(ctx,
+ ctx->allocate_output(0, TensorShape({}), &string_handle_t));
+ string_handle_t->scalar<string>()() =
+ resource_handle_t.scalar<ResourceHandle>()().SerializeAsString();
+ }
+};
+
+REGISTER_KERNEL_BUILDER(
+ Name("MultiDeviceIteratorToStringHandle").Device(DEVICE_CPU),
+ MultiDeviceIteratorToStringHandleOp);
+
+class MultiDeviceIteratorFromStringHandleOp : public OpKernel {
+ public:
+ explicit MultiDeviceIteratorFromStringHandleOp(OpKernelConstruction* ctx)
+ : OpKernel(ctx) {
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("output_types", &output_types_));
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("output_shapes", &output_shapes_));
+ OP_REQUIRES(
+ ctx,
+ output_types_.empty() || output_shapes_.empty() ||
+ output_types_.size() == output_shapes_.size(),
+ errors::InvalidArgument("If both 'output_types' and 'output_shapes' "
+ "are set, they must have the same length."));
+ }
+
+ void Compute(OpKernelContext* ctx) override {
+ const Tensor& string_handle_t = ctx->input(0);
+ OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(string_handle_t.shape()),
+ errors::InvalidArgument("string_handle must be a scalar"));
+
+ ResourceHandle resource_handle;
+ OP_REQUIRES(
+ ctx,
+ resource_handle.ParseFromString(string_handle_t.scalar<string>()()),
+ errors::InvalidArgument(
+ "Could not parse string_handle as a valid ResourceHandle"));
+
+ OP_REQUIRES(
+ ctx, resource_handle.device() == ctx->device()->attributes().name(),
+        errors::InvalidArgument("Attempted to create an iterator on device \"",
+ ctx->device()->attributes().name(),
+ "\" from handle defined on device \"",
+ resource_handle.device(), "\""));
+
+ // Validate that the handle corresponds to a real resource, and
+    // that it is a MultiDeviceIterator.
+ MultiDeviceIterator* resource;
+ OP_REQUIRES_OK(ctx, LookupResource(ctx, resource_handle, &resource));
+ core::ScopedUnref unref_iterator(resource);
+ if (!output_types_.empty()) {
+ OP_REQUIRES_OK(ctx,
+ VerifyTypesMatch(output_types_, resource->output_types()));
+ }
+ if (!output_shapes_.empty()) {
+ OP_REQUIRES_OK(ctx, VerifyShapesCompatible(output_shapes_,
+ resource->output_shapes()));
+ }
+
+ Tensor* resource_handle_t;
+ OP_REQUIRES_OK(
+ ctx, ctx->allocate_output(0, TensorShape({}), &resource_handle_t));
+ resource_handle_t->scalar<ResourceHandle>()() = resource_handle;
+ }
+
+ private:
+ DataTypeVector output_types_;
+ std::vector<PartialTensorShape> output_shapes_;
+};
+
+REGISTER_KERNEL_BUILDER(
+ Name("MultiDeviceIteratorFromStringHandle").Device(DEVICE_CPU),
+ MultiDeviceIteratorFromStringHandleOp);
+
+} // anonymous namespace
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/ops/dataset_ops.cc b/tensorflow/contrib/data/ops/dataset_ops.cc
index 8413fcaf87..66a7c7fdcd 100644
--- a/tensorflow/contrib/data/ops/dataset_ops.cc
+++ b/tensorflow/contrib/data/ops/dataset_ops.cc
@@ -36,6 +36,7 @@ data_input_datasets: `N` datasets with the same type that will be interleaved
REGISTER_OP("CSVDataset")
.Input("filenames: string")
+ .Input("compression_type: string")
.Input("buffer_size: int64")
.Input("header: bool")
.Input("field_delim: string")
@@ -52,17 +53,18 @@ REGISTER_OP("CSVDataset")
shape_inference::ShapeHandle unused;
// `filenames` must be a scalar or a vector.
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(0), 1, &unused));
- // `buffer_size`, `header`, `field_delim`, `use_quote_delim`,
- // `na_value` must be scalars
+ // `compression_type`, `buffer_size`, `header`, `field_delim`,
+ // `use_quote_delim`, `na_value` must be scalars
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 0, &unused));
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(6), 0, &unused));
// `select_cols` must be a vector
- TF_RETURN_IF_ERROR(c->WithRank(c->input(6), 1, &unused));
- // `record_defaults` must be a list of scalars...?
- for (size_t i = 7; i < c->num_inputs(); ++i) {
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(7), 1, &unused));
+      // Each `record_defaults` input must be a vector (rank 1).
+ for (size_t i = 8; i < c->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &unused));
}
return shape_inference::ScalarShape(c);
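The CSVDataset op gains a `compression_type` input right after `filenames`, which shifts the later input indices: `select_cols` moves from input 6 to input 7 and the `record_defaults` inputs now start at index 8. The test changes further down simply pass the new argument through to the Python reader; a hedged sketch of that call (the module path tensorflow.contrib.data.python.ops.readers and the file name are assumptions, mirroring the tests):

    from tensorflow.contrib.data.python.ops import readers

    # Read a gzip-compressed CSV with three string columns, using "NA" as the
    # default for missing fields.
    dataset = readers.CsvDataset(
        ["data.csv.gz"],                 # placeholder file name
        record_defaults=[["NA"]] * 3,
        compression_type="GZIP")         # new input added by this change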
@@ -143,6 +145,80 @@ Resets the FunctionBufferingResource.
function_buffer_resource: The FunctionBufferingResource handle.
)doc");
+REGISTER_OP("MultiDeviceIterator")
+ .Output("handle: resource")
+ .Attr("devices: list(string) >= 1")
+ .Attr("shared_name: string")
+ .Attr("container: string")
+ .Attr("output_types: list(type) >= 1")
+ .Attr("output_shapes: list(shape) >= 1")
+ .Doc(R"doc(
+Creates a MultiDeviceIterator resource.
+
+handle: Handle to the resource created.
+devices: A list of devices the iterator works across.
+shared_name: If non-empty, this resource will be shared under the given name
+ across multiple sessions.
+container: If non-empty, this resource is placed in the given container.
+ Otherwise, a default container is used.
+output_types: The type list for the return values.
+output_shapes: The list of shapes being produced.
+)doc");
+
+REGISTER_OP("MultiDeviceIteratorInit")
+ .Input("dataset: variant")
+ .Input("multi_device_iterator: resource")
+ .Output("incarnation_id: int64")
+ .Doc(R"doc(
+Initializes the MultiDeviceIterator with the given dataset.
+
+dataset: Dataset to be iterated upon.
+multi_device_iterator: A MultiDeviceIterator resource.
+incarnation_id: An int64 indicating which incarnation of the MultiDeviceIterator
+  is running.
+)doc");
+
+REGISTER_OP("MultiDeviceIteratorGetNextFromShard")
+ .Input("multi_device_iterator: resource")
+ .Input("shard_num: int32")
+ .Input("incarnation_id: int64")
+ .Output("components: output_types")
+ .Attr("output_types: list(type) >= 1")
+ .Attr("output_shapes: list(shape) >= 1")
+ .Doc(R"doc(
+Gets next element for the provided shard number.
+
+multi_device_iterator: A MultiDeviceIterator resource.
+shard_num: Integer representing which shard to fetch data for.
+incarnation_id: Which incarnation of the MultiDeviceIterator is running.
+components: Result of the get_next on the dataset.
+output_types: The type list for the return values.
+output_shapes: The list of shapes being produced.
+)doc");
+
+REGISTER_OP("MultiDeviceIteratorToStringHandle")
+ .Input("multi_device_iterator: resource")
+ .Output("string_handle: string")
+ .Doc(R"doc(
+Produces a string handle for the given MultiDeviceIterator.
+
+multi_device_iterator: A MultiDeviceIterator resource.
+string_handle: A string representing the resource.
+)doc");
+
+REGISTER_OP("MultiDeviceIteratorFromStringHandle")
+ .Input("string_handle: string")
+ .Output("multi_device_iterator: resource")
+ .Attr("output_types: list(type) >= 0 = []")
+ .Attr("output_shapes: list(shape) >= 0 = []")
+ .Doc(R"doc(
+Generates a MultiDeviceIterator resource from the given string handle.
+
+string_handle: String representing the resource.
+multi_device_iterator: A MultiDeviceIterator resource.
+output_types: The type list for the return values.
+output_shapes: The list of shapes being produced.
+)doc");
+
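Together these ops back the MultiDeviceIterator wrapper exercised by the prefetching tests further down: MultiDeviceIterator creates the shared resource, MultiDeviceIteratorInit binds it to a dataset and returns an incarnation_id, and MultiDeviceIteratorGetNextFromShard fetches the next element for a given shard, while the string-handle ops serialize and re-resolve the resource handle. A hedged usage sketch mirroring those tests (the module path tensorflow.contrib.data.python.ops.prefetching_ops is an assumption):

    import tensorflow as tf
    from tensorflow.contrib.data.python.ops import prefetching_ops

    dataset = tf.data.Dataset.range(10)
    multi_device_iterator = prefetching_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2"])
    elem_on_1, elem_on_2 = multi_device_iterator.get_next()

    config = tf.ConfigProto(device_count={"CPU": 3})
    with tf.Session(config=config) as sess:
      sess.run(multi_device_iterator.initializer)
      # Elements alternate between the two devices: 0 on /cpu:1, 1 on /cpu:2.
      print(sess.run([elem_on_1, elem_on_2]))  # [0, 1]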
REGISTER_OP("ThreadPoolDataset")
.Input("input_dataset: variant")
.Input("thread_pool: resource")
@@ -175,4 +251,17 @@ display_name: A human-readable name for the threads that may be visible in
some visualizations.
)doc");
+REGISTER_OP("AssertNextDataset")
+ .Input("input_dataset: variant")
+ .Input("transformations: string")
+ .Output("handle: variant")
+ .Attr("output_types: list(type) >= 1")
+ .Attr("output_shapes: list(shape) >= 1")
+ .SetShapeFn([](shape_inference::InferenceContext* c) {
+ shape_inference::ShapeHandle unused;
+      // `transformations` must be a vector.
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
+ return shape_inference::ScalarShape(c);
+ });
+
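AssertNextDataset underlies the optimization.assert_next transformation used by the optimization tests further down: it checks that the named transformations are the ones that actually follow it in the (possibly optimized) input pipeline and fails with InvalidArgument otherwise. A hedged sketch of that usage, mirroring those tests (the module path tensorflow.contrib.data.python.ops.optimization is an assumption):

    from tensorflow.contrib.data.python.ops import optimization
    from tensorflow.python.data.ops import dataset_ops

    # Succeeds only if the transformation applied next really is a Map.
    dataset = dataset_ops.Dataset.from_tensors(0).apply(
        optimization.assert_next(["Map"])).map(lambda x: x)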
} // namespace tensorflow
diff --git a/tensorflow/contrib/data/python/kernel_tests/BUILD b/tensorflow/contrib/data/python/kernel_tests/BUILD
index 9a454efc4c..036dc795bb 100644
--- a/tensorflow/contrib/data/python/kernel_tests/BUILD
+++ b/tensorflow/contrib/data/python/kernel_tests/BUILD
@@ -60,7 +60,7 @@ py_test(
py_test(
name = "csv_dataset_op_test",
- size = "small",
+ size = "medium",
srcs = ["csv_dataset_op_test.py"],
srcs_version = "PY2AND3",
tags = ["no_pip"],
@@ -121,6 +121,7 @@ py_test(
srcs = ["get_single_element_test.py"],
deps = [
"//tensorflow/contrib/data/python/ops:get_single_element",
+ "//tensorflow/contrib/data/python/ops:grouping",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:constant_op",
@@ -128,6 +129,7 @@ py_test(
"//tensorflow/python:errors",
"//tensorflow/python:sparse_tensor",
"//tensorflow/python/data/ops:dataset_ops",
+ "@absl_py//absl/testing:parameterized",
],
)
@@ -208,7 +210,6 @@ py_test(
srcs_version = "PY2AND3",
deps = [
"//tensorflow/contrib/data/python/ops:optimization",
- "//tensorflow/core:protos_all_py",
"//tensorflow/python:client_testlib",
"//tensorflow/python:errors",
"//tensorflow/python/data/ops:dataset_ops",
@@ -380,6 +381,7 @@ py_test(
"//tensorflow/python:sparse_tensor",
"//tensorflow/python/data/ops:dataset_ops",
"//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
],
)
diff --git a/tensorflow/contrib/data/python/kernel_tests/csv_dataset_op_test.py b/tensorflow/contrib/data/python/kernel_tests/csv_dataset_op_test.py
index df115175f5..2a0e64caeb 100644
--- a/tensorflow/contrib/data/python/kernel_tests/csv_dataset_op_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/csv_dataset_op_test.py
@@ -18,10 +18,12 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import gzip
import os
import string
import tempfile
import time
+import zlib
import numpy as np
@@ -62,18 +64,29 @@ class CsvDatasetOpTest(test.TestCase):
op2 = sess.run(next2)
self.assertAllEqual(op1, op2)
- def setup_files(self, inputs, linebreak='\n'):
+ def _setup_files(self, inputs, linebreak='\n', compression_type=None):
filenames = []
for i, ip in enumerate(inputs):
fn = os.path.join(self.get_temp_dir(), 'temp_%d.csv' % i)
- with open(fn, 'wb') as f:
- f.write(linebreak.join(ip).encode('utf-8'))
+ contents = linebreak.join(ip).encode('utf-8')
+ if compression_type is None:
+ with open(fn, 'wb') as f:
+ f.write(contents)
+ elif compression_type == 'GZIP':
+ with gzip.GzipFile(fn, 'wb') as f:
+ f.write(contents)
+ elif compression_type == 'ZLIB':
+ contents = zlib.compress(contents)
+ with open(fn, 'wb') as f:
+ f.write(contents)
+ else:
+ raise ValueError('Unsupported compression_type', compression_type)
filenames.append(fn)
return filenames
def _make_test_datasets(self, inputs, **kwargs):
# Test by comparing its output to what we could get with map->decode_csv
- filenames = self.setup_files(inputs)
+ filenames = self._setup_files(inputs)
dataset_expected = core_readers.TextLineDataset(filenames)
dataset_expected = dataset_expected.map(
lambda l: parsing_ops.decode_csv(l, **kwargs))
@@ -112,15 +125,18 @@ class CsvDatasetOpTest(test.TestCase):
except errors.OutOfRangeError:
break
- def _test_dataset(self,
- inputs,
- expected_output=None,
- expected_err_re=None,
- linebreak='\n',
- **kwargs):
+ def _test_dataset(
+ self,
+ inputs,
+ expected_output=None,
+ expected_err_re=None,
+ linebreak='\n',
+ compression_type=None, # Used for both setup and parsing
+ **kwargs):
"""Checks that elements produced by CsvDataset match expected output."""
# Convert str type because py3 tf strings are bytestrings
- filenames = self.setup_files(inputs, linebreak)
+ filenames = self._setup_files(inputs, linebreak, compression_type)
+ kwargs['compression_type'] = compression_type
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = readers.CsvDataset(filenames, **kwargs)
@@ -174,7 +190,7 @@ class CsvDatasetOpTest(test.TestCase):
def testCsvDataset_ignoreErrWithUnescapedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,"2"3",4', '1,"2"3",4",5,5', 'a,b,"c"d"', 'e,f,g']]
- filenames = self.setup_files(inputs)
+ filenames = self._setup_files(inputs)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = readers.CsvDataset(filenames, record_defaults=record_defaults)
@@ -184,7 +200,7 @@ class CsvDatasetOpTest(test.TestCase):
def testCsvDataset_ignoreErrWithUnquotedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,2"3,4', 'a,b,c"d', '9,8"7,6,5', 'e,f,g']]
- filenames = self.setup_files(inputs)
+ filenames = self._setup_files(inputs)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
dataset = readers.CsvDataset(filenames, record_defaults=record_defaults)
@@ -355,7 +371,7 @@ class CsvDatasetOpTest(test.TestCase):
'1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19',
'1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19'
]]
- file_path = self.setup_files(data)
+ file_path = self._setup_files(data)
with ops.Graph().as_default() as g:
ds = readers.make_csv_dataset(
@@ -432,14 +448,29 @@ class CsvDatasetOpTest(test.TestCase):
record_defaults=record_defaults,
buffer_size=0)
- def testCsvDataset_withBufferSize(self):
+ def _test_dataset_on_buffer_sizes(self,
+ inputs,
+ expected,
+ linebreak,
+ record_defaults,
+ compression_type=None,
+ num_sizes_to_test=20):
+    # Test reading with a range of buffer sizes that should all work.
+ for i in list(range(1, 1 + num_sizes_to_test)) + [None]:
+ self._test_dataset(
+ inputs,
+ expected,
+ linebreak=linebreak,
+ compression_type=compression_type,
+ record_defaults=record_defaults,
+ buffer_size=i)
+
+ def testCsvDataset_withLF(self):
record_defaults = [['NA']] * 3
inputs = [['abc,def,ghi', '0,1,2', ',,']]
expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']]
- for i in range(20):
- # Test a range of buffer sizes that should all work
- self._test_dataset(
- inputs, expected, record_defaults=record_defaults, buffer_size=i + 1)
+ self._test_dataset_on_buffer_sizes(
+ inputs, expected, linebreak='\n', record_defaults=record_defaults)
def testCsvDataset_withCR(self):
# Test that when the line separator is '\r', parsing works with all buffer
@@ -447,14 +478,8 @@ class CsvDatasetOpTest(test.TestCase):
record_defaults = [['NA']] * 3
inputs = [['abc,def,ghi', '0,1,2', ',,']]
expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']]
- for i in range(20):
- # Test a range of buffer sizes that should all work
- self._test_dataset(
- inputs,
- expected,
- linebreak='\r',
- record_defaults=record_defaults,
- buffer_size=i + 1)
+ self._test_dataset_on_buffer_sizes(
+ inputs, expected, linebreak='\r', record_defaults=record_defaults)
def testCsvDataset_withCRLF(self):
# Test that when the line separator is '\r\n', parsing works with all buffer
@@ -462,29 +487,15 @@ class CsvDatasetOpTest(test.TestCase):
record_defaults = [['NA']] * 3
inputs = [['abc,def,ghi', '0,1,2', ',,']]
expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']]
- for i in range(20):
- # Test a range of buffer sizes that should all work
- self._test_dataset(
- inputs,
- expected,
- linebreak='\r\n',
- record_defaults=record_defaults,
- buffer_size=i + 1)
+ self._test_dataset_on_buffer_sizes(
+ inputs, expected, linebreak='\r\n', record_defaults=record_defaults)
def testCsvDataset_withBufferSizeAndQuoted(self):
record_defaults = [['NA']] * 3
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
- for i in range(20):
- # Test a range of buffer sizes that should all work
- self._test_dataset(
- inputs,
- expected,
- linebreak='\n',
- record_defaults=record_defaults,
- buffer_size=i + 1)
- self._test_dataset(
+ self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\n', record_defaults=record_defaults)
def testCsvDataset_withCRAndQuoted(self):
@@ -494,15 +505,7 @@ class CsvDatasetOpTest(test.TestCase):
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
- for i in range(20):
- # Test a range of buffer sizes that should all work
- self._test_dataset(
- inputs,
- expected,
- linebreak='\r',
- record_defaults=record_defaults,
- buffer_size=i + 1)
- self._test_dataset(
+ self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\r', record_defaults=record_defaults)
def testCsvDataset_withCRLFAndQuoted(self):
@@ -512,17 +515,33 @@ class CsvDatasetOpTest(test.TestCase):
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
- for i in range(20):
- # Test a range of buffer sizes that should all work
- self._test_dataset(
- inputs,
- expected,
- linebreak='\r\n',
- record_defaults=record_defaults,
- buffer_size=i + 1)
- self._test_dataset(
+ self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\r\n', record_defaults=record_defaults)
+ def testCsvDataset_withGzipCompressionType(self):
+ record_defaults = [['NA']] * 3
+ inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
+ expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
+ ['NA', 'NA', 'NA']]
+ self._test_dataset_on_buffer_sizes(
+ inputs,
+ expected,
+ linebreak='\r\n',
+ compression_type='GZIP',
+ record_defaults=record_defaults)
+
+ def testCsvDataset_withZlibCompressionType(self):
+ record_defaults = [['NA']] * 3
+ inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
+ expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
+ ['NA', 'NA', 'NA']]
+ self._test_dataset_on_buffer_sizes(
+ inputs,
+ expected,
+ linebreak='\r\n',
+ compression_type='ZLIB',
+ record_defaults=record_defaults)
+
class CsvDatasetBenchmark(test.Benchmark):
"""Benchmarks for the various ways of creating a dataset from CSV files.
diff --git a/tensorflow/contrib/data/python/kernel_tests/get_single_element_test.py b/tensorflow/contrib/data/python/kernel_tests/get_single_element_test.py
index 87b7c6ddb7..e6883d53e0 100644
--- a/tensorflow/contrib/data/python/kernel_tests/get_single_element_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/get_single_element_test.py
@@ -17,9 +17,12 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from absl.testing import parameterized
+import numpy as np
+
from tensorflow.contrib.data.python.ops import get_single_element
+from tensorflow.contrib.data.python.ops import grouping
from tensorflow.python.data.ops import dataset_ops
-from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
@@ -27,40 +30,69 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
-class GetSingleElementTest(test.TestCase):
+class GetSingleElementTest(test.TestCase, parameterized.TestCase):
- def testGetSingleElement(self):
- skip_value = array_ops.placeholder(dtypes.int64, shape=[])
- take_value = array_ops.placeholder_with_default(
- constant_op.constant(1, dtype=dtypes.int64), shape=[])
+ @parameterized.named_parameters(
+ ("Zero", 0, 1),
+ ("Five", 5, 1),
+ ("Ten", 10, 1),
+ ("Empty", 100, 1, errors.InvalidArgumentError, "Dataset was empty."),
+ ("MoreThanOne", 0, 2, errors.InvalidArgumentError,
+ "Dataset had more than one element."),
+ )
+ def testGetSingleElement(self, skip, take, error=None, error_msg=None):
+ skip_t = array_ops.placeholder(dtypes.int64, shape=[])
+ take_t = array_ops.placeholder(dtypes.int64, shape=[])
def make_sparse(x):
x_1d = array_ops.reshape(x, [1])
x_2d = array_ops.reshape(x, [1, 1])
return sparse_tensor.SparseTensor(x_2d, x_1d, x_1d)
- dataset = (dataset_ops.Dataset.range(100)
- .skip(skip_value)
- .map(lambda x: (x * x, make_sparse(x)))
- .take(take_value))
-
+ dataset = dataset_ops.Dataset.range(100).skip(skip_t).map(
+ lambda x: (x * x, make_sparse(x))).take(take_t)
element = get_single_element.get_single_element(dataset)
with self.test_session() as sess:
- for x in [0, 5, 10]:
- dense_val, sparse_val = sess.run(element, feed_dict={skip_value: x})
- self.assertEqual(x * x, dense_val)
- self.assertAllEqual([[x]], sparse_val.indices)
- self.assertAllEqual([x], sparse_val.values)
- self.assertAllEqual([x], sparse_val.dense_shape)
-
- with self.assertRaisesRegexp(errors.InvalidArgumentError,
- "Dataset was empty."):
- sess.run(element, feed_dict={skip_value: 100})
-
- with self.assertRaisesRegexp(errors.InvalidArgumentError,
- "Dataset had more than one element."):
- sess.run(element, feed_dict={skip_value: 0, take_value: 2})
+ if error is None:
+ dense_val, sparse_val = sess.run(
+ element, feed_dict={
+ skip_t: skip,
+ take_t: take
+ })
+ self.assertEqual(skip * skip, dense_val)
+ self.assertAllEqual([[skip]], sparse_val.indices)
+ self.assertAllEqual([skip], sparse_val.values)
+ self.assertAllEqual([skip], sparse_val.dense_shape)
+ else:
+ with self.assertRaisesRegexp(error, error_msg):
+ sess.run(element, feed_dict={skip_t: skip, take_t: take})
+
+ @parameterized.named_parameters(
+ ("SumZero", 0),
+ ("SumOne", 1),
+ ("SumFive", 5),
+ ("SumTen", 10),
+ )
+ def testReduceDataset(self, stop):
+ def init_fn(_):
+ return np.int64(0)
+
+ def reduce_fn(state, value):
+ return state + value
+
+ def finalize_fn(state):
+ return state
+
+ sum_reducer = grouping.Reducer(init_fn, reduce_fn, finalize_fn)
+
+ stop_t = array_ops.placeholder(dtypes.int64, shape=[])
+ dataset = dataset_ops.Dataset.range(stop_t)
+ element = get_single_element.reduce_dataset(dataset, sum_reducer)
+
+ with self.test_session() as sess:
+ value = sess.run(element, feed_dict={stop_t: stop})
+ self.assertEqual(stop * (stop - 1) / 2, value)
if __name__ == "__main__":
diff --git a/tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py b/tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py
index 3bb9723bbc..cfef40e192 100644
--- a/tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/optimize_dataset_op_test.py
@@ -18,7 +18,6 @@ from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import optimization
-from tensorflow.core.framework import graph_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
@@ -26,47 +25,76 @@ from tensorflow.python.platform import test
class OptimizeDatasetTest(test.TestCase):
+ def testAssertSuffix(self):
+ dataset = dataset_ops.Dataset.from_tensors(0).apply(
+ optimization.assert_next(["Map"])).map(lambda x: x)
+ iterator = dataset.make_one_shot_iterator()
+ get_next = iterator.get_next()
+
+ with self.test_session() as sess:
+ self.assertEqual(0, sess.run(get_next))
+
+ def testAssertSuffixInvalid(self):
+ dataset = dataset_ops.Dataset.from_tensors(0).apply(
+ optimization.assert_next(["Whoops"])).map(lambda x: x)
+ iterator = dataset.make_one_shot_iterator()
+ get_next = iterator.get_next()
+
+ with self.test_session() as sess:
+ with self.assertRaisesRegexp(
+ errors.InvalidArgumentError,
+ "Asserted Whoops transformation at offset 0 but encountered "
+ "Map transformation instead."
+ ):
+ sess.run(get_next)
+
+ def testAssertSuffixShort(self):
+ dataset = dataset_ops.Dataset.from_tensors(0).apply(
+ optimization.assert_next(["Map", "Whoops"])).map(lambda x: x)
+ iterator = dataset.make_one_shot_iterator()
+ get_next = iterator.get_next()
+
+ with self.test_session() as sess:
+ with self.assertRaisesRegexp(
+ errors.InvalidArgumentError,
+ "Asserted next 2 transformations but encountered only 1."):
+ sess.run(get_next)
+
def testDefaultOptimizations(self):
- dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x).batch(
- 10).apply(optimization.optimize())
+ dataset = dataset_ops.Dataset.range(10).apply(
+ optimization.assert_next(
+ ["Map", "Batch"])).map(lambda x: x * x).batch(10).apply(
+ optimization.optimize())
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
- graph = graph_pb2.GraphDef().FromString(
- sess.run(dataset._as_serialized_graph()))
- self.assertTrue(
- all([node.op != "MapAndBatchDatasetV2" for node in graph.node]))
self.assertAllEqual([x * x for x in range(10)], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testEmptyOptimizations(self):
- dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x).batch(
- 10).apply(optimization.optimize([]))
+ dataset = dataset_ops.Dataset.range(10).apply(
+ optimization.assert_next(
+ ["Map", "Batch"])).map(lambda x: x * x).batch(10).apply(
+ optimization.optimize([]))
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
- graph = graph_pb2.GraphDef().FromString(
- sess.run(dataset._as_serialized_graph()))
- self.assertTrue(
- all([node.op != "MapAndBatchDatasetV2" for node in graph.node]))
self.assertAllEqual([x * x for x in range(10)], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testOptimization(self):
- dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x).batch(
- 10).apply(optimization.optimize(["map_and_batch_fusion"]))
+ dataset = dataset_ops.Dataset.range(10).apply(
+ optimization.assert_next(
+ ["MapAndBatch"])).map(lambda x: x * x).batch(10).apply(
+ optimization.optimize(["map_and_batch_fusion"]))
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
- graph = graph_pb2.GraphDef().FromString(
- sess.run(dataset._as_serialized_graph()))
- self.assertTrue(
- any([node.op == "MapAndBatchDatasetV2" for node in graph.node]))
self.assertAllEqual([x * x for x in range(10)], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
diff --git a/tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py b/tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py
index 82543b1039..2da6131e8e 100644
--- a/tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/prefetching_ops_test.py
@@ -31,6 +31,7 @@ from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
@@ -907,5 +908,155 @@ class CopyToDeviceTest(test.TestCase):
sess.run(next_element)
+class MultiDeviceIteratorTest(test.TestCase):
+
+ def testBasic(self):
+ dataset = dataset_ops.Dataset.range(10)
+ multi_device_iterator = prefetching_ops.MultiDeviceIterator(
+ dataset, ["/cpu:1", "/cpu:2"])
+ elem_on_1, elem_on_2 = multi_device_iterator.get_next()
+
+ config = config_pb2.ConfigProto(device_count={"CPU": 3})
+ with self.test_session(config=config) as sess:
+ sess.run(multi_device_iterator.initializer)
+ for i in range(0, 10, 2):
+ self.assertEqual(i, sess.run(elem_on_1))
+ self.assertEqual(i + 1, sess.run(elem_on_2))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(elem_on_1)
+ sess.run(elem_on_2)
+
+ def testOneOnSameDevice(self):
+ with ops.device("/cpu:0"):
+ dataset = dataset_ops.Dataset.range(10)
+ multi_device_iterator = prefetching_ops.MultiDeviceIterator(
+ dataset, ["/cpu:0", "/cpu:1"])
+ elem_on_1, elem_on_2 = multi_device_iterator.get_next()
+
+ config = config_pb2.ConfigProto(device_count={"CPU": 2})
+ with self.test_session(config=config) as sess:
+ sess.run(multi_device_iterator.initializer)
+ for i in range(0, 10, 2):
+ self.assertEqual(i, sess.run(elem_on_1))
+ self.assertEqual(i + 1, sess.run(elem_on_2))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(elem_on_1)
+ sess.run(elem_on_2)
+
+ def testRepeatDevices(self):
+ with ops.device("/cpu:0"):
+ dataset = dataset_ops.Dataset.range(20)
+ multi_device_iterator = prefetching_ops.MultiDeviceIterator(
+ dataset, ["/cpu:1", "/cpu:2", "/cpu:1", "/cpu:2"])
+ elements = multi_device_iterator.get_next()
+ elem_on_1, elem_on_2, elem_on_3, elem_on_4 = elements
+
+ config = config_pb2.ConfigProto(device_count={"CPU": 3})
+ with self.test_session(config=config) as sess:
+ sess.run(multi_device_iterator.initializer)
+ for i in range(0, 20, 4):
+ self.assertEqual(i, sess.run(elem_on_1))
+ self.assertEqual(i + 1, sess.run(elem_on_2))
+ self.assertEqual(i + 2, sess.run(elem_on_3))
+ self.assertEqual(i + 3, sess.run(elem_on_4))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(elem_on_1)
+ sess.run(elem_on_2)
+ sess.run(elem_on_3)
+ sess.run(elem_on_4)
+
+ def testNotFullyDivisible(self):
+ dataset = dataset_ops.Dataset.range(9)
+ multi_device_iterator = prefetching_ops.MultiDeviceIterator(
+ dataset, ["/cpu:1", "/cpu:2"])
+ elem_on_1, elem_on_2 = multi_device_iterator.get_next()
+
+ config = config_pb2.ConfigProto(device_count={"CPU": 3})
+ with self.test_session(config=config) as sess:
+ sess.run(multi_device_iterator.initializer)
+ for i in range(0, 8, 2):
+ self.assertEqual(i, sess.run(elem_on_1))
+ self.assertEqual(i + 1, sess.run(elem_on_2))
+ self.assertEqual(8, sess.run(elem_on_1))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(elem_on_1)
+ sess.run(elem_on_2)
+
+ def testUneven(self):
+ dataset = dataset_ops.Dataset.range(10)
+ multi_device_iterator = prefetching_ops.MultiDeviceIterator(
+ dataset, ["/cpu:1", "/cpu:2"])
+ elem_on_1, elem_on_2 = multi_device_iterator.get_next()
+
+ config = config_pb2.ConfigProto(device_count={"CPU": 3})
+ with self.test_session(config=config) as sess:
+ sess.run(multi_device_iterator.initializer)
+ for i in range(0, 10, 2):
+ self.assertEqual(i, sess.run(elem_on_1))
+ for i in range(0, 10, 2):
+ self.assertEqual(i + 1, sess.run(elem_on_2))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(elem_on_1)
+ sess.run(elem_on_2)
+
+ def testMultipleInitializations(self):
+ with ops.device("/cpu:0"):
+ epoch = array_ops.placeholder(dtypes.int64, shape=[])
+ dataset1 = dataset_ops.Dataset.from_tensors(epoch).repeat(1000)
+ dataset2 = dataset_ops.Dataset.range(1000)
+ dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
+ multi_device_iterator = prefetching_ops.MultiDeviceIterator(
+ dataset, ["/cpu:1", "/cpu:2"], prefetch_buffer_size=4)
+ elem_on_1, elem_on_2 = multi_device_iterator.get_next()
+ init_op = multi_device_iterator.initializer
+
+ config = config_pb2.ConfigProto(device_count={"CPU": 3})
+ with self.test_session(config=config) as sess:
+ for i in range(1000):
+ sess.run(init_op, feed_dict={epoch: i})
+ self.assertEqual([(i, 0), (i, 1)], sess.run([elem_on_1, elem_on_2]))
+
+ def testBasicGpu(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ with compat.forward_compatibility_horizon(2018, 8, 4):
+ dataset = dataset_ops.Dataset.range(10)
+ multi_device_iterator = prefetching_ops.MultiDeviceIterator(
+ dataset, ["/cpu:1", "/gpu:0"])
+ elem_on_1, elem_on_2 = multi_device_iterator.get_next()
+
+ config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1})
+ with self.test_session(config=config) as sess:
+ sess.run(multi_device_iterator.initializer)
+ for i in range(0, 10, 2):
+ self.assertEqual(i, sess.run(elem_on_1))
+ self.assertEqual(i + 1, sess.run(elem_on_2))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(elem_on_1)
+ sess.run(elem_on_2)
+
+ def testUnevenGpu(self):
+ if not test_util.is_gpu_available():
+ self.skipTest("No GPU available")
+
+ with compat.forward_compatibility_horizon(2018, 8, 4):
+ dataset = dataset_ops.Dataset.range(10)
+ multi_device_iterator = prefetching_ops.MultiDeviceIterator(
+ dataset, ["/cpu:1", "/gpu:0"])
+ elem_on_1, elem_on_2 = multi_device_iterator.get_next()
+
+ config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1})
+ with self.test_session(config=config) as sess:
+ sess.run(multi_device_iterator.initializer)
+ for i in range(0, 10, 2):
+ self.assertEqual(i, sess.run(elem_on_1))
+ for i in range(0, 10, 2):
+ self.assertEqual(i + 1, sess.run(elem_on_2))
+ with self.assertRaises(errors.OutOfRangeError):
+ sess.run(elem_on_1)
+ sess.run(elem_on_2)
+
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/contrib/data/python/kernel_tests/reader_dataset_ops_test.py b/tensorflow/contrib/data/python/kernel_tests/reader_dataset_ops_test.py
index 9df403ef50..851a33dfc8 100644
--- a/tensorflow/contrib/data/python/kernel_tests/reader_dataset_ops_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/reader_dataset_ops_test.py
@@ -17,13 +17,16 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import gzip
import os
+import zlib
import numpy as np
from tensorflow.contrib.data.python.kernel_tests import reader_dataset_ops_test_base
from tensorflow.contrib.data.python.ops import readers
from tensorflow.python.data.ops import readers as core_readers
+from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
@@ -182,264 +185,363 @@ class ReadBatchFeaturesTest(
class MakeCsvDatasetTest(test.TestCase):
- COLUMN_TYPES = [
- dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string
- ]
- COLUMNS = ["col%d" % i for i in range(len(COLUMN_TYPES))]
- DEFAULT_VALS = [[], [], [], [], ["NULL"]]
- DEFAULTS = [
- constant_op.constant([], dtype=dtypes.int32),
- constant_op.constant([], dtype=dtypes.int64),
- constant_op.constant([], dtype=dtypes.float32),
- constant_op.constant([], dtype=dtypes.float64),
- constant_op.constant(["NULL"], dtype=dtypes.string)
- ]
- LABEL = COLUMNS[0]
-
- def setUp(self):
- super(MakeCsvDatasetTest, self).setUp()
- self._num_files = 2
- self._num_records = 11
- self._test_filenames = self._create_files()
-
- def _csv_values(self, fileno, recordno):
- return [
- fileno,
- recordno,
- fileno * recordno * 0.5,
- fileno * recordno + 0.5,
- "record %d" % recordno if recordno % 2 == 1 else "",
- ]
+ def _make_csv_dataset(self, filenames, batch_size, num_epochs=1, **kwargs):
+ return readers.make_csv_dataset(
+ filenames, batch_size=batch_size, num_epochs=num_epochs, **kwargs)
- def _write_file(self, filename, rows):
- for i in range(len(rows)):
- if isinstance(rows[i], list):
- rows[i] = ",".join(str(v) if v is not None else "" for v in rows[i])
- fn = os.path.join(self.get_temp_dir(), filename)
- f = open(fn, "w")
- f.write("\n".join(rows))
- f.close()
- return fn
-
- def _create_file(self, fileno, header=True):
- rows = []
- if header:
- rows.append(self.COLUMNS)
- for recno in range(self._num_records):
- rows.append(self._csv_values(fileno, recno))
- return self._write_file("csv_file%d.csv" % fileno, rows)
-
- def _create_files(self):
+ def _setup_files(self, inputs, linebreak="\n", compression_type=None):
filenames = []
- for i in range(self._num_files):
- filenames.append(self._create_file(i))
+ for i, ip in enumerate(inputs):
+ fn = os.path.join(self.get_temp_dir(), "temp_%d.csv" % i)
+ contents = linebreak.join(ip).encode("utf-8")
+ if compression_type is None:
+ with open(fn, "wb") as f:
+ f.write(contents)
+ elif compression_type == "GZIP":
+ with gzip.GzipFile(fn, "wb") as f:
+ f.write(contents)
+ elif compression_type == "ZLIB":
+ contents = zlib.compress(contents)
+ with open(fn, "wb") as f:
+ f.write(contents)
+ else:
+ raise ValueError("Unsupported compression_type", compression_type)
+ filenames.append(fn)
return filenames
- def _make_csv_dataset(
- self,
- filenames,
- defaults,
- column_names=COLUMNS,
- label_name=LABEL,
- select_cols=None,
- batch_size=1,
- num_epochs=1,
- shuffle=False,
- shuffle_seed=None,
- header=True,
- na_value="",
- ):
- return readers.make_csv_dataset(
- filenames,
- batch_size=batch_size,
- column_names=column_names,
- column_defaults=defaults,
- label_name=label_name,
- num_epochs=num_epochs,
- shuffle=shuffle,
- shuffle_seed=shuffle_seed,
- header=header,
- na_value=na_value,
- select_columns=select_cols,
- )
-
- def _next_actual_batch(self, file_indices, batch_size, num_epochs, defaults):
- features = {col: list() for col in self.COLUMNS}
+ def _next_expected_batch(self, expected_output, expected_keys, batch_size,
+ num_epochs):
+ features = {k: [] for k in expected_keys}
for _ in range(num_epochs):
- for i in file_indices:
- for j in range(self._num_records):
- values = self._csv_values(i, j)
- for n, v in enumerate(values):
- if v == "": # pylint: disable=g-explicit-bool-comparison
- values[n] = defaults[n][0]
- values[-1] = values[-1].encode("utf-8")
-
- # Regroup lists by column instead of row
- for n, col in enumerate(self.COLUMNS):
- features[col].append(values[n])
- if len(list(features.values())[0]) == batch_size:
- yield features
- features = {col: list() for col in self.COLUMNS}
-
- def _run_actual_batch(self, outputs, sess):
- features, labels = sess.run(outputs)
- batch = [features[k] for k in self.COLUMNS if k != self.LABEL]
- batch.append(labels)
- return batch
-
- def _verify_records(
+ for values in expected_output:
+ for n, key in enumerate(expected_keys):
+ features[key].append(values[n])
+ if len(features[expected_keys[0]]) == batch_size:
+ yield features
+ features = {k: [] for k in expected_keys}
+ if features[expected_keys[0]]: # Leftover from the last batch
+ yield features
+
+ def _verify_output(
self,
sess,
dataset,
- file_indices,
- defaults=tuple(DEFAULT_VALS),
- label_name=LABEL,
- batch_size=1,
- num_epochs=1,
+ batch_size,
+ num_epochs,
+ label_name,
+ expected_output,
+ expected_keys,
):
- iterator = dataset.make_one_shot_iterator()
- get_next = iterator.get_next()
+ nxt = dataset.make_one_shot_iterator().get_next()
- for expected_features in self._next_actual_batch(file_indices, batch_size,
- num_epochs, defaults):
- actual_features = sess.run(get_next)
+ for expected_features in self._next_expected_batch(
+ expected_output,
+ expected_keys,
+ batch_size,
+ num_epochs,
+ ):
+ actual_features = sess.run(nxt)
if label_name is not None:
expected_labels = expected_features.pop(label_name)
- # Compare labels
self.assertAllEqual(expected_labels, actual_features[1])
- actual_features = actual_features[0] # Extract features dict from tuple
+ actual_features = actual_features[0]
for k in expected_features.keys():
# Compare features
self.assertAllEqual(expected_features[k], actual_features[k])
with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
-
- def testMakeCSVDataset(self):
- defaults = self.DEFAULTS
-
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- # Basic test: read from file 0.
- dataset = self._make_csv_dataset(self._test_filenames[0], defaults)
- self._verify_records(sess, dataset, [0])
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- # Basic test: read from file 1.
- dataset = self._make_csv_dataset(self._test_filenames[1], defaults)
- self._verify_records(sess, dataset, [1])
+ sess.run(nxt)
+
+ def _test_dataset(self,
+ inputs,
+ expected_output,
+ expected_keys,
+ batch_size=1,
+ num_epochs=1,
+ label_name=None,
+ **kwargs):
+    """Checks that elements from make_csv_dataset match the expected output."""
+ # Convert str type because py3 tf strings are bytestrings
+ filenames = self._setup_files(
+ inputs, compression_type=kwargs.get("compression_type", None))
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
- # Read from both files.
- dataset = self._make_csv_dataset(self._test_filenames, defaults)
- self._verify_records(sess, dataset, range(self._num_files))
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- # Read from both files. Exercise the `batch` and `num_epochs` parameters
- # of make_csv_dataset and make sure they work.
dataset = self._make_csv_dataset(
- self._test_filenames, defaults, batch_size=2, num_epochs=10)
- self._verify_records(
- sess, dataset, range(self._num_files), batch_size=2, num_epochs=10)
+ filenames,
+ batch_size=batch_size,
+ num_epochs=num_epochs,
+ label_name=label_name,
+ **kwargs)
+ self._verify_output(sess, dataset, batch_size, num_epochs, label_name,
+ expected_output, expected_keys)
+
+ def testMakeCSVDataset(self):
+ """Tests making a CSV dataset with keys and defaults provided."""
+ record_defaults = [
+ constant_op.constant([], dtypes.int32),
+ constant_op.constant([], dtypes.int64),
+ constant_op.constant([], dtypes.float32),
+ constant_op.constant([], dtypes.float64),
+ constant_op.constant([], dtypes.string)
+ ]
+
+ column_names = ["col%d" % i for i in range(5)]
+ inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
+ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
+ ]]
+ expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
+ [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
+ label = "col0"
+
+ self._test_dataset(
+ inputs,
+ expected_output=expected_output,
+ expected_keys=column_names,
+ column_names=column_names,
+ label_name=label,
+ batch_size=1,
+ num_epochs=1,
+ shuffle=False,
+ header=True,
+ column_defaults=record_defaults,
+ )
+
+ def testMakeCSVDataset_withBatchSizeAndEpochs(self):
+ """Tests making a CSV dataset with keys and defaults provided."""
+ record_defaults = [
+ constant_op.constant([], dtypes.int32),
+ constant_op.constant([], dtypes.int64),
+ constant_op.constant([], dtypes.float32),
+ constant_op.constant([], dtypes.float64),
+ constant_op.constant([], dtypes.string)
+ ]
+
+ column_names = ["col%d" % i for i in range(5)]
+ inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
+ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
+ ]]
+ expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
+ [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
+ label = "col0"
+
+ self._test_dataset(
+ inputs,
+ expected_output=expected_output,
+ expected_keys=column_names,
+ column_names=column_names,
+ label_name=label,
+ batch_size=3,
+ num_epochs=10,
+ shuffle=False,
+ header=True,
+ column_defaults=record_defaults,
+ )
- def testMakeCSVDataset_withBadColumns(self):
+ def testMakeCSVDataset_withCompressionType(self):
+ """Tests `compression_type` argument."""
+ record_defaults = [
+ constant_op.constant([], dtypes.int32),
+ constant_op.constant([], dtypes.int64),
+ constant_op.constant([], dtypes.float32),
+ constant_op.constant([], dtypes.float64),
+ constant_op.constant([], dtypes.string)
+ ]
+
+ column_names = ["col%d" % i for i in range(5)]
+ inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
+ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
+ ]]
+ expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
+ [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
+ label = "col0"
+
+ for compression_type in ("GZIP", "ZLIB"):
+ self._test_dataset(
+ inputs,
+ expected_output=expected_output,
+ expected_keys=column_names,
+ column_names=column_names,
+ label_name=label,
+ batch_size=1,
+ num_epochs=1,
+ shuffle=False,
+ header=True,
+ column_defaults=record_defaults,
+ compression_type=compression_type,
+ )
+
+ def testMakeCSVDataset_withBadInputs(self):
"""Tests that exception is raised when input is malformed.
"""
- dupe_columns = self.COLUMNS[:-1] + self.COLUMNS[:1]
- defaults = self.DEFAULTS
+ record_defaults = [
+ constant_op.constant([], dtypes.int32),
+ constant_op.constant([], dtypes.int64),
+ constant_op.constant([], dtypes.float32),
+ constant_op.constant([], dtypes.float64),
+ constant_op.constant([], dtypes.string)
+ ]
+
+ column_names = ["col%d" % i for i in range(5)]
+ inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
+ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
+ ]]
+ filenames = self._setup_files(inputs)
# Duplicate column names
with self.assertRaises(ValueError):
self._make_csv_dataset(
- self._test_filenames, defaults, column_names=dupe_columns)
+ filenames,
+ batch_size=1,
+ column_defaults=record_defaults,
+ label_name="col0",
+ column_names=column_names * 2)
# Label key not one of column names
with self.assertRaises(ValueError):
self._make_csv_dataset(
- self._test_filenames, defaults, label_name="not_a_real_label")
+ filenames,
+ batch_size=1,
+ column_defaults=record_defaults,
+ label_name="not_a_real_label",
+ column_names=column_names)
def testMakeCSVDataset_withNoLabel(self):
- """Tests that CSV datasets can be created when no label is specified.
- """
- defaults = self.DEFAULTS
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- # Read from both files. Make sure this works with no label key supplied.
- dataset = self._make_csv_dataset(
- self._test_filenames,
- defaults,
- batch_size=2,
- num_epochs=10,
- label_name=None)
- self._verify_records(
- sess,
- dataset,
- range(self._num_files),
- batch_size=2,
- num_epochs=10,
- label_name=None)
+ """Tests making a CSV dataset with no label provided."""
+ record_defaults = [
+ constant_op.constant([], dtypes.int32),
+ constant_op.constant([], dtypes.int64),
+ constant_op.constant([], dtypes.float32),
+ constant_op.constant([], dtypes.float64),
+ constant_op.constant([], dtypes.string)
+ ]
+
+ column_names = ["col%d" % i for i in range(5)]
+ inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
+ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
+ ]]
+ expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
+ [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
+
+ self._test_dataset(
+ inputs,
+ expected_output=expected_output,
+ expected_keys=column_names,
+ column_names=column_names,
+ batch_size=1,
+ num_epochs=1,
+ shuffle=False,
+ header=True,
+ column_defaults=record_defaults,
+ )
def testMakeCSVDataset_withNoHeader(self):
"""Tests that datasets can be created from CSV files with no header line.
"""
- defaults = self.DEFAULTS
- file_without_header = self._create_file(
- len(self._test_filenames), header=False)
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- dataset = self._make_csv_dataset(
- file_without_header,
- defaults,
- batch_size=2,
- num_epochs=10,
- header=False,
- )
- self._verify_records(
- sess,
- dataset,
- [len(self._test_filenames)],
- batch_size=2,
- num_epochs=10,
- )
+ record_defaults = [
+ constant_op.constant([], dtypes.int32),
+ constant_op.constant([], dtypes.int64),
+ constant_op.constant([], dtypes.float32),
+ constant_op.constant([], dtypes.float64),
+ constant_op.constant([], dtypes.string)
+ ]
+
+ column_names = ["col%d" % i for i in range(5)]
+ inputs = [["0,1,2,3,4", "5,6,7,8,9"], ["10,11,12,13,14", "15,16,17,18,19"]]
+ expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
+ [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
+ label = "col0"
+
+ self._test_dataset(
+ inputs,
+ expected_output=expected_output,
+ expected_keys=column_names,
+ column_names=column_names,
+ label_name=label,
+ batch_size=1,
+ num_epochs=1,
+ shuffle=False,
+ header=False,
+ column_defaults=record_defaults,
+ )
def testMakeCSVDataset_withTypes(self):
"""Tests that defaults can be a dtype instead of a Tensor for required vals.
"""
- defaults = [d for d in self.COLUMN_TYPES[:-1]]
- defaults.append(constant_op.constant(["NULL"], dtype=dtypes.string))
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- dataset = self._make_csv_dataset(self._test_filenames, defaults)
- self._verify_records(sess, dataset, range(self._num_files))
+ record_defaults = [
+ dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
+ dtypes.string
+ ]
+
+ column_names = ["col%d" % i for i in range(5)]
+ inputs = [[",".join(x[0] for x in column_names), "0,1,2,3,4", "5,6,7,8,9"],
+ [
+ ",".join(x[0] for x in column_names), "10,11,12,13,14",
+ "15,16,17,18,19"
+ ]]
+ expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
+ [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
+ label = "col0"
+
+ self._test_dataset(
+ inputs,
+ expected_output=expected_output,
+ expected_keys=column_names,
+ column_names=column_names,
+ label_name=label,
+ batch_size=1,
+ num_epochs=1,
+ shuffle=False,
+ header=True,
+ column_defaults=record_defaults,
+ )
def testMakeCSVDataset_withNoColNames(self):
"""Tests that datasets can be created when column names are not specified.
In that case, we should infer the column names from the header lines.
"""
- defaults = self.DEFAULTS
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- # Read from both files. Exercise the `batch` and `num_epochs` parameters
- # of make_csv_dataset and make sure they work.
- dataset = self._make_csv_dataset(
- self._test_filenames,
- defaults,
- column_names=None,
- batch_size=2,
- num_epochs=10)
- self._verify_records(
- sess, dataset, range(self._num_files), batch_size=2, num_epochs=10)
+ record_defaults = [
+ constant_op.constant([], dtypes.int32),
+ constant_op.constant([], dtypes.int64),
+ constant_op.constant([], dtypes.float32),
+ constant_op.constant([], dtypes.float64),
+ constant_op.constant([], dtypes.string)
+ ]
+
+ column_names = ["col%d" % i for i in range(5)]
+ inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
+ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
+ ]]
+ expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
+ [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
+ label = "col0"
+
+ self._test_dataset(
+ inputs,
+ expected_output=expected_output,
+ expected_keys=column_names,
+ label_name=label,
+ batch_size=1,
+ num_epochs=1,
+ shuffle=False,
+ header=True,
+ column_defaults=record_defaults,
+ )
def testMakeCSVDataset_withTypeInferenceMismatch(self):
# Test that error is thrown when num fields doesn't match columns
+ column_names = ["col%d" % i for i in range(5)]
+ inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
+ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
+ ]]
+ filenames = self._setup_files(inputs)
with self.assertRaises(ValueError):
self._make_csv_dataset(
- self._test_filenames,
- column_names=self.COLUMNS + ["extra_name"],
- defaults=None,
+ filenames,
+ column_names=column_names + ["extra_name"],
+ column_defaults=None,
batch_size=2,
num_epochs=10)
@@ -448,197 +550,215 @@ class MakeCsvDatasetTest(test.TestCase):
In that case, we should infer the types from the first N records.
"""
- # Test that it works with standard test files (with header, etc)
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- dataset = self._make_csv_dataset(
- self._test_filenames, defaults=None, batch_size=2, num_epochs=10)
- self._verify_records(
- sess,
- dataset,
- range(self._num_files),
- batch_size=2,
- num_epochs=10,
- defaults=[[], [], [], [], [""]])
-
- def testMakeCSVDataset_withTypeInferenceTricky(self):
- # Test on a deliberately tricky file (type changes as we read more rows, and
- # there are null values)
- fn = os.path.join(self.get_temp_dir(), "file.csv")
- expected_dtypes = [
- dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float32,
- dtypes.string, dtypes.string
- ]
- col_names = ["col%d" % i for i in range(len(expected_dtypes))]
- rows = [[None, None, None, "NAN", "",
- "a"], [1, 2**31 + 1, 2**64, 123, "NAN", ""],
- ['"123"', 2, 2**64, 123.4, "NAN", '"cd,efg"']]
- expected = [[0, 0, 0, 0, "", "a"], [1, 2**31 + 1, 2**64, 123, "", ""],
- [123, 2, 2**64, 123.4, "", "cd,efg"]]
- for row in expected:
- row[-1] = row[-1].encode("utf-8") # py3 expects byte strings
- row[-2] = row[-2].encode("utf-8") # py3 expects byte strings
- self._write_file("file.csv", [col_names] + rows)
+ column_names = ["col%d" % i for i in range(5)]
+ str_int32_max = str(2**33)
+ inputs = [[
+ ",".join(x for x in column_names),
+ "0,%s,2.0,3e50,rabbit" % str_int32_max
+ ]]
+ expected_output = [[0, 2**33, 2.0, 3e50, b"rabbit"]]
+ label = "col0"
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- dataset = self._make_csv_dataset(
- fn,
- defaults=None,
- column_names=None,
- label_name=None,
- na_value="NAN",
- )
- features = dataset.make_one_shot_iterator().get_next()
- # Check that types match
- for i in range(len(expected_dtypes)):
- print(features["col%d" % i].dtype, expected_dtypes[i])
- assert features["col%d" % i].dtype == expected_dtypes[i]
- for i in range(len(rows)):
- assert sess.run(features) == dict(zip(col_names, expected[i]))
-
- def testMakeCSVDataset_withTypeInferenceAllTypes(self):
- # Test that we make the correct inference for all types with fallthrough
- fn = os.path.join(self.get_temp_dir(), "file.csv")
- expected_dtypes = [
- dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
- dtypes.string, dtypes.string
+ self._test_dataset(
+ inputs,
+ expected_output=expected_output,
+ expected_keys=column_names,
+ column_names=column_names,
+ label_name=label,
+ batch_size=1,
+ num_epochs=1,
+ shuffle=False,
+ header=True,
+ )
+
+ def testMakeCSVDataset_withTypeInferenceFallthrough(self):
+ """Tests that datasets can be created when no defaults are specified.
+
+ Tests on a deliberately tricky file.
+ """
+ column_names = ["col%d" % i for i in range(5)]
+ str_int32_max = str(2**33)
+ inputs = [[
+ ",".join(x for x in column_names),
+ ",,,,",
+ "0,0,0.0,0.0,0.0",
+ "0,%s,2.0,3e50,rabbit" % str_int32_max,
+ ",,,,",
+ ]]
+ expected_output = [[0, 0, 0, 0, b""], [0, 0, 0, 0, b"0.0"],
+ [0, 2**33, 2.0, 3e50, b"rabbit"], [0, 0, 0, 0, b""]]
+ label = "col0"
+
+ self._test_dataset(
+ inputs,
+ expected_output=expected_output,
+ expected_keys=column_names,
+ column_names=column_names,
+ label_name=label,
+ batch_size=1,
+ num_epochs=1,
+ shuffle=False,
+ header=True,
+ )
+
+ def testMakeCSVDataset_withSelectCols(self):
+ record_defaults = [
+ constant_op.constant([], dtypes.int32),
+ constant_op.constant([], dtypes.int64),
+ constant_op.constant([], dtypes.float32),
+ constant_op.constant([], dtypes.float64),
+ constant_op.constant([], dtypes.string)
]
- col_names = ["col%d" % i for i in range(len(expected_dtypes))]
- rows = [[1, 2**31 + 1, 1.0, 4e40, "abc", ""]]
- expected = [[
- 1, 2**31 + 1, 1.0, 4e40, "abc".encode("utf-8"), "".encode("utf-8")
+ column_names = ["col%d" % i for i in range(5)]
+ str_int32_max = str(2**33)
+ inputs = [[
+ ",".join(x for x in column_names),
+ "0,%s,2.0,3e50,rabbit" % str_int32_max
]]
- self._write_file("file.csv", [col_names] + rows)
+ expected_output = [[0, 2**33, 2.0, 3e50, b"rabbit"]]
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- dataset = self._make_csv_dataset(
- fn,
- defaults=None,
- column_names=None,
- label_name=None,
- na_value="NAN",
- )
- features = dataset.make_one_shot_iterator().get_next()
- # Check that types match
- for i in range(len(expected_dtypes)):
- self.assertAllEqual(features["col%d" % i].dtype, expected_dtypes[i])
- for i in range(len(rows)):
- self.assertAllEqual(
- sess.run(features), dict(zip(col_names, expected[i])))
+ select_cols = [1, 3, 4]
+ self._test_dataset(
+ inputs,
+ expected_output=[[x[i] for i in select_cols] for x in expected_output],
+ expected_keys=[column_names[i] for i in select_cols],
+ column_names=column_names,
+ column_defaults=[record_defaults[i] for i in select_cols],
+ batch_size=1,
+ num_epochs=1,
+ shuffle=False,
+ header=True,
+ select_columns=select_cols,
+ )
+
+ # Can still do inference without provided defaults
+ self._test_dataset(
+ inputs,
+ expected_output=[[x[i] for i in select_cols] for x in expected_output],
+ expected_keys=[column_names[i] for i in select_cols],
+ column_names=column_names,
+ batch_size=1,
+ num_epochs=1,
+ shuffle=False,
+ header=True,
+ select_columns=select_cols,
+ )
+
+ # Can still do column name inference
+ self._test_dataset(
+ inputs,
+ expected_output=[[x[i] for i in select_cols] for x in expected_output],
+ expected_keys=[column_names[i] for i in select_cols],
+ batch_size=1,
+ num_epochs=1,
+ shuffle=False,
+ header=True,
+ select_columns=select_cols,
+ )
+
+ # Can specify column names instead of indices
+ self._test_dataset(
+ inputs,
+ expected_output=[[x[i] for i in select_cols] for x in expected_output],
+ expected_keys=[column_names[i] for i in select_cols],
+ column_names=column_names,
+ batch_size=1,
+ num_epochs=1,
+ shuffle=False,
+ header=True,
+ select_columns=[column_names[i] for i in select_cols],
+ )
def testMakeCSVDataset_withSelectColsError(self):
- data = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
- col_names = ["col%d" % i for i in range(5)]
- fn = self._write_file("file.csv", [col_names] + data)
+ record_defaults = [
+ constant_op.constant([], dtypes.int32),
+ constant_op.constant([], dtypes.int64),
+ constant_op.constant([], dtypes.float32),
+ constant_op.constant([], dtypes.float64),
+ constant_op.constant([], dtypes.string)
+ ]
+ column_names = ["col%d" % i for i in range(5)]
+ str_int32_max = str(2**33)
+ inputs = [[
+ ",".join(x for x in column_names),
+ "0,%s,2.0,3e50,rabbit" % str_int32_max
+ ]]
+
+ select_cols = [1, 3, 4]
+ filenames = self._setup_files(inputs)
+
with self.assertRaises(ValueError):
# Mismatch in number of defaults and number of columns selected,
# should raise an error
self._make_csv_dataset(
- fn,
- defaults=[[0]] * 5,
- column_names=col_names,
- label_name=None,
- select_cols=[1, 3])
+ filenames,
+ batch_size=1,
+ column_defaults=record_defaults,
+ column_names=column_names,
+ select_columns=select_cols)
+
with self.assertRaises(ValueError):
# Invalid column name should raise an error
self._make_csv_dataset(
- fn,
- defaults=[[0]],
- column_names=col_names,
+ filenames,
+ batch_size=1,
+ column_defaults=[[0]],
+ column_names=column_names,
label_name=None,
- select_cols=["invalid_col_name"])
-
- def testMakeCSVDataset_withSelectCols(self):
- data = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
- col_names = ["col%d" % i for i in range(5)]
- fn = self._write_file("file.csv", [col_names] + data)
- # If select_cols is specified, should only yield a subset of columns
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- dataset = self._make_csv_dataset(
- fn,
- defaults=[[0], [0]],
- column_names=col_names,
- label_name=None,
- select_cols=[1, 3])
- expected = [[1, 3], [6, 8]]
- features = dataset.make_one_shot_iterator().get_next()
- for i in range(len(data)):
- self.assertAllEqual(
- sess.run(features),
- dict(zip([col_names[1], col_names[3]], expected[i])))
- # Can still do default inference with select_cols
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- dataset = self._make_csv_dataset(
- fn,
- defaults=None,
- column_names=col_names,
- label_name=None,
- select_cols=[1, 3])
- expected = [[1, 3], [6, 8]]
- features = dataset.make_one_shot_iterator().get_next()
- for i in range(len(data)):
- self.assertAllEqual(
- sess.run(features),
- dict(zip([col_names[1], col_names[3]], expected[i])))
- # Can still do column name inference
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- dataset = self._make_csv_dataset(
- fn,
- defaults=None,
- column_names=None,
- label_name=None,
- select_cols=[1, 3])
- expected = [[1, 3], [6, 8]]
- features = dataset.make_one_shot_iterator().get_next()
- for i in range(len(data)):
- self.assertAllEqual(
- sess.run(features),
- dict(zip([col_names[1], col_names[3]], expected[i])))
- # Can specify column names instead of indices
- with ops.Graph().as_default() as g:
- with self.test_session(graph=g) as sess:
- dataset = self._make_csv_dataset(
- fn,
- defaults=None,
- column_names=None,
- label_name=None,
- select_cols=[col_names[1], col_names[3]])
- expected = [[1, 3], [6, 8]]
- features = dataset.make_one_shot_iterator().get_next()
- for i in range(len(data)):
- self.assertAllEqual(
- sess.run(features),
- dict(zip([col_names[1], col_names[3]], expected[i])))
+ select_columns=["invalid_col_name"])
def testMakeCSVDataset_withShuffle(self):
- total_records = self._num_files * self._num_records
- defaults = self.DEFAULTS
+ record_defaults = [
+ constant_op.constant([], dtypes.int32),
+ constant_op.constant([], dtypes.int64),
+ constant_op.constant([], dtypes.float32),
+ constant_op.constant([], dtypes.float64),
+ constant_op.constant([], dtypes.string)
+ ]
+
+ def str_series(st):
+ return ",".join(str(i) for i in range(st, st + 5))
+
+ column_names = ["col%d" % i for i in range(5)]
+ inputs = [
+ [",".join(x for x in column_names)
+ ] + [str_series(5 * i) for i in range(15)],
+ [",".join(x for x in column_names)] +
+ [str_series(5 * i) for i in range(15, 20)],
+ ]
+
+ filenames = self._setup_files(inputs)
+
+ total_records = 20
for batch_size in [1, 2]:
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Test that shuffling with the same seed produces the same result
dataset1 = self._make_csv_dataset(
- self._test_filenames,
- defaults,
+ filenames,
+ column_defaults=record_defaults,
+ column_names=column_names,
batch_size=batch_size,
+ header=True,
shuffle=True,
- shuffle_seed=5)
+ shuffle_seed=5,
+ num_epochs=2,
+ )
dataset2 = self._make_csv_dataset(
- self._test_filenames,
- defaults,
+ filenames,
+ column_defaults=record_defaults,
+ column_names=column_names,
batch_size=batch_size,
+ header=True,
shuffle=True,
- shuffle_seed=5)
+ shuffle_seed=5,
+ num_epochs=2,
+ )
outputs1 = dataset1.make_one_shot_iterator().get_next()
outputs2 = dataset2.make_one_shot_iterator().get_next()
for _ in range(total_records // batch_size):
- batch1 = self._run_actual_batch(outputs1, sess)
- batch2 = self._run_actual_batch(outputs2, sess)
+ batch1 = nest.flatten(sess.run(outputs1))
+ batch2 = nest.flatten(sess.run(outputs2))
for i in range(len(batch1)):
self.assertAllEqual(batch1[i], batch2[i])
@@ -646,23 +766,31 @@ class MakeCsvDatasetTest(test.TestCase):
with self.test_session(graph=g) as sess:
# Test that shuffling with a different seed produces different results
dataset1 = self._make_csv_dataset(
- self._test_filenames,
- defaults,
+ filenames,
+ column_defaults=record_defaults,
+ column_names=column_names,
batch_size=batch_size,
+ header=True,
shuffle=True,
- shuffle_seed=5)
+ shuffle_seed=5,
+ num_epochs=2,
+ )
dataset2 = self._make_csv_dataset(
- self._test_filenames,
- defaults,
+ filenames,
+ column_defaults=record_defaults,
+ column_names=column_names,
batch_size=batch_size,
+ header=True,
shuffle=True,
- shuffle_seed=6)
+ shuffle_seed=6,
+ num_epochs=2,
+ )
outputs1 = dataset1.make_one_shot_iterator().get_next()
outputs2 = dataset2.make_one_shot_iterator().get_next()
all_equal = False
for _ in range(total_records // batch_size):
- batch1 = self._run_actual_batch(outputs1, sess)
- batch2 = self._run_actual_batch(outputs2, sess)
+ batch1 = nest.flatten(sess.run(outputs1))
+ batch2 = nest.flatten(sess.run(outputs2))
for i in range(len(batch1)):
all_equal = all_equal and np.array_equal(batch1[i], batch2[i])
self.assertFalse(all_equal)
@@ -874,6 +1002,5 @@ class MakeTFRecordDatasetTest(
self._shuffle_test(batch_size, num_epochs, num_parallel_reads,
seed=21345)
-
if __name__ == "__main__":
test.main()
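
As a hedged, illustrative sketch (not part of the diff) of the keyword-style make_csv_dataset interface exercised by the tests above, assuming a hypothetical three-column CSV file at /tmp/example.csv:

```python
# Illustrative sketch only; the file path and column layout are hypothetical,
# and the keyword arguments mirror those used in the tests above.
import tensorflow as tf
from tensorflow.contrib.data.python.ops import readers

dataset = readers.make_csv_dataset(
    ["/tmp/example.csv"],                   # hypothetical input file
    batch_size=2,
    column_names=["col0", "col1", "col2"],
    column_defaults=[tf.int32, tf.float32, tf.string],
    label_name="col0",                      # yields (features, label) tuples
    header=True,
    shuffle=False,
    num_epochs=1)

features, label = dataset.make_one_shot_iterator().get_next()
```
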
diff --git a/tensorflow/contrib/data/python/kernel_tests/serialization/BUILD b/tensorflow/contrib/data/python/kernel_tests/serialization/BUILD
index 686788522a..3c3f23f9a9 100644
--- a/tensorflow/contrib/data/python/kernel_tests/serialization/BUILD
+++ b/tensorflow/contrib/data/python/kernel_tests/serialization/BUILD
@@ -73,6 +73,20 @@ py_test(
)
py_test(
+ name = "csv_dataset_serialization_test",
+ size = "small",
+ srcs = ["csv_dataset_serialization_test.py"],
+ srcs_version = "PY2AND3",
+ tags = ["no_pip"],
+ deps = [
+ ":dataset_serialization_test_base",
+ "//tensorflow/contrib/data/python/ops:readers",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_ops",
+ ],
+)
+
+py_test(
name = "dataset_constructor_serialization_test",
size = "medium",
srcs = ["dataset_constructor_serialization_test.py"],
diff --git a/tensorflow/contrib/data/python/kernel_tests/serialization/csv_dataset_serialization_test.py b/tensorflow/contrib/data/python/kernel_tests/serialization/csv_dataset_serialization_test.py
new file mode 100644
index 0000000000..247f2046ea
--- /dev/null
+++ b/tensorflow/contrib/data/python/kernel_tests/serialization/csv_dataset_serialization_test.py
@@ -0,0 +1,73 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for the CsvDataset serialization."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import gzip
+import os
+
+from tensorflow.contrib.data.python.kernel_tests.serialization import dataset_serialization_test_base
+from tensorflow.contrib.data.python.ops import readers
+from tensorflow.python.platform import test
+
+
+class CsvDatasetSerializationTest(
+ dataset_serialization_test_base.DatasetSerializationTestBase):
+
+ def setUp(self):
+ self._num_cols = 7
+ self._num_rows = 10
+ self._num_epochs = 14
+ self._num_outputs = self._num_rows * self._num_epochs
+
+ inputs = [
+ ",".join(str(self._num_cols * j + i)
+ for i in range(self._num_cols))
+ for j in range(self._num_rows)
+ ]
+ contents = "\n".join(inputs).encode("utf-8")
+
+ self._filename = os.path.join(self.get_temp_dir(), "file.csv")
+ self._compressed = os.path.join(self.get_temp_dir(),
+ "comp.csv") # GZip compressed
+
+ with open(self._filename, "wb") as f:
+ f.write(contents)
+ with gzip.GzipFile(self._compressed, "wb") as f:
+ f.write(contents)
+
+ def ds_func(self, **kwargs):
+ compression_type = kwargs.get("compression_type", None)
+ if compression_type == "GZIP":
+ filename = self._compressed
+ elif compression_type is None:
+ filename = self._filename
+ else:
+ raise ValueError("Invalid compression type:", compression_type)
+
+ return readers.CsvDataset(filename, **kwargs).repeat(self._num_epochs)
+
+ def testSerializationCore(self):
+ defs = [[0]] * self._num_cols
+ self.run_core_tests(
+ lambda: self.ds_func(record_defaults=defs, buffer_size=2),
+ lambda: self.ds_func(record_defaults=defs, buffer_size=12),
+ self._num_outputs)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py b/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py
index 5590a4bf78..8b2f846494 100644
--- a/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py
+++ b/tensorflow/contrib/data/python/kernel_tests/slide_dataset_op_test.py
@@ -17,6 +17,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.data.python.ops import sliding
@@ -29,28 +30,45 @@ from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
-class SlideDatasetTest(test.TestCase):
-
- def testSlideDataset(self):
- """Test an dataset that maps a TF function across its input elements."""
+class SlideDatasetTest(test.TestCase, parameterized.TestCase):
+
+ @parameterized.parameters(
+ (20, 14, 7, 1),
+ (20, 17, 9, 1),
+ (20, 14, 14, 1),
+ (20, 10, 14, 1),
+ (20, 14, 19, 1),
+ (20, 4, 1, 2),
+ (20, 2, 1, 6),
+ (20, 4, 7, 2),
+ (20, 2, 7, 6),
+ (1, 10, 4, 1),
+ (0, 10, 4, 1),
+ )
+ def testSlideDataset(self, count, window_size, window_shift, window_stride):
+ """Tests a dataset that slides a window its input elements."""
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
- count = array_ops.placeholder(dtypes.int64, shape=[])
- window_size = array_ops.placeholder(dtypes.int64, shape=[])
- stride = array_ops.placeholder(dtypes.int64, shape=[])
+ count_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
- # RepeatDataset(count) -> _SlideDataset(window_size, stride).
- iterator = (dataset_ops.Dataset.from_tensor_slices(components)
- .map(_map_fn)
- .repeat(count)
- .apply(sliding.sliding_window_batch(window_size, stride))
- .make_initializable_iterator())
+ # RepeatDataset(count) ->
+ # _SlideDataset(window_size, window_shift, window_stride).
+ iterator = (
+ dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
+ .repeat(count).apply(
+ sliding.sliding_window_batch(
+ window_size=window_size_t,
+ window_shift=window_shift_t,
+ window_stride=window_stride_t)).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
@@ -58,90 +76,126 @@ class SlideDatasetTest(test.TestCase):
[t.shape.as_list() for t in get_next])
with self.test_session() as sess:
- # stride < window_size.
- # Slide over a finite input, where the window_size divides the
- # total number of elements.
- sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 7})
- # Same formula with convolution layer.
- num_batches = (20 * 7 - 14) // 7 + 1
- for i in range(num_batches):
- result = sess.run(get_next)
- for component, result_component in zip(components, result):
- for j in range(14):
- self.assertAllEqual(component[(i*7 + j) % 7]**2,
- result_component[j])
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
- # Slide over a finite input, where the window_size does not
- # divide the total number of elements.
- sess.run(init_op, feed_dict={count: 20, window_size: 17, stride: 9})
- num_batches = (20 * 7 - 17) // 9 + 1
+ sess.run(
+ init_op,
+ feed_dict={
+ count_t: count,
+ window_size_t: window_size,
+ window_shift_t: window_shift,
+ window_stride_t: window_stride
+ })
+ num_batches = (count * 7 - (
+ (window_size - 1) * window_stride + 1)) // window_shift + 1
for i in range(num_batches):
result = sess.run(get_next)
for component, result_component in zip(components, result):
- for j in range(17):
- self.assertAllEqual(component[(i*9 + j) % 7]**2,
- result_component[j])
+ for j in range(window_size):
+ self.assertAllEqual(
+ component[(i * window_shift + j * window_stride) % 7]**2,
+ result_component[j])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
- # stride == window_size.
- sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 14})
- num_batches = 20 * 7 // 14
- for i in range(num_batches):
- result = sess.run(get_next)
- for component, result_component in zip(components, result):
- for j in range(14):
- self.assertAllEqual(component[(i*14 + j) % 7]**2,
- result_component[j])
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
+ @parameterized.parameters(
+ (20, 14, 7, 1),
+ (20, 17, 9, 1),
+ (20, 14, 14, 1),
+ (20, 10, 14, 1),
+ (20, 14, 19, 1),
+ (20, 4, 1, 2),
+ (20, 2, 1, 6),
+ (20, 4, 7, 2),
+ (20, 2, 7, 6),
+ (1, 10, 4, 1),
+ (0, 10, 4, 1),
+ )
+ def testSlideDatasetDeprecated(self, count, window_size, stride,
+ window_stride):
+ """Tests a dataset that slides a window its input elements."""
+ components = (np.arange(7),
+ np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
+ np.array(37.0) * np.arange(7))
- # stride > window_size.
- sess.run(init_op, feed_dict={count: 20, window_size: 10, stride: 14})
- num_batches = 20 * 7 // 14
- for i in range(num_batches):
- result = sess.run(get_next)
- for component, result_component in zip(components, result):
- for j in range(10):
- self.assertAllEqual(component[(i*14 + j) % 7]**2,
- result_component[j])
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
- # Drop the last batch which is smaller than window_size.
- sess.run(init_op, feed_dict={count: 20, window_size: 14, stride: 19})
- num_batches = (20 * 7 - 7) // 19 # = 19 * 7 // 19
- for i in range(num_batches):
- result = sess.run(get_next)
- for component, result_component in zip(components, result):
- for j in range(14):
- self.assertAllEqual(component[(i*19 + j) % 7]**2,
- result_component[j])
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
+ count_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
+ stride_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
- # Slide over a finite input, which is less than window_size,
- # should fail straight away.
- sess.run(init_op, feed_dict={count: 1, window_size: 10, stride: 4})
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
+ def _map_fn(x, y, z):
+ return math_ops.square(x), math_ops.square(y), math_ops.square(z)
- sess.run(init_op, feed_dict={count: 1, window_size: 10, stride: 8})
- with self.assertRaises(errors.OutOfRangeError):
- sess.run(get_next)
+ # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
+ # RepeatDataset(count) -> _SlideDataset(window_size, stride, window_stride).
+ iterator = (
+ dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
+ .repeat(count).apply(
+ sliding.sliding_window_batch(
+ window_size=window_size_t,
+ stride=stride_t,
+ window_stride=window_stride_t)).make_initializable_iterator())
+ init_op = iterator.initializer
+ get_next = iterator.get_next()
- # Slide over an empty input should fail straight away.
- sess.run(init_op, feed_dict={count: 0, window_size: 8, stride: 4})
+ self.assertEqual([[None] + list(c.shape[1:]) for c in components],
+ [t.shape.as_list() for t in get_next])
+
+ with self.test_session() as sess:
+ sess.run(
+ init_op,
+ feed_dict={
+ count_t: count,
+ window_size_t: window_size,
+ stride_t: stride,
+ window_stride_t: window_stride
+ })
+ num_batches = (count * 7 - (
+ (window_size - 1) * window_stride + 1)) // stride + 1
+ for i in range(num_batches):
+ result = sess.run(get_next)
+ for component, result_component in zip(components, result):
+ for j in range(window_size):
+ self.assertAllEqual(
+ component[(i * stride + j * window_stride) % 7]**2,
+ result_component[j])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
- # Empty window_size should be an initialization time error.
- with self.assertRaises(errors.InvalidArgumentError):
- sess.run(init_op, feed_dict={count: 14, window_size: 0, stride: 0})
+ @parameterized.parameters(
+ (14, 0, 3, 1),
+ (14, 3, 0, 1),
+ (14, 3, 3, 0),
+ )
+ def testSlideDatasetInvalid(self, count, window_size, window_shift,
+ window_stride):
+ count_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_size_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_shift_t = array_ops.placeholder(dtypes.int64, shape=[])
+ window_stride_t = array_ops.placeholder(dtypes.int64, shape=[])
+
+ iterator = (
+ dataset_ops.Dataset.range(10).map(lambda x: x).repeat(count_t).apply(
+ sliding.sliding_window_batch(
+ window_size=window_size_t,
+ window_shift=window_shift_t,
+ window_stride=window_stride_t)).make_initializable_iterator())
+ init_op = iterator.initializer
- # Invalid stride should be an initialization time error.
+ with self.test_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
- sess.run(init_op, feed_dict={count: 14, window_size: 3, stride: 0})
+ sess.run(
+ init_op,
+ feed_dict={
+ count_t: count,
+ window_size_t: window_size,
+ window_shift_t: window_shift,
+ window_stride_t: window_stride
+ })
+
+ def testSlideDatasetValueError(self):
+ with self.assertRaises(ValueError):
+ dataset_ops.Dataset.range(10).map(lambda x: x).apply(
+ sliding.sliding_window_batch(
+ window_size=1, stride=1, window_shift=1, window_stride=1))
def assertSparseValuesEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
@@ -155,7 +209,8 @@ class SlideDatasetTest(test.TestCase):
indices=[[0]], values=(i * [1]), dense_shape=[1])
iterator = dataset_ops.Dataset.range(10).map(_sparse).apply(
- sliding.sliding_window_batch(5, 3)).make_initializable_iterator()
+ sliding.sliding_window_batch(
+ window_size=5, window_shift=3)).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
@@ -183,7 +238,8 @@ class SlideDatasetTest(test.TestCase):
dense_shape=[i])
iterator = dataset_ops.Dataset.range(10).map(_sparse).apply(
- sliding.sliding_window_batch(5, 3)).make_initializable_iterator()
+ sliding.sliding_window_batch(
+ window_size=5, window_shift=3)).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
@@ -213,11 +269,11 @@ class SlideDatasetTest(test.TestCase):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
- iterator = (dataset_ops.Dataset.range(10)
- .map(_sparse)
- .apply(sliding.sliding_window_batch(4, 2))
- .apply(sliding.sliding_window_batch(3, 1))
- .make_initializable_iterator())
+ iterator = (
+ dataset_ops.Dataset.range(10).map(_sparse).apply(
+ sliding.sliding_window_batch(window_size=4, window_shift=2)).apply(
+ sliding.sliding_window_batch(window_size=3, window_shift=1))
+ .make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
@@ -226,9 +282,9 @@ class SlideDatasetTest(test.TestCase):
# Slide: 1st batch.
actual = sess.run(get_next)
expected = sparse_tensor.SparseTensorValue(
- indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0],
- [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0],
- [2, 0, 0], [2, 1, 0], [2, 2, 0], [2, 3, 0]],
+ indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
+ [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
+ [2, 2, 0], [2, 3, 0]],
values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7],
dense_shape=[3, 4, 1])
self.assertTrue(sparse_tensor.is_sparse(actual))
@@ -236,9 +292,9 @@ class SlideDatasetTest(test.TestCase):
# Slide: 2nd batch.
actual = sess.run(get_next)
expected = sparse_tensor.SparseTensorValue(
- indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0],
- [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0],
- [2, 0, 0], [2, 1, 0], [2, 2, 0], [2, 3, 0]],
+ indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0],
+ [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0],
+ [2, 2, 0], [2, 3, 0]],
values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9],
dense_shape=[3, 4, 1])
self.assertTrue(sparse_tensor.is_sparse(actual))
@@ -253,10 +309,11 @@ class SlideDatasetTest(test.TestCase):
yield [4.0, 5.0, 6.0]
yield [7.0, 8.0, 9.0, 10.0]
- iterator = (dataset_ops.Dataset.from_generator(generator, dtypes.float32,
- output_shapes=[None])
- .apply(sliding.sliding_window_batch(3, 1))
- .make_initializable_iterator())
+ iterator = (
+ dataset_ops.Dataset.from_generator(
+ generator, dtypes.float32, output_shapes=[None]).apply(
+ sliding.sliding_window_batch(window_size=3, window_shift=1))
+ .make_initializable_iterator())
next_element = iterator.get_next()
with self.test_session() as sess:
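
For reference, the parameterized tests above all derive the expected number of emitted windows from the same expression; a small, self-contained sketch of that count follows (the helper is illustrative, not part of the change):

```python
# Illustrative helper only: the number of full windows a sliding window emits
# over n input elements, matching the formula used in the tests above.
def num_windows(n, window_size, window_shift, window_stride):
  span = (window_size - 1) * window_stride + 1   # input span of one window
  return max(0, (n - span) // window_shift + 1)  # partial windows are dropped

# count=20 repeats of 7 elements gives n=140; size=14, shift=7, stride=1.
assert num_windows(140, 14, 7, 1) == 19
# size=4, shift=1, stride=2: each window spans 7 input elements.
assert num_windows(140, 4, 1, 2) == 134
```

The zero-input cases in the tests rely on range() treating a negative batch count as empty, rather than clamping it as the helper above does.
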
diff --git a/tensorflow/contrib/data/python/ops/BUILD b/tensorflow/contrib/data/python/ops/BUILD
index 160d7fe22a..1ad021ea03 100644
--- a/tensorflow/contrib/data/python/ops/BUILD
+++ b/tensorflow/contrib/data/python/ops/BUILD
@@ -28,10 +28,12 @@ py_library(
srcs = ["get_single_element.py"],
srcs_version = "PY2AND3",
deps = [
+ ":grouping",
"//tensorflow/python:dataset_ops_gen",
"//tensorflow/python/data/ops:dataset_ops",
"//tensorflow/python/data/util:nest",
"//tensorflow/python/data/util:sparse",
+ "//third_party/py/numpy",
],
)
@@ -129,6 +131,7 @@ py_library(
"//tensorflow/python/data/util:convert",
"//tensorflow/python/data/util:nest",
"//tensorflow/python/data/util:sparse",
+ "//third_party/py/numpy",
],
)
diff --git a/tensorflow/contrib/data/python/ops/get_single_element.py b/tensorflow/contrib/data/python/ops/get_single_element.py
index 0f4cd8e20c..ef9284456e 100644
--- a/tensorflow/contrib/data/python/ops/get_single_element.py
+++ b/tensorflow/contrib/data/python/ops/get_single_element.py
@@ -17,6 +17,9 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import numpy as np
+
+from tensorflow.contrib.data.python.ops import grouping
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
@@ -68,3 +71,30 @@ def get_single_element(dataset):
return sparse.deserialize_sparse_tensors(
nested_ret, dataset.output_types, dataset.output_shapes,
dataset.output_classes)
+
+
+def reduce_dataset(dataset, reducer):
+ """Returns the result of reducing the `dataset` using `reducer`.
+
+ Args:
+ dataset: A @{tf.data.Dataset} object.
+ reducer: A @{tf.contrib.data.Reducer} object representing the reduce logic.
+
+ Returns:
+ A nested structure of @{tf.Tensor} objects, corresponding to the result
+ of reducing `dataset` using `reducer`.
+
+ Raises:
+ TypeError: if `dataset` is not a `tf.data.Dataset` object.
+ """
+ if not isinstance(dataset, dataset_ops.Dataset):
+ raise TypeError("`dataset` must be a `tf.data.Dataset` object.")
+
+ # The sentinel dataset is used in case the reduced dataset is empty.
+ sentinel_dataset = dataset_ops.Dataset.from_tensors(
+ reducer.finalize_func(reducer.init_func(np.int64(0))))
+ reduced_dataset = dataset.apply(
+ grouping.group_by_reducer(lambda x: np.int64(0), reducer))
+
+ return get_single_element(
+ reduced_dataset.concatenate(sentinel_dataset).take(1))
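
A minimal usage sketch for the new reduce_dataset helper, assuming the Reducer class from the existing grouping module (illustrative only):

```python
# Illustrative sketch only: summing a small dataset with reduce_dataset.
import numpy as np
import tensorflow as tf
from tensorflow.contrib.data.python.ops import get_single_element
from tensorflow.contrib.data.python.ops import grouping

dataset = tf.data.Dataset.range(10)
reducer = grouping.Reducer(
    init_func=lambda _: np.int64(0),                 # initial state
    reduce_func=lambda state, value: state + value,  # accumulate elements
    finalize_func=lambda state: state)               # return the final state

total = get_single_element.reduce_dataset(dataset, reducer)
with tf.Session() as sess:
  print(sess.run(total))  # 45 for the values 0..9
```
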
diff --git a/tensorflow/contrib/data/python/ops/optimization.py b/tensorflow/contrib/data/python/ops/optimization.py
index cf89657226..018c5115e1 100644
--- a/tensorflow/contrib/data/python/ops/optimization.py
+++ b/tensorflow/contrib/data/python/ops/optimization.py
@@ -18,12 +18,34 @@ from __future__ import division
from __future__ import print_function
from tensorflow.contrib.data.python.ops import contrib_op_loader # pylint: disable=unused-import
+from tensorflow.contrib.data.python.ops import gen_dataset_ops as contrib_gen_dataset_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
+# TODO(jsimsa): Support RE matching for both individual transformation (e.g. to
+# account for indexing) and transformation sequence.
+def assert_next(transformations):
+ """A transformation that asserts which transformations happen next.
+
+ Args:
+ transformations: A `tf.string` vector `tf.Tensor` identifying the
+ transformations that are expected to happen next.
+
+ Returns:
+ A `Dataset` transformation function, which can be passed to
+ @{tf.data.Dataset.apply}.
+ """
+
+ def _apply_fn(dataset):
+ """Function from `Dataset` to `Dataset` that applies the transformation."""
+ return _AssertNextDataset(dataset, transformations)
+
+ return _apply_fn
+
+
def optimize(optimizations=None):
"""A transformation that applies optimizations.
@@ -44,6 +66,37 @@ def optimize(optimizations=None):
return _apply_fn
+class _AssertNextDataset(dataset_ops.Dataset):
+ """A `Dataset` that asserts which transformations happen next."""
+
+ def __init__(self, input_dataset, transformations):
+ """See `assert_next()` for details."""
+ super(_AssertNextDataset, self).__init__()
+ self._input_dataset = input_dataset
+ if transformations is None:
+ raise ValueError("At least one transformation should be specified")
+ self._transformations = ops.convert_to_tensor(
+ transformations, dtype=dtypes.string, name="transformations")
+
+ def _as_variant_tensor(self):
+ return contrib_gen_dataset_ops.assert_next_dataset(
+ self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
+ self._transformations,
+ **dataset_ops.flat_structure(self))
+
+ @property
+ def output_classes(self):
+ return self._input_dataset.output_classes
+
+ @property
+ def output_shapes(self):
+ return self._input_dataset.output_shapes
+
+ @property
+ def output_types(self):
+ return self._input_dataset.output_types
+
+
class _OptimizeDataset(dataset_ops.Dataset):
"""A `Dataset` that acts as an identity, and applies optimizations."""
diff --git a/tensorflow/contrib/data/python/ops/prefetching_ops.py b/tensorflow/contrib/data/python/ops/prefetching_ops.py
index 50212d3b52..0edd7c9fe9 100644
--- a/tensorflow/contrib/data/python/ops/prefetching_ops.py
+++ b/tensorflow/contrib/data/python/ops/prefetching_ops.py
@@ -31,6 +31,7 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops as core_gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
@@ -480,6 +481,11 @@ class _CopyToDeviceDataset(dataset_ops.Dataset):
self._finalize_func = _remote_finalize_func
self._finalize_captured_args = _remote_finalize_func.captured_inputs
+
+ g = ops.get_default_graph()
+ _remote_init_func.add_to_graph(g)
+ _remote_next_func.add_to_graph(g)
+ _remote_finalize_func.add_to_graph(g)
# pylint: enable=protected-access
# The one_shot_iterator implementation needs a 0 arg _make_dataset function
@@ -518,3 +524,174 @@ class _CopyToDeviceDataset(dataset_ops.Dataset):
@property
def output_classes(self):
return self._input_dataset.output_classes
+
+
+class _PerDeviceGenerator(dataset_ops.Dataset):
+ """A `dummy` generator dataset."""
+
+ def __init__(self, shard_num, multi_device_iterator_resource, incarnation_id,
+ source_device, target_device, output_shapes, output_types,
+ output_classes):
+ self._target_device = target_device
+ self._output_types = output_types
+ self._output_shapes = output_shapes
+ self._output_classes = output_classes
+ self._flat_output_shapes = nest.flatten(
+ sparse.as_dense_shapes(self._output_shapes, self._output_classes))
+ self._flat_output_types = nest.flatten(
+ sparse.as_dense_types(self._output_types, self._output_classes))
+
+ multi_device_iterator_string_handle = (
+ gen_dataset_ops.multi_device_iterator_to_string_handle(
+ multi_device_iterator_resource))
+
+ @function.Defun()
+ def _init_func():
+ return multi_device_iterator_string_handle
+
+ @function.Defun()
+ def _remote_init_func():
+ return functional_ops.remote_call(
+ target=source_device,
+ args=_init_func.captured_inputs,
+ Tout=[dtypes.string],
+ f=_init_func)
+
+ self._init_func = _remote_init_func
+ self._init_captured_args = _remote_init_func.captured_inputs
+
+ @function.Defun(dtypes.string)
+ def _next_func(string_handle):
+ multi_device_iterator = (
+ gen_dataset_ops.multi_device_iterator_from_string_handle(
+ string_handle=string_handle,
+ output_types=self._flat_output_types,
+ output_shapes=self._flat_output_shapes))
+ return gen_dataset_ops.multi_device_iterator_get_next_from_shard(
+ multi_device_iterator=multi_device_iterator,
+ shard_num=shard_num,
+ incarnation_id=incarnation_id,
+ output_types=self._flat_output_types,
+ output_shapes=self._flat_output_shapes)
+
+ @function.Defun(dtypes.string)
+ def _remote_next_func(string_handle):
+ return functional_ops.remote_call(
+ target=source_device,
+ args=[string_handle] + _next_func.captured_inputs,
+ Tout=self._flat_output_types,
+ f=_next_func)
+
+ self._next_func = _remote_next_func
+ self._next_captured_args = _remote_next_func.captured_inputs
+
+ @function.Defun(dtypes.string)
+ def _finalize_func(unused_string_handle):
+ return array_ops.constant(0, dtypes.int64)
+
+ @function.Defun(dtypes.string)
+ def _remote_finalize_func(string_handle):
+ return functional_ops.remote_call(
+ target=source_device,
+ args=[string_handle] + _finalize_func.captured_inputs,
+ Tout=[dtypes.int64],
+ f=_finalize_func)
+
+ self._finalize_func = _remote_finalize_func
+ self._finalize_captured_args = _remote_finalize_func.captured_inputs
+
+ def _as_variant_tensor(self):
+ with ops.device(self._target_device):
+ return core_gen_dataset_ops.generator_dataset(
+ self._init_captured_args,
+ self._next_captured_args,
+ self._finalize_captured_args,
+ init_func=self._init_func,
+ next_func=self._next_func,
+ finalize_func=self._finalize_func,
+ output_types=self._flat_output_types,
+ output_shapes=self._flat_output_shapes)
+
+ @property
+ def output_types(self):
+ return self._output_types
+
+ @property
+ def output_shapes(self):
+ return self._output_shapes
+
+ @property
+ def output_classes(self):
+ return self._output_classes
+
+
+class MultiDeviceIterator(object):
+ """An iterator over multiple devices."""
+
+ def __init__(self,
+ dataset,
+ devices,
+ prefetch_buffer_size=1,
+ source_device="/cpu:0"):
+ self._dataset = dataset
+ self._devices = devices
+ self._source_device = source_device
+ self._source_device_tensor = ops.convert_to_tensor(source_device)
+
+ self._flat_output_shapes = nest.flatten(
+ sparse.as_dense_shapes(self._dataset.output_shapes,
+ self._dataset.output_classes))
+ self._flat_output_types = nest.flatten(
+ sparse.as_dense_types(self._dataset.output_types,
+ self._dataset.output_classes))
+
+ # Create the MultiDeviceIterator.
+ with ops.device(self._source_device):
+ self._multi_device_iterator_resource = (
+ gen_dataset_ops.multi_device_iterator(
+ devices=self._devices,
+ shared_name="",
+ container="",
+ output_types=self._flat_output_types,
+ output_shapes=self._flat_output_shapes))
+
+ # The incarnation ID is used to ensure consistency between the per-device
+ # iterators and the multi-device iterator.
+ self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
+ self._dataset._as_variant_tensor(), # pylint: disable=protected-access
+ self._multi_device_iterator_resource)
+
+ # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
+ # initialize the device side of the pipeline. This would allow the
+ # MultiDeviceIterator to choose, for example, to move some transformations
+ # into the device side from its input. It might be useful in rewriting.
+ # Create the per device iterators.
+ self._device_iterators = []
+ i = 0
+ for device in self._devices:
+ ds = _PerDeviceGenerator(
+ i, self._multi_device_iterator_resource, self._incarnation_id,
+ self._source_device_tensor, device, self._dataset.output_shapes,
+ self._dataset.output_types, self._dataset.output_classes)
+ ds = ds.prefetch(prefetch_buffer_size)
+ with ops.device(device):
+ self._device_iterators.append(ds.make_initializable_iterator())
+ i += 1
+
+ device_iterator_initializers = [
+ iterator.initializer for iterator in self._device_iterators
+ ]
+ self._initializer = control_flow_ops.group(*device_iterator_initializers)
+
+ def get_next(self):
+ result = []
+ i = 0
+ for device in self._devices:
+ with ops.device(device):
+ result.append(self._device_iterators[i].get_next())
+ i += 1
+ return result
+
+ @property
+ def initializer(self):
+ return self._initializer
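
A hedged sketch of driving the new MultiDeviceIterator from a session; the device names are assumptions (a GPU is assumed to be available), and elements are expected to be handed out to the devices in order:

```python
# Illustrative sketch only; device names and element placement are assumptions.
import tensorflow as tf
from tensorflow.contrib.data.python.ops import prefetching_ops

dataset = tf.data.Dataset.range(8)
multi_device_iterator = prefetching_ops.MultiDeviceIterator(
    dataset, devices=["/cpu:0", "/gpu:0"], prefetch_buffer_size=2)

# get_next() returns one element per device, in the order devices were given.
elements = multi_device_iterator.get_next()

with tf.Session() as sess:
  sess.run(multi_device_iterator.initializer)
  print(sess.run(elements))  # e.g. [0, 1] on the first call
```
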
diff --git a/tensorflow/contrib/data/python/ops/readers.py b/tensorflow/contrib/data/python/ops/readers.py
index 9373e37f5f..f018dd02e6 100644
--- a/tensorflow/contrib/data/python/ops/readers.py
+++ b/tensorflow/contrib/data/python/ops/readers.py
@@ -326,6 +326,7 @@ def make_csv_dataset(
num_parallel_parser_calls=2,
sloppy=False,
num_rows_for_inference=100,
+ compression_type=None,
):
"""Reads CSV files into a dataset.
@@ -399,6 +400,8 @@ def make_csv_dataset(
num_rows_for_inference: Number of rows of a file to use for type inference
if record_defaults is not provided. If None, reads all the rows of all
the files. Defaults to 100.
+ compression_type: (Optional.) A `tf.string` scalar evaluating to one of
+ `""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no compression.
Returns:
A dataset, where each element is a (features, labels) tuple that corresponds
@@ -461,7 +464,9 @@ def make_csv_dataset(
use_quote_delim=use_quote_delim,
na_value=na_value,
select_cols=select_columns,
- header=header)
+ header=header,
+ compression_type=compression_type,
+ )
def map_fn(*columns):
"""Organizes columns into a features dictionary.
@@ -505,6 +510,7 @@ class CsvDataset(dataset_ops.Dataset):
def __init__(self,
filenames,
record_defaults,
+ compression_type=None,
buffer_size=None,
header=False,
field_delim=",",
@@ -562,6 +568,9 @@ class CsvDataset(dataset_ops.Dataset):
both this and `select_columns` are specified, these must have the same
lengths, and `column_defaults` is assumed to be sorted in order of
increasing column index.
+ compression_type: (Optional.) A `tf.string` scalar evaluating to one of
+ `""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no
+ compression.
buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
to buffer while reading files. Defaults to 4MB.
header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)
@@ -581,6 +590,11 @@ class CsvDataset(dataset_ops.Dataset):
super(CsvDataset, self).__init__()
self._filenames = ops.convert_to_tensor(
filenames, dtype=dtypes.string, name="filenames")
+ self._compression_type = convert.optional_param_to_tensor(
+ "compression_type",
+ compression_type,
+ argument_default="",
+ argument_dtype=dtypes.string)
record_defaults = [
constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
for x in record_defaults
@@ -621,6 +635,7 @@ class CsvDataset(dataset_ops.Dataset):
use_quote_delim=self._use_quote_delim,
na_value=self._na_value,
select_cols=self._select_cols,
+ compression_type=self._compression_type,
)
@property
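
With compression_type plumbed through, a compressed CSV can be read directly; a minimal sketch, assuming a hypothetical gzipped two-column file:

```python
# Illustrative sketch only; the file path and column layout are hypothetical.
import tensorflow as tf
from tensorflow.contrib.data.python.ops import readers

record_defaults = [tf.constant([], dtype=tf.int32),
                   tf.constant([], dtype=tf.string)]

dataset = readers.CsvDataset(
    ["/tmp/example.csv.gz"],          # hypothetical gzipped input
    record_defaults=record_defaults,
    compression_type="GZIP",
    header=True)

row = dataset.make_one_shot_iterator().get_next()
```
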
diff --git a/tensorflow/contrib/data/python/ops/sliding.py b/tensorflow/contrib/data/python/ops/sliding.py
index 3f3c5ca17c..e9dd74530a 100644
--- a/tensorflow/contrib/data/python/ops/sliding.py
+++ b/tensorflow/contrib/data/python/ops/sliding.py
@@ -23,25 +23,29 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
+from tensorflow.python.util import deprecation
class _SlideDataset(dataset_ops.Dataset):
"""A `Dataset` that passes a sliding window over its input."""
- def __init__(self, input_dataset, window_size, stride=1):
+ def __init__(self, input_dataset, window_size, window_shift, window_stride):
"""See `sliding_window_batch` for details."""
super(_SlideDataset, self).__init__()
self._input_dataset = input_dataset
self._window_size = ops.convert_to_tensor(
- window_size, dtype=dtypes.int64, name="window_size")
- self._stride = ops.convert_to_tensor(
- stride, dtype=dtypes.int64, name="stride")
+ window_size, dtype=dtypes.int64, name="window_size")
+ self._window_stride = ops.convert_to_tensor(
+ window_stride, dtype=dtypes.int64, name="window_stride")
+ self._window_shift = ops.convert_to_tensor(
+ window_shift, dtype=dtypes.int64, name="window_shift")
def _as_variant_tensor(self):
return gen_dataset_ops.slide_dataset(
self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access
window_size=self._window_size,
- stride=self._stride,
+ window_shift=self._window_shift,
+ window_stride=self._window_stride,
**dataset_ops.flat_structure(self))
@property
@@ -61,38 +65,63 @@ class _SlideDataset(dataset_ops.Dataset):
return self._input_dataset.output_types
-def sliding_window_batch(window_size, stride=1):
- """A sliding window with size of `window_size` and step of `stride`.
+@deprecation.deprecated_args(
+ None, "stride is deprecated, use window_shift instead", "stride")
+def sliding_window_batch(window_size,
+ stride=None,
+ window_shift=None,
+ window_stride=1):
+ """A sliding window over a dataset.
- This transformation passes a sliding window over this dataset. The
- window size is `window_size` and step size is `stride`. If the left
- elements cannot fill up the sliding window, this transformation will
- drop the final smaller element. For example:
+ This transformation passes a sliding window over this dataset. The window size
+ is `window_size`, the stride of the input elements is `window_stride`, and the
+ shift between consecutive windows is `window_shift`. If the remaining elements
+ cannot fill up the sliding window, this transformation will drop the final,
+ smaller window. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { [1], [2], [3], [4], [5], [6] }
- a.apply(tf.contrib.data.sliding_window_batch(window_size=3, stride=2)) ==
- {
- [[1], [2], [3]],
- [[3], [4], [5]],
- }
+ a.apply(sliding_window_batch(window_size=3)) ==
+ { [[1], [2], [3]], [[2], [3], [4]], [[3], [4], [5]], [[4], [5], [6]] }
+
+ a.apply(sliding_window_batch(window_size=3, window_shift=2)) ==
+ { [[1], [2], [3]], [[3], [4], [5]] }
+
+ a.apply(sliding_window_batch(window_size=3, window_stride=2)) ==
+ { [[1], [3], [5]], [[2], [4], [6]] }
```
Args:
window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
- elements in the sliding window.
+ elements in the sliding window. It must be positive.
stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
- steps moving the sliding window forward for one iteration. The default
- is `1`. It must be positive.
+ forward shift of the sliding window in each iteration. The default is `1`.
+ It must be positive. Deprecated alias for `window_shift`.
+ window_shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
+ forward shift of the sliding window in each iteration. The default is `1`.
+ It must be positive.
+ window_stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
+ stride of the input elements in the sliding window. The default is `1`.
+ It must be positive.
Returns:
A `Dataset` transformation function, which can be passed to
@{tf.data.Dataset.apply}.
+
+ Raises:
+ ValueError: if invalid arguments are provided.
"""
+ if stride is None and window_shift is None:
+ window_shift = 1
+ elif stride is not None and window_shift is None:
+ window_shift = stride
+ elif stride is not None and window_shift is not None:
+ raise ValueError("Cannot specify both `stride` and `window_shift`")
+
def _apply_fn(dataset):
- return _SlideDataset(dataset, window_size, stride)
+ return _SlideDataset(dataset, window_size, window_shift, window_stride)
return _apply_fn
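
A short sketch of the new keyword form (illustrative only), matching the windows documented above:

```python
# Illustrative sketch only: window_shift controls how far consecutive windows
# move; the trailing partial window is dropped.
import tensorflow as tf
from tensorflow.contrib.data.python.ops import sliding

dataset = tf.data.Dataset.range(6).apply(
    sliding.sliding_window_batch(window_size=3, window_shift=2))

get_next = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
  print(sess.run(get_next))  # [0 1 2]
  print(sess.run(get_next))  # [2 3 4]
```
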
diff --git a/tensorflow/contrib/distribute/README.md b/tensorflow/contrib/distribute/README.md
index 44a4481021..2f5dd10550 100644
--- a/tensorflow/contrib/distribute/README.md
+++ b/tensorflow/contrib/distribute/README.md
@@ -116,8 +116,6 @@ in the input function gives a solid boost in performance. When using
## Caveats
This feature is in early stages and there are a lot of improvements forthcoming:
-* Metrics are not yet supported during distributed training. They are still
-supported during the evaluation.
* Summaries are only computed in the first tower in `MirroredStrategy`.
* Evaluation is not yet distributed.
* Eager support is in the works; performance can be more challenging with eager
diff --git a/tensorflow/contrib/distribute/python/BUILD b/tensorflow/contrib/distribute/python/BUILD
index 40dbfa3dd2..f5d7e24ae2 100644
--- a/tensorflow/contrib/distribute/python/BUILD
+++ b/tensorflow/contrib/distribute/python/BUILD
@@ -610,3 +610,40 @@ cuda_py_test(
"no_pip",
],
)
+
+cuda_py_test(
+ name = "warm_starting_util_test",
+ size = "medium",
+ srcs = ["warm_starting_util_test.py"],
+ additional_deps = [
+ ":combinations",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_ops",
+ "//tensorflow/python:training",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
+ ],
+ tags = [
+ "multi_and_single_gpu",
+ "no_pip",
+ ],
+)
+
+cuda_py_test(
+ name = "checkpoint_utils_test",
+ size = "medium",
+ srcs = ["checkpoint_utils_test.py"],
+ additional_deps = [
+ ":combinations",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:checkpoint_utils_test",
+ "//tensorflow/python:framework_ops",
+ "//tensorflow/python:training",
+ "//tensorflow/python:variable_scope",
+ "//tensorflow/python:variables",
+ ],
+ tags = [
+ "multi_and_single_gpu",
+ "no_pip",
+ ],
+)
diff --git a/tensorflow/contrib/distribute/python/checkpoint_utils_test.py b/tensorflow/contrib/distribute/python/checkpoint_utils_test.py
new file mode 100644
index 0000000000..fe3df9cbb9
--- /dev/null
+++ b/tensorflow/contrib/distribute/python/checkpoint_utils_test.py
@@ -0,0 +1,72 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for checkpoint_utils.init_from_checkpoint with Distribution Strategy.
+
+These tests are located here instead of as part of
+`python.training.CheckpointsTest` because they need access to distribution
+strategies which are only present in contrib right now.
+TODO(priyag): Move the tests to core `python.training.CheckpointsTest` when
+distribution strategy moves out of contrib.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl.testing import parameterized
+
+from tensorflow.contrib.distribute.python import combinations
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+from tensorflow.python.training import checkpoint_utils
+from tensorflow.python.training import checkpoint_utils_test
+
+
+class CheckpointUtilsWithDistributionStrategyTest(
+ test.TestCase, parameterized.TestCase):
+
+ @combinations.generate(combinations.combine(
+ distribution=[combinations.default_strategy,
+ combinations.one_device_strategy,
+ combinations.mirrored_strategy_with_gpu_and_cpu,
+ combinations.mirrored_strategy_with_two_gpus],
+ in_tower_mode=[True, False],
+ mode=["graph"]))
+ def testInitFromCheckpoint(self, distribution, in_tower_mode):
+ checkpoint_dir = self.get_temp_dir()
+ with self.test_session() as session:
+ v1_value, _, _, _ = checkpoint_utils_test._create_checkpoints(
+ session, checkpoint_dir)
+
+ def init_and_verify(g):
+ v1 = variable_scope.get_variable("new_var1", [1, 10])
+ checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
+ "var1": "new_var1",
+ })
+ with self.test_session(graph=g) as session:
+ session.run(variables.global_variables_initializer())
+ self.assertAllEqual(v1_value, self.evaluate(v1))
+
+ with ops.Graph().as_default() as g, distribution.scope():
+ if in_tower_mode:
+ distribution.call_for_each_tower(init_and_verify, g)
+ else:
+ init_and_verify(g)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/distribute/python/mirrored_strategy_multigpu_test.py b/tensorflow/contrib/distribute/python/mirrored_strategy_multigpu_test.py
index 6a14b833d2..9807ce4351 100644
--- a/tensorflow/contrib/distribute/python/mirrored_strategy_multigpu_test.py
+++ b/tensorflow/contrib/distribute/python/mirrored_strategy_multigpu_test.py
@@ -967,5 +967,74 @@ class MirroredAndTowerLocalVariableInitializerTest(test.TestCase):
self.evaluate(tower_local_var.initializer)
self.assertTrue(self.evaluate(tower_local_var.is_initialized()))
+
+class TowerLocalVariableAssignTest(test.TestCase):
+ config = config_pb2.ConfigProto()
+ config.allow_soft_placement = True
+
+ def _skip_eager_if_gpus_less_than(self, num_gpus):
+ if context.num_gpus() < num_gpus and context.executing_eagerly():
+      self.skipTest("Not enough GPUs available for this test in eager mode.")
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testAssignTowerLocalVarSumAggregation(self):
+ self._skip_eager_if_gpus_less_than(1)
+ def model_fn():
+ v_sum = variable_scope.variable(
+ 1.0,
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ aggregation=variable_scope.VariableAggregation.SUM)
+ return v_sum
+
+ dist = mirrored_strategy.MirroredStrategy(
+ ["/device:GPU:0", "/device:CPU:0"])
+
+ with dist.scope():
+ tower_local_var = dist.call_for_each_tower(model_fn,
+ run_concurrently=False)
+ self.assertTrue(isinstance(tower_local_var, values.TowerLocalVariable))
+ self.evaluate(variables.global_variables_initializer())
+ # Each tower has a value of 1.0 assigned to it in tower context.
+      # When we read the value using `read_var` we should see the SUM of the
+      # values on all the towers.
+ self.assertEqual(2.0, self.evaluate(dist.read_var(tower_local_var)))
+ # Assigning 6.0 in cross tower context will assign a value of
+ # 6.0/num_towers to each tower.
+ tlv_ops = tower_local_var.assign(6.0)
+ self.evaluate(tlv_ops)
+ # On reading the tower local var we should get the assigned value back.
+      # The values on all the towers are added before being returned by
+      # `read_var`.
+ self.assertEqual(6.0, self.evaluate(dist.read_var(tower_local_var)))
+
+ @test_util.run_in_graph_and_eager_modes(config=config)
+ def testAssignTowerLocalVarMeanAggregation(self):
+ self._skip_eager_if_gpus_less_than(1)
+ def model_fn():
+ v_sum = variable_scope.variable(
+ 1.0,
+ synchronization=variable_scope.VariableSynchronization.ON_READ,
+ aggregation=variable_scope.VariableAggregation.MEAN)
+ return v_sum
+
+ dist = mirrored_strategy.MirroredStrategy(
+ ["/device:GPU:0", "/device:CPU:0"])
+
+ with dist.scope():
+ tower_local_var = dist.call_for_each_tower(model_fn,
+ run_concurrently=False)
+ self.assertTrue(isinstance(tower_local_var, values.TowerLocalVariable))
+ self.evaluate(variables.global_variables_initializer())
+ # Each tower has a value of 1.0 assigned to it in tower context.
+ # When we read the value using `read_var` we should see the MEAN of values
+      # on all towers, which is the value assigned in tower context.
+ self.assertEqual(1.0, self.evaluate(dist.read_var(tower_local_var)))
+ tlv_ops = tower_local_var.assign(6.0)
+ self.evaluate(tlv_ops)
+      # On reading the tower local var we should get the MEAN of all values,
+      # which is equal to the value assigned.
+ self.assertEqual(6.0, self.evaluate(dist.read_var(tower_local_var)))
+
+
if __name__ == "__main__":
test.main()
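
A standalone sketch of the SUM/MEAN arithmetic the new TowerLocalVariableAssignTest cases above rely on. The helper names below are hypothetical stand-ins for the per-tower component variables, not the actual values.TowerLocalVariable implementation:

# Hypothetical sketch of tower-local SUM/MEAN aggregation semantics.
def assign_cross_tower(value, num_towers, aggregation):
    # A SUM-aggregated variable spreads the assigned value across towers so
    # that reading (summing) the components returns the assigned value;
    # a MEAN-aggregated variable assigns the value to every tower as-is.
    per_tower = value / num_towers if aggregation == "sum" else value
    return [per_tower] * num_towers

def read_var(per_tower_values, aggregation):
    # Reading a SUM variable adds the per-tower values; MEAN averages them.
    total = sum(per_tower_values)
    return total if aggregation == "sum" else total / len(per_tower_values)

assert read_var(assign_cross_tower(6.0, 2, "sum"), "sum") == 6.0    # 3.0 per tower
assert read_var(assign_cross_tower(6.0, 2, "mean"), "mean") == 6.0  # 6.0 per tower

Either way, the value read back equals the value assigned in cross tower context, which is exactly what the two tests assert.
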
diff --git a/tensorflow/contrib/distribute/python/values.py b/tensorflow/contrib/distribute/python/values.py
index 1b5e00bc79..47dcf679c2 100644
--- a/tensorflow/contrib/distribute/python/values.py
+++ b/tensorflow/contrib/distribute/python/values.py
@@ -30,10 +30,10 @@ from tensorflow.contrib.distribute.python import prefetching_ops_v2
from tensorflow.python.eager import context
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
-from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import device_util
from tensorflow.python.training import distribute as distribute_lib
@@ -78,6 +78,13 @@ class DistributedValues(object):
def devices(self):
return list(self._index.keys())
+ @property
+ def is_tensor_like(self):
+ for v in self._index.values():
+ if not tensor_util.is_tensor(v):
+ return False
+ return True
+
def __str__(self):
return "%s:%s" % (self.__class__.__name__, self._index)
@@ -197,10 +204,43 @@ class DistributedVariable(DistributedDelegate):
# to the container without introducing a reference cycle.
for v in six.itervalues(index):
v._distributed_container = weakref.ref(self) # pylint: disable=protected-access
+ # tf.keras keeps track of variables initialized using this attribute. When
+ # tf.keras gets the default session, it initializes all uninitialized vars.
+ # We need to make _keras_initialized a member of DistributedVariable because
+ # without this it will use `__getattr__` which will delegate to a component
+ # variable.
+ self._keras_initialized = False
super(DistributedVariable, self).__init__(index)
+ def is_initialized(self, name=None):
+ """Identifies if all the component variables are initialized.
+
+ Args:
+ name: Name of the final `logical_and` op.
+
+ Returns:
+ The op that evaluates to True or False depending on if all the
+ component variables are initialized.
+ """
+ # We have to cast the self._index.values() to a `list` because when we
+ # use `model_to_estimator` to run tf.keras models, self._index.values() is
+ # of type `dict_values` and not `list`.
+ values_list = list(self._index.values())
+ result = values_list[0].is_initialized()
+ # We iterate through the list of values except the last one to allow us to
+ # name the final `logical_and` op the same name that is passed by the user
+ # to the `is_initialized` op. For distributed variables, the
+ # `is_initialized` op is a `logical_and` op.
+ for v in values_list[1:-1]:
+ result = math_ops.logical_and(result, v.is_initialized())
+ result = math_ops.logical_and(result, values_list[-1].is_initialized(),
+ name=name)
+ return result
+
@property
def initializer(self):
+    # Return grouped ops of all the var initializations of component values of
+    # the distributed variable.
return control_flow_ops.group([v.initializer for v in self._index.values()])
@property
@@ -297,12 +337,6 @@ class MirroredVariable(DistributedVariable, Mirrored,
for v in six.itervalues(index):
v._mirrored_container = weakref.ref(self) # pylint: disable=protected-access
self._primary_var = primary_var
- # tf.keras keeps track of variables initialized using this attribute. When
- # tf.keras gets the default session, it initializes all uninitialized vars.
- # We need to make _keras_initialized a member of MirroredVariable because
- # without this it will use `__getattr__` which will delegate to a component
- # variable.
- self._keras_initialized = False
self._aggregation = aggregation
super(MirroredVariable, self).__init__(index)
@@ -326,6 +360,7 @@ class MirroredVariable(DistributedVariable, Mirrored,
return distribute_lib.get_distribution_strategy().update(
self, f, *args, **kwargs)
else:
+ _assert_tower_context()
# We are calling an assign function on the mirrored variable in tower
# context.
# We reduce the value we want to assign/add/sub. More details about how we
@@ -336,45 +371,27 @@ class MirroredVariable(DistributedVariable, Mirrored,
raise ValueError("You must specify an aggregation method to update a "
"MirroredVariable in Tower Context.")
- def merge_fn(strategy, value):
+ def merge_fn(strategy, value, *other_args, **other_kwargs):
return strategy.update(
self, f,
strategy.reduce(
- aggregation=self._aggregation, value=value, destinations=self))
+ aggregation=self._aggregation, value=value, destinations=self),
+ *other_args, **other_kwargs)
return distribute_lib.get_tower_context().merge_call(merge_fn, *args,
**kwargs)
def assign_sub(self, *args, **kwargs):
- return self._assign_func(f=state_ops.assign_sub, *args, **kwargs)
+ assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw)
+ return self._assign_func(f=assign_sub_fn, *args, **kwargs)
def assign_add(self, *args, **kwargs):
- return self._assign_func(f=state_ops.assign_add, *args, **kwargs)
+ assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw)
+ return self._assign_func(f=assign_add_fn, *args, **kwargs)
def assign(self, *args, **kwargs):
- return self._assign_func(f=state_ops.assign, *args, **kwargs)
-
- def is_initialized(self, name=None):
- # We have to cast the self._index.values() to a `list` because when we
- # use `model_to_estimator` to run tf.keras models, self._index.values() is
- # of type `dict_values` and not `list`.
- values_list = list(self._index.values())
- result = values_list[0].is_initialized()
- # We iterate through the list of values except the last one to allow us to
- # name the final `logical_and` op the same name that is passed by the user
- # to the `is_initialized` op. For mirrored variables, the `is_initialized`
- # op is a `logical_and` op.
- for v in values_list[1:-1]:
- result = math_ops.logical_and(result, v.is_initialized())
- result = math_ops.logical_and(result, values_list[-1].is_initialized(),
- name=name)
- return result
-
- @property
- def initializer(self):
- # return grouped ops of all the var initializations of component values of
- # the mirrored variable
- return control_flow_ops.group([v.initializer for v in self._index.values()])
+ assign_fn = lambda var, *a, **kw: var.assign(*a, **kw)
+ return self._assign_func(f=assign_fn, *args, **kwargs)
@property
def aggregation(self):
@@ -440,14 +457,7 @@ class _TowerLocalSaveable(saver.BaseSaverBuilder.SaveableObject):
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into all variables."""
tensor, = restored_tensors
- # To preserve the sum across save and restore, we have to divide the
- # total across all devices when restoring a variable that was summed
- # when saving.
- if self._tower_local_variable.aggregation == vs.VariableAggregation.SUM:
- tensor *= 1. / len(self._tower_local_variable.devices)
- return control_flow_ops.group([
- _assign_on_device(d, v, tensor)
- for d, v in six.iteritems(self._tower_local_variable._index)]) # pylint: disable=protected-access
+ return self._tower_local_variable.assign(tensor)
def _assert_tower_context():
@@ -463,12 +473,6 @@ class TowerLocalVariable(DistributedVariable, PerDevice,
def __init__(self, index, primary_var, aggregation):
self._primary_var = primary_var
self._aggregation = aggregation
- # tf.keras keeps track of variables initialized using this attribute. When
- # tf.keras gets the default session, it initializes all uninitialized vars.
- # We need to make _keras_initialized a member of TowerLocalVariable because
- # without this it will use `__getattr__` which will delegate to a component
- # variable.
- self._keras_initialized = False
super(TowerLocalVariable, self).__init__(index)
def assign_sub(self, *args, **kwargs):
@@ -480,30 +484,19 @@ class TowerLocalVariable(DistributedVariable, PerDevice,
return self.get().assign_add(*args, **kwargs)
def assign(self, *args, **kwargs):
- _assert_tower_context()
- return self.get().assign(*args, **kwargs)
-
- def is_initialized(self, name=None):
- # We have to cast the self._index.values() to a `list` because when we
- # use `model_to_estimator` to run tf.keras models, self._index.values() is
- # of type `dict_values` and not `list`.
- values_list = list(self._index.values())
- result = values_list[0].is_initialized()
- # We iterate through the list of values except the last one to allow us to
- # name the final `logical_and` op the same name that is passed by the user
- # to the `is_initialized` op. For tower local variables, the
- # `is_initialized` op is a `logical_and` op.
- for v in values_list[1:-1]:
- result = math_ops.logical_and(result, v.is_initialized())
- result = math_ops.logical_and(result, values_list[-1].is_initialized(),
- name=name)
- return result
-
- @property
- def initializer(self):
- # return grouped ops of all the var initializations of component values of
- # the tower local variable
- return control_flow_ops.group([v.initializer for v in self._index.values()])
+ if distribute_lib.get_cross_tower_context():
+ # To preserve the sum across save and restore, we have to divide the
+ # total across all devices when restoring a variable that was summed
+ # when saving.
+ tensor = args[0]
+ if self._aggregation == vs.VariableAggregation.SUM:
+ tensor *= 1. / len(self.devices)
+ return control_flow_ops.group(
+ [_assign_on_device(d, v, tensor)
+ for d, v in six.iteritems(self._index)])
+ else:
+ _assert_tower_context()
+ return self.get().assign(*args, **kwargs)
@property
def aggregation(self):
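
A minimal sketch of the chained `logical_and` built by the DistributedVariable.is_initialized method added above, using plain tuples as hypothetical stand-in ops so that the naming of the final op is visible without building a TensorFlow graph:

def logical_and(a, b, name=None):
    # Stand-in for math_ops.logical_and: returns a labelled tuple instead of an op.
    return (name or "logical_and", a, b)

def is_initialized(component_ops, name=None):
    values_list = list(component_ops)
    result = values_list[0]
    # Fold in all components except the first and the last anonymously ...
    for v in values_list[1:-1]:
        result = logical_and(result, v)
    # ... so the caller-supplied name lands on the final op, as in the code above.
    return logical_and(result, values_list[-1], name=name)

print(is_initialized(["v0", "v1", "v2"], name="is_initialized"))
# ('is_initialized', ('logical_and', 'v0', 'v1'), 'v2')
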
diff --git a/tensorflow/contrib/distribute/python/values_test.py b/tensorflow/contrib/distribute/python/values_test.py
index 8e44f2fea1..91a43d4999 100644
--- a/tensorflow/contrib/distribute/python/values_test.py
+++ b/tensorflow/contrib/distribute/python/values_test.py
@@ -32,6 +32,7 @@ from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
@@ -79,6 +80,30 @@ class DistributedValuesTest(test.TestCase):
with self.assertRaises(AssertionError):
v = values.DistributedValues({"/device:cpu:0": 42})
+ def testIsTensorLike(self):
+ with context.graph_mode(), \
+ ops.Graph().as_default(), \
+ ops.device("/device:CPU:0"):
+ one = constant_op.constant(1)
+ two = constant_op.constant(2)
+ v = values.DistributedValues({"/device:CPU:0": one, "/device:GPU:0": two})
+ self.assertEqual(two, v.get("/device:GPU:0"))
+ self.assertEqual(one, v.get())
+ self.assertTrue(v.is_tensor_like)
+ self.assertTrue(tensor_util.is_tensor(v))
+
+ def testIsTensorLikeWithAConstant(self):
+ with context.graph_mode(), \
+ ops.Graph().as_default(), \
+ ops.device("/device:CPU:0"):
+ one = constant_op.constant(1)
+ two = 2.0
+ v = values.DistributedValues({"/device:CPU:0": one, "/device:GPU:0": two})
+ self.assertEqual(two, v.get("/device:GPU:0"))
+ self.assertEqual(one, v.get())
+ self.assertFalse(v.is_tensor_like)
+ self.assertFalse(tensor_util.is_tensor(v))
+
class DistributedDelegateTest(test.TestCase):
diff --git a/tensorflow/contrib/distribute/python/warm_starting_util_test.py b/tensorflow/contrib/distribute/python/warm_starting_util_test.py
new file mode 100644
index 0000000000..d8bacdb338
--- /dev/null
+++ b/tensorflow/contrib/distribute/python/warm_starting_util_test.py
@@ -0,0 +1,97 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for warm_starting_util with Distribution Strategy.
+
+These tests are located here instead of as part of `WarmStartingUtilTest`
+because they need access to distribution strategies which are only present in
+contrib right now.
+TODO(priyag): Move the tests to core `WarmStartingUtilTest` when distribution
+strategy moves out of contrib.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+from absl.testing import parameterized
+
+from tensorflow.contrib.distribute.python import combinations
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+from tensorflow.python.training import saver as saver_lib
+from tensorflow.python.training import warm_starting_util as ws_util
+
+
+class WarmStartingUtilWithDistributionStrategyTest(
+ test.TestCase, parameterized.TestCase):
+
+ @combinations.generate(combinations.combine(
+ distribution=[combinations.default_strategy,
+ combinations.one_device_strategy,
+ combinations.mirrored_strategy_with_gpu_and_cpu,
+ combinations.mirrored_strategy_with_two_gpus],
+ save_with_distribution=[True, False],
+ restore_with_distribution=[True, False],
+ mode=["graph"]))
+ def testWarmStart(self, distribution, save_with_distribution,
+ restore_with_distribution):
+
+ var_name = "v"
+ original_value = [[1., 2.], [3., 4.]]
+
+ # Create variable and save checkpoint from which to warm-start.
+ def create_var(g):
+ with self.test_session(graph=g) as sess:
+ var = variable_scope.get_variable(var_name, initializer=original_value)
+ sess.run(variables.global_variables_initializer())
+ saver = saver_lib.Saver()
+ ckpt_prefix = os.path.join(self.get_temp_dir(), "model")
+ saver.save(sess, ckpt_prefix, global_step=0)
+ return var, sess.run(var)
+
+ if save_with_distribution:
+ with ops.Graph().as_default() as g, distribution.scope():
+ _, prev_init_val = create_var(g)
+ else:
+ with ops.Graph().as_default() as g:
+ _, prev_init_val = create_var(g)
+
+ # Verify we initialized the values correctly.
+ self.assertAllEqual(original_value, prev_init_val)
+
+ def warm_start(g):
+ with self.test_session(graph=g) as sess:
+ # Initialize with zeros.
+ var = variable_scope.get_variable(
+ var_name, initializer=[[0., 0.], [0., 0.]])
+ ws_util.warm_start(self.get_temp_dir())
+ sess.run(variables.global_variables_initializer())
+ # Verify weights were correctly warm-started to previous values.
+ self.assertAllEqual(original_value, self.evaluate(var))
+
+ # Warm start in a new graph.
+ if restore_with_distribution:
+ with ops.Graph().as_default() as g, distribution.scope():
+ warm_start(g)
+ else:
+ with ops.Graph().as_default() as g:
+ warm_start(g)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py b/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
index b8f2a4b2c7..296e66f2b2 100644
--- a/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
+++ b/tensorflow/contrib/distributions/python/ops/bijectors/masked_autoregressive.py
@@ -514,9 +514,8 @@ def masked_autoregressive_default_template(
Masked Autoencoder for Distribution Estimation. In _International
Conference on Machine Learning_, 2015. https://arxiv.org/abs/1502.03509
"""
-
- with ops.name_scope(name, "masked_autoregressive_default_template",
- values=[log_scale_min_clip, log_scale_max_clip]):
+ name = name or "masked_autoregressive_default_template"
+ with ops.name_scope(name, values=[log_scale_min_clip, log_scale_max_clip]):
def _fn(x):
"""MADE parameterized via `masked_autoregressive_default_template`."""
# TODO(b/67594795): Better support of dynamic shape.
@@ -552,8 +551,7 @@ def masked_autoregressive_default_template(
else _clip_by_value_preserve_grad)
log_scale = which_clip(log_scale, log_scale_min_clip, log_scale_max_clip)
return shift, log_scale
- return template_ops.make_template(
- "masked_autoregressive_default_template", _fn)
+ return template_ops.make_template(name, _fn)
@deprecation.deprecated(
diff --git a/tensorflow/contrib/eager/python/datasets.py b/tensorflow/contrib/eager/python/datasets.py
index 58c548d798..e31dbbe80f 100644
--- a/tensorflow/contrib/eager/python/datasets.py
+++ b/tensorflow/contrib/eager/python/datasets.py
@@ -18,33 +18,14 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import threading
-
from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.python.data.ops import iterator_ops
-from tensorflow.python.data.util import nest
-from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
-from tensorflow.python.framework import constant_op
-from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
-from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.training.saver import BaseSaverBuilder
-_uid_counter = 0
-_uid_lock = threading.Lock()
-
-
-def _generate_shared_name(prefix):
- with _uid_lock:
- global _uid_counter
- uid = _uid_counter
- _uid_counter += 1
- return "{}{}".format(prefix, uid)
-
class Iterator(iterator_ops.EagerIterator, checkpointable.CheckpointableBase):
"""An iterator producing tf.Tensor objects from a tf.data.Dataset.
@@ -80,38 +61,18 @@ class Iterator(iterator_ops.EagerIterator, checkpointable.CheckpointableBase):
"`tf.contrib.eager.Iterator`. Use `for ... in dataset:` to iterate "
"over the dataset instead.")
- super(Iterator, self).__init__(dataset)
if not context.context().device_spec.device_type:
is_remote_device = False
else:
is_remote_device = context.context().device_spec.device_type != "CPU"
- self._buffer_resource_handle = None
if is_remote_device:
- with ops.device("/device:CPU:0"):
- iter_string_handle = gen_dataset_ops.iterator_to_string_handle(
- self._resource)
-
- @function.Defun(dtypes.string)
- def remote_fn(h):
- remote_iterator = iterator_ops.Iterator.from_string_handle(
- h, self.output_types, self.output_shapes, self.output_classes)
- return remote_iterator.get_next()
-
- remote_fn.add_to_graph(None)
- target = constant_op.constant("/device:CPU:0")
- with ops.device(self._device):
- self._buffer_resource_handle = prefetching_ops.function_buffering_resource( # pylint: disable=line-too-long
- string_arg=iter_string_handle,
- output_types=self._flat_output_types,
- f=remote_fn,
- target_device=target,
- buffer_size=10,
- container="",
- shared_name=_generate_shared_name(
- "contrib_eager_iterator_function_buffer_resource"))
- self._buffer_resource_deleter = resource_variable_ops.EagerResourceDeleter( # pylint: disable=line-too-long
- handle=self._buffer_resource_handle,
- handle_device=self._device)
+ with ops.device(None):
+ # Let the placer figure out where to place the various functions etc.
+ # created by the CopyToDeviceDataset.
+ dataset = dataset.apply(prefetching_ops.copy_to_device(
+ context.context().device_name))
+ dataset = dataset.prefetch(1)
+ super(Iterator, self).__init__(dataset)
def _next_internal(self):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
@@ -120,16 +81,7 @@ class Iterator(iterator_ops.EagerIterator, checkpointable.CheckpointableBase):
# that there is no more data to iterate over.
# TODO(b/77291417): Fix
with context.execution_mode(context.SYNC):
- if self._buffer_resource_handle is not None:
- with ops.device(self._device):
- ret = prefetching_ops.function_buffering_resource_get_next(
- function_buffer_resource=self._buffer_resource_handle,
- output_types=self._flat_output_types)
- return sparse.deserialize_sparse_tensors(
- nest.pack_sequence_as(self._output_types, ret), self._output_types,
- self._output_shapes, self._output_classes)
- else:
- return super(Iterator, self)._next_internal()
+ return super(Iterator, self)._next_internal()
# TODO(shivaniagrawal): Expose checkpointable stateful objects from dataset
# attributes(potential).
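
A usage sketch of the rewritten tf.contrib.eager.Iterator: after the change above it wraps the dataset with `copy_to_device` plus a one-element prefetch whenever the current device is not a CPU, so iterating under a GPU device scope just works. A minimal sketch, assuming a TF 1.x eager environment; it falls back to the CPU when no GPU is available:

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()

ds = tf.data.Dataset.from_tensors([0., 1.])
device = tf.test.gpu_device_name() or "/device:CPU:0"
with tf.device(device):
    # On a non-CPU device the Iterator now copies and prefetches the dataset
    # internally instead of going through a function buffering resource.
    for x in tfe.Iterator(ds):
        print(x + x)  # [0., 2.]
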
diff --git a/tensorflow/contrib/eager/python/datasets_test.py b/tensorflow/contrib/eager/python/datasets_test.py
index 68bec9aee8..acc605247f 100644
--- a/tensorflow/contrib/eager/python/datasets_test.py
+++ b/tensorflow/contrib/eager/python/datasets_test.py
@@ -193,6 +193,20 @@ class IteratorTest(test.TestCase):
x = math_ops.add(x, x)
self.assertAllEqual([0., 2.], x.numpy())
+ def testGpuTensor(self):
+ ds = Dataset.from_tensors([0., 1.])
+ with ops.device(test.gpu_device_name()):
+ for x in ds:
+ y = math_ops.add(x, x)
+ self.assertAllEqual([0., 2.], y.numpy())
+
+ def testGpuDefinedDataset(self):
+ with ops.device(test.gpu_device_name()):
+ ds = Dataset.from_tensors([0., 1.])
+ for x in ds:
+ y = math_ops.add(x, x)
+ self.assertAllEqual([0., 2.], y.numpy())
+
def testTensorsExplicitPrefetchToDevice(self):
ds = Dataset.from_tensor_slices([0., 1.])
ds = ds.apply(prefetching_ops.prefetch_to_device(test.gpu_device_name()))
diff --git a/tensorflow/contrib/eager/python/examples/densenet/BUILD b/tensorflow/contrib/eager/python/examples/densenet/BUILD
index de2a817d17..2dc196f550 100644
--- a/tensorflow/contrib/eager/python/examples/densenet/BUILD
+++ b/tensorflow/contrib/eager/python/examples/densenet/BUILD
@@ -16,6 +16,7 @@ py_binary(
cuda_py_test(
name = "densenet_test",
+ size = "large",
srcs = ["densenet_test.py"],
additional_deps = [
":densenet",
@@ -27,3 +28,21 @@ cuda_py_test(
"optonly",
],
)
+
+cuda_py_test(
+ name = "densenet_graph_test",
+ size = "large",
+ srcs = ["densenet_graph_test.py"],
+ additional_deps = [
+ ":densenet",
+ "//third_party/py/numpy",
+ "//tensorflow:tensorflow_py",
+ ],
+ tags = [
+ "no_pip",
+ "noasan",
+ "nomsan",
+ "notsan",
+ "optonly",
+ ],
+)
diff --git a/tensorflow/contrib/eager/python/examples/densenet/densenet.py b/tensorflow/contrib/eager/python/examples/densenet/densenet.py
index 3a2b2de250..6de4e69400 100644
--- a/tensorflow/contrib/eager/python/examples/densenet/densenet.py
+++ b/tensorflow/contrib/eager/python/examples/densenet/densenet.py
@@ -32,24 +32,28 @@ class ConvBlock(tf.keras.Model):
Arguments:
num_filters: number of filters passed to a convolutional layer.
+ data_format: "channels_first" or "channels_last"
bottleneck: if True, then a 1x1 Conv is performed followed by 3x3 Conv.
weight_decay: weight decay
dropout_rate: dropout rate.
"""
- def __init__(self, num_filters, bottleneck, weight_decay=1e-4,
+ def __init__(self, num_filters, data_format, bottleneck, weight_decay=1e-4,
dropout_rate=0):
super(ConvBlock, self).__init__()
self.bottleneck = bottleneck
+
+ axis = -1 if data_format == "channels_last" else 1
inter_filter = num_filters * 4
# don't forget to set use_bias=False when using batchnorm
self.conv2 = tf.keras.layers.Conv2D(num_filters,
(3, 3),
padding="same",
use_bias=False,
+ data_format=data_format,
kernel_initializer="he_normal",
kernel_regularizer=l2(weight_decay))
- self.batchnorm1 = tf.keras.layers.BatchNormalization()
+ self.batchnorm1 = tf.keras.layers.BatchNormalization(axis=axis)
self.dropout = tf.keras.layers.Dropout(dropout_rate)
if self.bottleneck:
@@ -57,9 +61,10 @@ class ConvBlock(tf.keras.Model):
(1, 1),
padding="same",
use_bias=False,
+ data_format=data_format,
kernel_initializer="he_normal",
kernel_regularizer=l2(weight_decay))
- self.batchnorm2 = tf.keras.layers.BatchNormalization()
+ self.batchnorm2 = tf.keras.layers.BatchNormalization(axis=axis)
def call(self, x, training=True):
output = self.batchnorm1(x, training=training)
@@ -79,20 +84,25 @@ class TransitionBlock(tf.keras.Model):
Arguments:
num_filters: number of filters passed to a convolutional layer.
+ data_format: "channels_first" or "channels_last"
weight_decay: weight decay
dropout_rate: dropout rate.
"""
- def __init__(self, num_filters, weight_decay=1e-4, dropout_rate=0):
+ def __init__(self, num_filters, data_format,
+ weight_decay=1e-4, dropout_rate=0):
super(TransitionBlock, self).__init__()
- self.batchnorm = tf.keras.layers.BatchNormalization()
+ axis = -1 if data_format == "channels_last" else 1
+
+ self.batchnorm = tf.keras.layers.BatchNormalization(axis=axis)
self.conv = tf.keras.layers.Conv2D(num_filters,
(1, 1),
padding="same",
use_bias=False,
+ data_format=data_format,
kernel_initializer="he_normal",
kernel_regularizer=l2(weight_decay))
- self.avg_pool = tf.keras.layers.AveragePooling2D()
+ self.avg_pool = tf.keras.layers.AveragePooling2D(data_format=data_format)
def call(self, x, training=True):
output = self.batchnorm(x, training=training)
@@ -108,19 +118,22 @@ class DenseBlock(tf.keras.Model):
Arguments:
num_layers: Number of layers in each block.
growth_rate: number of filters to add per conv block.
+ data_format: "channels_first" or "channels_last"
bottleneck: boolean, that decides which part of ConvBlock to call.
weight_decay: weight decay
dropout_rate: dropout rate.
"""
- def __init__(self, num_layers, growth_rate, bottleneck,
+ def __init__(self, num_layers, growth_rate, data_format, bottleneck,
weight_decay=1e-4, dropout_rate=0):
super(DenseBlock, self).__init__()
self.num_layers = num_layers
+ self.axis = -1 if data_format == "channels_last" else 1
self.blocks = []
for _ in range(int(self.num_layers)):
self.blocks.append(ConvBlock(growth_rate,
+ data_format,
bottleneck,
weight_decay,
dropout_rate))
@@ -128,7 +141,7 @@ class DenseBlock(tf.keras.Model):
def call(self, x, training=True):
for i in range(int(self.num_layers)):
output = self.blocks[i](x, training=training)
- x = tf.concat([x, output], axis=-1)
+ x = tf.concat([x, output], axis=self.axis)
return x
@@ -146,6 +159,7 @@ class DenseNet(tf.keras.Model):
      If positive integer, then it is used as the
number of layers per block.
If list or tuple, then this list is used directly.
+ data_format: "channels_first" or "channels_last"
bottleneck: boolean, to decide which part of conv block to call.
    compression: reducing the number of inputs (filters) to the transition block.
weight_decay: weight decay
@@ -157,7 +171,7 @@ class DenseNet(tf.keras.Model):
"""
def __init__(self, depth_of_model, growth_rate, num_of_blocks,
- output_classes, num_layers_in_each_block,
+ output_classes, num_layers_in_each_block, data_format,
bottleneck=True, compression=0.5, weight_decay=1e-4,
dropout_rate=0, pool_initial=False, include_top=True):
super(DenseNet, self).__init__()
@@ -166,6 +180,7 @@ class DenseNet(tf.keras.Model):
self.num_of_blocks = num_of_blocks
self.output_classes = output_classes
self.num_layers_in_each_block = num_layers_in_each_block
+ self.data_format = data_format
self.bottleneck = bottleneck
self.compression = compression
self.weight_decay = weight_decay
@@ -193,6 +208,8 @@ class DenseNet(tf.keras.Model):
self.num_layers_in_each_block = [
self.num_layers_in_each_block] * self.num_of_blocks
+ axis = -1 if self.data_format == "channels_last" else 1
+
    # setting the filters and stride of the initial conv layer.
if self.pool_initial:
init_filters = (7, 7)
@@ -209,20 +226,23 @@ class DenseNet(tf.keras.Model):
strides=stride,
padding="same",
use_bias=False,
+ data_format=self.data_format,
kernel_initializer="he_normal",
kernel_regularizer=l2(
self.weight_decay))
if self.pool_initial:
self.pool1 = tf.keras.layers.MaxPooling2D(pool_size=(3, 3),
strides=(2, 2),
- padding="same")
- self.batchnorm1 = tf.keras.layers.BatchNormalization()
+ padding="same",
+ data_format=self.data_format)
+ self.batchnorm1 = tf.keras.layers.BatchNormalization(axis=axis)
- self.batchnorm2 = tf.keras.layers.BatchNormalization()
+ self.batchnorm2 = tf.keras.layers.BatchNormalization(axis=axis)
# last pooling and fc layer
if self.include_top:
- self.last_pool = tf.keras.layers.GlobalAveragePooling2D()
+ self.last_pool = tf.keras.layers.GlobalAveragePooling2D(
+ data_format=self.data_format)
self.classifier = tf.keras.layers.Dense(self.output_classes)
# calculating the number of filters after each block
@@ -241,12 +261,14 @@ class DenseNet(tf.keras.Model):
for i in range(self.num_of_blocks):
self.dense_blocks.append(DenseBlock(self.num_layers_in_each_block[i],
self.growth_rate,
+ self.data_format,
self.bottleneck,
self.weight_decay,
self.dropout_rate))
if i+1 < self.num_of_blocks:
self.transition_blocks.append(
TransitionBlock(num_filters_after_each_block[i+1],
+ self.data_format,
self.weight_decay,
self.dropout_rate))
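
The data_format plumbing added throughout densenet.py reduces to choosing the channel axis once: "channels_last" inputs are NHWC (channels on the last axis) while "channels_first" inputs are NCHW (channels on axis 1). A small standalone sketch of that choice (the helper name is hypothetical, not part of densenet.py):

import tensorflow as tf

def channel_axis(data_format):
    # BatchNormalization and tf.concat must be pointed at the channel axis.
    return -1 if data_format == "channels_last" else 1

for fmt, shape in [("channels_last", (1, 32, 32, 3)),
                   ("channels_first", (1, 3, 32, 32))]:
    bn = tf.keras.layers.BatchNormalization(axis=channel_axis(fmt))
    out = bn(tf.zeros(shape))
    print(fmt, out.shape)  # shape is unchanged; only the normalized axis differs
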
diff --git a/tensorflow/contrib/eager/python/examples/densenet/densenet_graph_test.py b/tensorflow/contrib/eager/python/examples/densenet/densenet_graph_test.py
new file mode 100644
index 0000000000..bd0057fb1a
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/densenet/densenet_graph_test.py
@@ -0,0 +1,149 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests and Benchmarks for Densenet model under graph execution."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import time
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.contrib.eager.python.examples.densenet import densenet
+
+
+def data_format():
+ return 'channels_first' if tf.test.is_gpu_available() else 'channels_last'
+
+
+def image_shape(batch_size):
+ if data_format() == 'channels_first':
+ return [batch_size, 3, 224, 224]
+ return [batch_size, 224, 224, 3]
+
+
+def random_batch(batch_size):
+ images = np.random.rand(*image_shape(batch_size)).astype(np.float32)
+ num_classes = 1000
+ labels = np.random.randint(
+ low=0, high=num_classes, size=[batch_size]).astype(np.int32)
+ one_hot = np.zeros((batch_size, num_classes)).astype(np.float32)
+ one_hot[np.arange(batch_size), labels] = 1.
+ return images, one_hot
+
+
+class DensenetGraphTest(tf.test.TestCase):
+
+ def testApply(self):
+ depth = 7
+ growth_rate = 2
+ num_blocks = 3
+ output_classes = 10
+ num_layers_in_each_block = -1
+ batch_size = 1
+ with tf.Graph().as_default():
+ images = tf.placeholder(tf.float32, image_shape(None))
+ model = densenet.DenseNet(depth, growth_rate, num_blocks,
+ output_classes, num_layers_in_each_block,
+ data_format(), bottleneck=True, compression=0.5,
+ weight_decay=1e-4, dropout_rate=0,
+ pool_initial=False, include_top=True)
+ predictions = model(images, training=False)
+
+ init = tf.global_variables_initializer()
+
+ with tf.Session() as sess:
+ sess.run(init)
+ np_images, _ = random_batch(batch_size)
+ out = sess.run(predictions, feed_dict={images: np_images})
+ self.assertAllEqual([batch_size, output_classes], out.shape)
+
+
+class DensenetBenchmark(tf.test.Benchmark):
+
+ def __init__(self):
+ self.depth = 121
+ self.growth_rate = 32
+ self.num_blocks = 4
+ self.output_classes = 1000
+ self.num_layers_in_each_block = [6, 12, 24, 16]
+
+ def _report(self, label, start, num_iters, batch_size):
+ avg_time = (time.time() - start) / num_iters
+ dev = 'gpu' if tf.test.is_gpu_available() else 'cpu'
+ name = 'graph_%s_%s_batch_%d_%s' % (label, dev, batch_size, data_format())
+ extras = {'examples_per_sec': batch_size / avg_time}
+ self.report_benchmark(
+ iters=num_iters, wall_time=avg_time, name=name, extras=extras)
+
+ def benchmark_graph_apply(self):
+ with tf.Graph().as_default():
+ images = tf.placeholder(tf.float32, image_shape(None))
+ model = densenet.DenseNet(self.depth, self.growth_rate, self.num_blocks,
+ self.output_classes,
+ self.num_layers_in_each_block, data_format(),
+ bottleneck=True, compression=0.5,
+ weight_decay=1e-4, dropout_rate=0,
+ pool_initial=True, include_top=True)
+ predictions = model(images, training=False)
+
+ init = tf.global_variables_initializer()
+
+ batch_size = 64
+ with tf.Session() as sess:
+ sess.run(init)
+ np_images, _ = random_batch(batch_size)
+ num_burn, num_iters = (3, 30)
+ for _ in range(num_burn):
+ sess.run(predictions, feed_dict={images: np_images})
+ start = time.time()
+ for _ in range(num_iters):
+ sess.run(predictions, feed_dict={images: np_images})
+ self._report('apply', start, num_iters, batch_size)
+
+ def benchmark_graph_train(self):
+ for batch_size in [16, 32, 64]:
+ with tf.Graph().as_default():
+ np_images, np_labels = random_batch(batch_size)
+ dataset = tf.data.Dataset.from_tensors((np_images, np_labels)).repeat()
+ (images, labels) = dataset.make_one_shot_iterator().get_next()
+
+ model = densenet.DenseNet(self.depth, self.growth_rate, self.num_blocks,
+ self.output_classes,
+ self.num_layers_in_each_block, data_format(),
+ bottleneck=True, compression=0.5,
+ weight_decay=1e-4, dropout_rate=0,
+ pool_initial=True, include_top=True)
+ logits = model(images, training=True)
+ loss = tf.losses.softmax_cross_entropy(
+ logits=logits, onehot_labels=labels)
+ optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
+ train_op = optimizer.minimize(loss)
+
+ init = tf.global_variables_initializer()
+ with tf.Session() as sess:
+ sess.run(init)
+ (num_burn, num_iters) = (5, 10)
+ for _ in range(num_burn):
+ sess.run(train_op)
+ start = time.time()
+ for _ in range(num_iters):
+ sess.run(train_op)
+ self._report('train', start, num_iters, batch_size)
+
+
+if __name__ == '__main__':
+ tf.test.main()
diff --git a/tensorflow/contrib/eager/python/examples/densenet/densenet_test.py b/tensorflow/contrib/eager/python/examples/densenet/densenet_test.py
index 56d3362f3b..4f19711fb8 100644
--- a/tensorflow/contrib/eager/python/examples/densenet/densenet_test.py
+++ b/tensorflow/contrib/eager/python/examples/densenet/densenet_test.py
@@ -12,14 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Tests for various Densenet architectures."""
+"""Tests and Benchmarks for Densenet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import gc
+import time
import tensorflow as tf
+import tensorflow.contrib.eager as tfe
+
from tensorflow.contrib.eager.python.examples.densenet import densenet
+from tensorflow.python.client import device_lib
class DensenetTest(tf.test.TestCase):
@@ -31,14 +36,19 @@ class DensenetTest(tf.test.TestCase):
output_classes = 10
num_layers_in_each_block = -1
batch_size = 1
+ data_format = ('channels_first') if tf.test.is_gpu_available() else (
+ 'channels_last')
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
- bottleneck=True, compression=0.5,
+ data_format, bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=False, include_top=True)
- rand_input = tf.random_uniform((batch_size, 32, 32, 3))
+ if data_format == 'channels_last':
+ rand_input = tf.random_uniform((batch_size, 32, 32, 3))
+ else:
+ rand_input = tf.random_uniform((batch_size, 3, 32, 32))
output_shape = model(rand_input).shape
self.assertEqual(output_shape, (batch_size, output_classes))
@@ -49,14 +59,19 @@ class DensenetTest(tf.test.TestCase):
output_classes = 10
num_layers_in_each_block = -1
batch_size = 1
+ data_format = ('channels_first') if tf.test.is_gpu_available() else (
+ 'channels_last')
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
- bottleneck=False, compression=0.5,
+ data_format, bottleneck=False, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=False, include_top=True)
- rand_input = tf.random_uniform((batch_size, 32, 32, 3))
+ if data_format == 'channels_last':
+ rand_input = tf.random_uniform((batch_size, 32, 32, 3))
+ else:
+ rand_input = tf.random_uniform((batch_size, 3, 32, 32))
output_shape = model(rand_input).shape
self.assertEqual(output_shape, (batch_size, output_classes))
@@ -67,17 +82,229 @@ class DensenetTest(tf.test.TestCase):
output_classes = 10
num_layers_in_each_block = [1, 2, 2, 1]
batch_size = 1
+ data_format = ('channels_first') if tf.test.is_gpu_available() else (
+ 'channels_last')
model = densenet.DenseNet(depth, growth_rate, num_blocks,
output_classes, num_layers_in_each_block,
- bottleneck=True, compression=0.5,
+ data_format, bottleneck=True, compression=0.5,
weight_decay=1e-4, dropout_rate=0,
pool_initial=True, include_top=True)
- rand_input = tf.random_uniform((batch_size, 32, 32, 3))
+ if data_format == 'channels_last':
+ rand_input = tf.random_uniform((batch_size, 32, 32, 3))
+ else:
+ rand_input = tf.random_uniform((batch_size, 3, 32, 32))
output_shape = model(rand_input).shape
self.assertEqual(output_shape, (batch_size, output_classes))
+
+def compute_gradients(model, images, labels):
+ with tf.GradientTape() as tape:
+ logits = model(images, training=True)
+ loss = tf.losses.softmax_cross_entropy(
+ logits=logits, onehot_labels=labels)
+ tf.contrib.summary.scalar(name='loss', tensor=loss)
+ return tape.gradient(loss, model.variables)
+
+
+def apply_gradients(model, optimizer, gradients):
+ optimizer.apply_gradients(zip(gradients, model.variables))
+
+
+def device_and_data_format():
+ return ('/gpu:0',
+ 'channels_first') if tf.test.is_gpu_available() else ('/cpu:0',
+ 'channels_last')
+
+
+def random_batch(batch_size, data_format):
+ shape = (3, 224, 224) if data_format == 'channels_first' else (224, 224, 3)
+ shape = (batch_size,) + shape
+
+ num_classes = 1000
+ images = tf.random_uniform(shape)
+ labels = tf.random_uniform(
+ [batch_size], minval=0, maxval=num_classes, dtype=tf.int32)
+ one_hot = tf.one_hot(labels, num_classes)
+
+ return images, one_hot
+
+
+class MockIterator(object):
+
+ def __init__(self, tensors):
+ self._tensors = [tf.identity(x) for x in tensors]
+
+ def next(self):
+ return self._tensors
+
+
+class DensenetBenchmark(tf.test.Benchmark):
+
+ def __init__(self):
+ self.depth = 121
+ self.growth_rate = 32
+ self.num_blocks = 4
+ self.output_classes = 1000
+ self.num_layers_in_each_block = [6, 12, 24, 16]
+
+ def _train_batch_sizes(self):
+ """Choose batch sizes based on GPU capability."""
+ for device in device_lib.list_local_devices():
+ if tf.DeviceSpec.from_string(device.name).device_type == 'GPU':
+ if 'K20' in device.physical_device_desc:
+ return (16,)
+ if 'P100' in device.physical_device_desc:
+ return (16, 32, 64)
+
+ if tf.DeviceSpec.from_string(device.name).device_type == 'TPU':
+ return (32,)
+ return (16, 32)
+
+ def _report(self, label, start, num_iters, device, batch_size, data_format):
+ avg_time = (time.time() - start) / num_iters
+ dev = tf.DeviceSpec.from_string(device).device_type.lower()
+ name = '%s_%s_batch_%d_%s' % (label, dev, batch_size, data_format)
+ extras = {'examples_per_sec': batch_size / avg_time}
+ self.report_benchmark(
+ iters=num_iters, wall_time=avg_time, name=name, extras=extras)
+
+ def _force_device_sync(self):
+ # If this function is called in the context of a non-CPU device
+ # (e.g., inside a 'with tf.device("/gpu:0")' block)
+ # then this will force a copy from CPU->NON_CPU_DEVICE->CPU,
+ # which forces a sync. This is a roundabout way, yes.
+ tf.constant(1.).cpu()
+
+ def _benchmark_eager_apply(self, label, device_and_format, defun=False,
+ execution_mode=None, compiled=False):
+ with tfe.execution_mode(execution_mode):
+ device, data_format = device_and_format
+ model = densenet.DenseNet(self.depth, self.growth_rate, self.num_blocks,
+ self.output_classes,
+ self.num_layers_in_each_block, data_format,
+ bottleneck=True, compression=0.5,
+ weight_decay=1e-4, dropout_rate=0,
+ pool_initial=True, include_top=True)
+ if defun:
+ model.call = tfe.defun(model.call, compiled=compiled)
+ batch_size = 64
+ num_burn = 5
+ num_iters = 30
+ with tf.device(device):
+ images, _ = random_batch(batch_size, data_format)
+ for _ in xrange(num_burn):
+ model(images, training=False).cpu()
+ if execution_mode:
+ tfe.async_wait()
+ gc.collect()
+ start = time.time()
+ for _ in xrange(num_iters):
+ model(images, training=False).cpu()
+ if execution_mode:
+ tfe.async_wait()
+ self._report(label, start, num_iters, device, batch_size, data_format)
+
+ def benchmark_eager_apply_sync(self):
+ self._benchmark_eager_apply('eager_apply', device_and_data_format(),
+ defun=False)
+
+ def benchmark_eager_apply_async(self):
+ self._benchmark_eager_apply(
+ 'eager_apply_async', device_and_data_format(), defun=False,
+ execution_mode=tfe.ASYNC)
+
+ def benchmark_eager_apply_with_defun(self):
+ self._benchmark_eager_apply('eager_apply_with_defun',
+ device_and_data_format(), defun=True)
+
+ def _benchmark_eager_train(self,
+ label,
+ make_iterator,
+ device_and_format,
+ defun=False,
+ execution_mode=None,
+ compiled=False):
+ with tfe.execution_mode(execution_mode):
+ device, data_format = device_and_format
+ for batch_size in self._train_batch_sizes():
+ (images, labels) = random_batch(batch_size, data_format)
+ model = densenet.DenseNet(self.depth, self.growth_rate, self.num_blocks,
+ self.output_classes,
+ self.num_layers_in_each_block, data_format,
+ bottleneck=True, compression=0.5,
+ weight_decay=1e-4, dropout_rate=0,
+ pool_initial=True, include_top=True)
+ optimizer = tf.train.GradientDescentOptimizer(0.1)
+ apply_grads = apply_gradients
+ if defun:
+ model.call = tfe.defun(model.call, compiled=compiled)
+ apply_grads = tfe.defun(apply_gradients, compiled=compiled)
+
+ num_burn = 3
+ num_iters = 10
+ with tf.device(device):
+ iterator = make_iterator((images, labels))
+ for _ in xrange(num_burn):
+ (images, labels) = iterator.next()
+ apply_grads(model, optimizer,
+ compute_gradients(model, images, labels))
+ if execution_mode:
+ tfe.async_wait()
+ self._force_device_sync()
+ gc.collect()
+
+ start = time.time()
+ for _ in xrange(num_iters):
+ (images, labels) = iterator.next()
+ apply_grads(model, optimizer,
+ compute_gradients(model, images, labels))
+ if execution_mode:
+ tfe.async_wait()
+ self._force_device_sync()
+ self._report(label, start, num_iters, device, batch_size, data_format)
+
+ def benchmark_eager_train_sync(self):
+ self._benchmark_eager_train('eager_train', MockIterator,
+ device_and_data_format(), defun=False)
+
+ def benchmark_eager_train_async(self):
+ self._benchmark_eager_train(
+ 'eager_train_async',
+ MockIterator,
+ device_and_data_format(),
+ defun=False,
+ execution_mode=tfe.ASYNC)
+
+ def benchmark_eager_train_with_defun(self):
+ self._benchmark_eager_train(
+ 'eager_train_with_defun', MockIterator,
+ device_and_data_format(), defun=True)
+
+ def benchmark_eager_train_datasets(self):
+
+ def make_iterator(tensors):
+ with tf.device('/device:CPU:0'):
+ ds = tf.data.Dataset.from_tensors(tensors).repeat()
+ return tfe.Iterator(ds)
+
+ self._benchmark_eager_train(
+ 'eager_train_dataset', make_iterator,
+ device_and_data_format(), defun=False)
+
+ def benchmark_eager_train_datasets_with_defun(self):
+
+ def make_iterator(tensors):
+ with tf.device('/device:CPU:0'):
+ ds = tf.data.Dataset.from_tensors(tensors).repeat()
+ return tfe.Iterator(ds)
+
+ self._benchmark_eager_train(
+ 'eager_train_dataset_with_defun', make_iterator,
+ device_and_data_format(), defun=True)
+
+
if __name__ == '__main__':
tf.enable_eager_execution()
tf.test.main()
diff --git a/tensorflow/contrib/eager/python/examples/gan/mnist.py b/tensorflow/contrib/eager/python/examples/gan/mnist.py
index cc9cf53410..9a42179299 100644
--- a/tensorflow/contrib/eager/python/examples/gan/mnist.py
+++ b/tensorflow/contrib/eager/python/examples/gan/mnist.py
@@ -29,7 +29,6 @@ import time
import tensorflow as tf
-import tensorflow.contrib.eager as tfe
from tensorflow.examples.tutorials.mnist import input_data
layers = tf.keras.layers
@@ -214,7 +213,7 @@ def train_one_epoch(generator, discriminator, generator_optimizer,
total_generator_loss = 0.0
total_discriminator_loss = 0.0
- for (batch_index, images) in enumerate(tfe.Iterator(dataset)):
+ for (batch_index, images) in enumerate(dataset):
with tf.device('/cpu:0'):
tf.assign_add(step_counter, 1)
@@ -227,7 +226,10 @@ def train_one_epoch(generator, discriminator, generator_optimizer,
maxval=1.,
seed=batch_index)
- with tf.GradientTape(persistent=True) as g:
+    # We can use two tapes or a single persistent tape.
+    # Using two tapes is more memory efficient since intermediate tensors can be
+    # released between the two .gradient() calls below.
+ with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise)
tf.contrib.summary.image(
'generated_images',
@@ -243,9 +245,10 @@ def train_one_epoch(generator, discriminator, generator_optimizer,
generator_loss_val = generator_loss(discriminator_gen_outputs)
total_generator_loss += generator_loss_val
- generator_grad = g.gradient(generator_loss_val, generator.variables)
- discriminator_grad = g.gradient(discriminator_loss_val,
- discriminator.variables)
+ generator_grad = gen_tape.gradient(generator_loss_val,
+ generator.variables)
+ discriminator_grad = disc_tape.gradient(discriminator_loss_val,
+ discriminator.variables)
generator_optimizer.apply_gradients(
zip(generator_grad, generator.variables))
@@ -261,7 +264,7 @@ def train_one_epoch(generator, discriminator, generator_optimizer,
def main(_):
(device, data_format) = ('/gpu:0', 'channels_first')
- if FLAGS.no_gpu or tfe.num_gpus() <= 0:
+ if FLAGS.no_gpu or tf.contrib.eager.num_gpus() <= 0:
(device, data_format) = ('/cpu:0', 'channels_last')
print('Using device %s, and data format %s.' % (device, data_format))
@@ -287,7 +290,7 @@ def main(_):
latest_cpkt = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
if latest_cpkt:
print('Using latest checkpoint at ' + latest_cpkt)
- checkpoint = tfe.Checkpoint(**model_objects)
+ checkpoint = tf.train.Checkpoint(**model_objects)
# Restore variables on creation if a checkpoint exists.
checkpoint.restore(latest_cpkt)
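
The two-tape pattern introduced in mnist.py above can be illustrated with a small standalone example. The scalar "generator" and "discriminator" variables below are hypothetical toys, not the mnist.py models; the point is only that each non-persistent tape is queried exactly once, so its intermediates can be released right after its own .gradient() call:

import tensorflow as tf
tf.enable_eager_execution()

g_var = tf.contrib.eager.Variable(1.0)  # toy "generator" parameter
d_var = tf.contrib.eager.Variable(2.0)  # toy "discriminator" parameter

with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
    fake = g_var * 3.0                  # "generated" output
    d_out = d_var * fake                # "discriminator" output on the fake
    gen_loss = tf.square(1.0 - d_out)   # generator wants d_out close to 1
    disc_loss = tf.square(d_out)        # discriminator wants d_out close to 0

# Each tape is used once; no persistent=True tape has to be kept alive.
gen_grads = gen_tape.gradient(gen_loss, [g_var])
disc_grads = disc_tape.gradient(disc_loss, [d_var])
print(gen_grads[0].numpy(), disc_grads[0].numpy())
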
diff --git a/tensorflow/contrib/eager/python/examples/generative_examples/dcgan.ipynb b/tensorflow/contrib/eager/python/examples/generative_examples/dcgan.ipynb
new file mode 100644
index 0000000000..44ff43a111
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/generative_examples/dcgan.ipynb
@@ -0,0 +1,733 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "0TD5ZrvEMbhZ"
+ },
+ "source": [
+ "##### Copyright 2018 The TensorFlow Authors.\n",
+ "\n",
+ "Licensed under the Apache License, Version 2.0 (the \"License\").\n",
+ "\n",
+ "# DCGAN: An example with tf.keras and eager\n",
+ "\n",
+ "\u003ctable class=\"tfo-notebook-buttons\" align=\"left\"\u003e\u003ctd\u003e\n",
+ "\u003ca target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/generative_examples/dcgan.ipynb\"\u003e\n",
+ " \u003cimg src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /\u003eRun in Google Colab\u003c/a\u003e \n",
+ "\u003c/td\u003e\u003ctd\u003e\n",
+ "\u003ca target=\"_blank\" href=\"https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/generative_examples/dcgan.ipynb\"\u003e\u003cimg width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /\u003eView source on GitHub\u003c/a\u003e\u003c/td\u003e\u003c/table\u003e"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "ITZuApL56Mny"
+ },
+ "source": [
+    "This notebook demonstrates how to generate images of handwritten digits using [tf.keras](https://www.tensorflow.org/programmers_guide/keras) and [eager execution](https://www.tensorflow.org/programmers_guide/eager). To do so, we use Deep Convolutional Generative Adversarial Networks ([DCGAN](https://arxiv.org/pdf/1511.06434.pdf)).\n",
+ "\n",
+    "This model takes about 30 seconds per epoch (using tf.contrib.eager.defun to create graph functions) to train on a single Tesla K80 on Colab, as of July 2018.\n",
+ "\n",
+ "Below is the output generated after training the generator and discriminator models for 150 epochs.\n",
+ "\n",
+ "![sample output](https://tensorflow.org/images/gan/dcgan.gif)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "u_2z-B3piVsw"
+ },
+ "outputs": [],
+ "source": [
+ "# to generate gifs\n",
+ "!pip install imageio"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "e1_Y75QXJS6h"
+ },
+ "source": [
+ "## Import TensorFlow and enable eager execution"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "YfIk2es3hJEd"
+ },
+ "outputs": [],
+ "source": [
+ "from __future__ import absolute_import, division, print_function\n",
+ "\n",
+ "# Import TensorFlow \u003e= 1.9 and enable eager execution\n",
+ "import tensorflow as tf\n",
+ "tf.enable_eager_execution()\n",
+ "\n",
+ "import os\n",
+ "import time\n",
+ "import numpy as np\n",
+ "import glob\n",
+ "import matplotlib.pyplot as plt\n",
+ "import PIL\n",
+ "import imageio\n",
+ "from IPython import display"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "iYn4MdZnKCey"
+ },
+ "source": [
+ "## Load the dataset\n",
+ "\n",
+ "We are going to use the MNIST dataset to train the generator and the discriminator. The generator will then generate handwritten digits."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "a4fYMGxGhrna"
+ },
+ "outputs": [],
+ "source": [
+ "(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "NFC2ghIdiZYE"
+ },
+ "outputs": [],
+ "source": [
+ "train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')\n",
+ "# We are normalizing the images to the range of [-1, 1]\n",
+ "train_images = (train_images - 127.5) / 127.5"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "S4PIDhoDLbsZ"
+ },
+ "outputs": [],
+ "source": [
+ "BUFFER_SIZE = 60000\n",
+ "BATCH_SIZE = 256"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "PIGN6ouoQxt3"
+ },
+ "source": [
+ "## Use tf.data to create batches and shuffle the dataset"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "-yKCCQOoJ7cn"
+ },
+ "outputs": [],
+ "source": [
+ "train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "THY-sZMiQ4UV"
+ },
+ "source": [
+ "## Write the generator and discriminator models\n",
+ "\n",
+ "* **Generator** \n",
+ " * It is responsible for **creating convincing images that are good enough to fool the discriminator**.\n",
+    "  * It consists of Conv2DTranspose (Upsampling) layers. We start with a fully connected layer and upsample the image twice to reach the desired image size (the MNIST image size), which is (28, 28, 1). \n",
+ " * We use **leaky relu** activation except for the **last layer** which uses **tanh** activation.\n",
+ " \n",
+ "* **Discriminator**\n",
+    "  * **The discriminator is responsible for distinguishing the fake (generated) images from the real images.**\n",
+ " * In other words, the discriminator is given generated images (from the generator) and the real MNIST images. The job of the discriminator is to classify these images into fake (generated) and real (MNIST images).\n",
+    "  * **Basically, the generator should be good enough to fool the discriminator into believing that the generated images are real**."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "VGLbvBEmjK0a"
+ },
+ "outputs": [],
+ "source": [
+ "class Generator(tf.keras.Model):\n",
+ " def __init__(self):\n",
+ " super(Generator, self).__init__()\n",
+ " self.fc1 = tf.keras.layers.Dense(7*7*64, use_bias=False)\n",
+ " self.batchnorm1 = tf.keras.layers.BatchNormalization()\n",
+ " \n",
+ " self.conv1 = tf.keras.layers.Conv2DTranspose(64, (5, 5), strides=(1, 1), padding='same', use_bias=False)\n",
+ " self.batchnorm2 = tf.keras.layers.BatchNormalization()\n",
+ " \n",
+ " self.conv2 = tf.keras.layers.Conv2DTranspose(32, (5, 5), strides=(2, 2), padding='same', use_bias=False)\n",
+ " self.batchnorm3 = tf.keras.layers.BatchNormalization()\n",
+ " \n",
+ " self.conv3 = tf.keras.layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False)\n",
+ "\n",
+ " def call(self, x, training=True):\n",
+ " x = self.fc1(x)\n",
+ " x = self.batchnorm1(x, training=training)\n",
+ " x = tf.nn.relu(x)\n",
+ "\n",
+ " x = tf.reshape(x, shape=(-1, 7, 7, 64))\n",
+ "\n",
+ " x = self.conv1(x)\n",
+ " x = self.batchnorm2(x, training=training)\n",
+ " x = tf.nn.relu(x)\n",
+ "\n",
+ " x = self.conv2(x)\n",
+ " x = self.batchnorm3(x, training=training)\n",
+ " x = tf.nn.relu(x)\n",
+ "\n",
+ " x = tf.nn.tanh(self.conv3(x)) \n",
+ " return x"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "bkOfJxk5j5Hi"
+ },
+ "outputs": [],
+ "source": [
+ "class Discriminator(tf.keras.Model):\n",
+ " def __init__(self):\n",
+ " super(Discriminator, self).__init__()\n",
+ " self.conv1 = tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same')\n",
+ " self.conv2 = tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same')\n",
+ " self.dropout = tf.keras.layers.Dropout(0.3)\n",
+ " self.flatten = tf.keras.layers.Flatten()\n",
+ " self.fc1 = tf.keras.layers.Dense(1)\n",
+ "\n",
+ " def call(self, x, training=True):\n",
+ " x = tf.nn.leaky_relu(self.conv1(x))\n",
+ " x = self.dropout(x, training=training)\n",
+ " x = tf.nn.leaky_relu(self.conv2(x))\n",
+ " x = self.dropout(x, training=training)\n",
+ " x = self.flatten(x)\n",
+ " x = self.fc1(x)\n",
+ " return x"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "gDkA05NE6QMs"
+ },
+ "outputs": [],
+ "source": [
+ "generator = Generator()\n",
+ "discriminator = Discriminator()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "k1HpMSLImuRi"
+ },
+ "outputs": [],
+ "source": [
+ "# Defun gives 10 secs/epoch performance boost\n",
+ "generator.call = tf.contrib.eager.defun(generator.call)\n",
+ "discriminator.call = tf.contrib.eager.defun(discriminator.call)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "0FMYgY_mPfTi"
+ },
+ "source": [
+ "## Define the loss functions and the optimizer\n",
+ "\n",
+ "* **Discriminator loss**\n",
+ " * The discriminator loss function takes 2 inputs; **real images, generated images**\n",
+ " * real_loss is a sigmoid cross entropy loss of the **real images** and an **array of ones (since these are the real images)**\n",
+ " * generated_loss is a sigmoid cross entropy loss of the **generated images** and an **array of zeros (since these are the fake images)**\n",
+ " * Then the total_loss is the sum of real_loss and the generated_loss\n",
+ " \n",
+ "* **Generator loss**\n",
+ " * It is a sigmoid cross entropy loss of the generated images and an **array of ones**\n",
+ " \n",
+ "\n",
+ "* The discriminator and the generator optimizers are different since we will train them separately."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "wkMNfBWlT-PV"
+ },
+ "outputs": [],
+ "source": [
+ "def discriminator_loss(real_output, generated_output):\n",
+ " # [1,1,...,1] with real output since it is true and we want\n",
+ " # our generated examples to look like it\n",
+ " real_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=tf.ones_like(real_output), logits=real_output)\n",
+ "\n",
+ " # [0,0,...,0] with generated images since they are fake\n",
+ " generated_loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=tf.zeros_like(generated_output), logits=generated_output)\n",
+ "\n",
+ " total_loss = real_loss + generated_loss\n",
+ "\n",
+ " return total_loss"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "90BIcCKcDMxz"
+ },
+ "outputs": [],
+ "source": [
+ "def generator_loss(generated_output):\n",
+ " return tf.losses.sigmoid_cross_entropy(tf.ones_like(generated_output), generated_output)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "iWCn_PVdEJZ7"
+ },
+ "outputs": [],
+ "source": [
+ "discriminator_optimizer = tf.train.AdamOptimizer(1e-4)\n",
+ "generator_optimizer = tf.train.AdamOptimizer(1e-4)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "Rw1fkAczTQYh"
+ },
+ "source": [
+ "## Training\n",
+ "\n",
+ "* We start by iterating over the dataset\n",
+ "* The generator is given **noise as an input** which when passed through the generator model will output a image looking like a handwritten digit\n",
+ "* The discriminator is given the **real MNIST images as well as the generated images (from the generator)**.\n",
+ "* Next, we calculate the generator and the discriminator loss.\n",
+ "* Then, we calculate the gradients of loss with respect to both the generator and the discriminator variables (inputs) and apply those to the optimizer.\n",
+ "\n",
+ "## Generate Images\n",
+ "\n",
+ "* After training, its time to generate some images!\n",
+ "* We start by creating noise array as an input to the generator\n",
+ "* The generator will then convert the noise into handwritten images.\n",
+ "* Last step is to plot the predictions and **voila!**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "NS2GWywBbAWo"
+ },
+ "outputs": [],
+ "source": [
+ "EPOCHS = 150\n",
+ "noise_dim = 100\n",
+ "num_examples_to_generate = 100\n",
+ "\n",
+ "# keeping the random vector constant for generation (prediction) so\n",
+ "# it will be easier to see the improvement of the gan.\n",
+ "random_vector_for_generation = tf.random_normal([num_examples_to_generate,\n",
+ " noise_dim])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "RmdVsmvhPxyy"
+ },
+ "outputs": [],
+ "source": [
+ "def generate_and_save_images(model, epoch, test_input):\n",
+ " # make sure the training parameter is set to False because we\n",
+ " # don't want to train the batchnorm layer when doing inference.\n",
+ " predictions = model(test_input, training=False)\n",
+ "\n",
+ " fig = plt.figure(figsize=(10,10))\n",
+ " \n",
+ " for i in range(predictions.shape[0]):\n",
+ " plt.subplot(10, 10, i+1)\n",
+ " plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n",
+ " plt.axis('off')\n",
+ " \n",
+ " # tight_layout minimizes the overlap between 2 sub-plots\n",
+ " plt.tight_layout()\n",
+ " plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))\n",
+ " plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "2M7LmLtGEMQJ"
+ },
+ "outputs": [],
+ "source": [
+ "def train(dataset, epochs, noise_dim): \n",
+ " for epoch in range(epochs):\n",
+ " start = time.time()\n",
+ " \n",
+ " for images in dataset:\n",
+ " # generating noise from a uniform distribution\n",
+ " noise = tf.random_normal([BATCH_SIZE, noise_dim])\n",
+ " \n",
+ " with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n",
+ " generated_images = generator(noise, training=True)\n",
+ " \n",
+ " real_output = discriminator(images, training=True)\n",
+ " generated_output = discriminator(generated_images, training=True)\n",
+ " \n",
+ " gen_loss = generator_loss(generated_output)\n",
+ " disc_loss = discriminator_loss(real_output, generated_output)\n",
+ " \n",
+ " gradients_of_generator = gen_tape.gradient(gen_loss, generator.variables)\n",
+ " gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.variables)\n",
+ " \n",
+ " generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.variables))\n",
+ " discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.variables))\n",
+ "\n",
+ " \n",
+ " if epoch % 10 == 0:\n",
+ " display.clear_output(wait=True)\n",
+ " generate_and_save_images(generator,\n",
+ " epoch + 1,\n",
+ " random_vector_for_generation)\n",
+ "\n",
+ " print ('Time taken for epoch {} is {} sec'.format(epoch + 1,\n",
+ " time.time()-start))\n",
+ " # generating after the final epoch\n",
+ " generate_and_save_images(generator,\n",
+ " epochs,\n",
+ " random_vector_for_generation)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "Ly3UN0SLLY2l"
+ },
+ "outputs": [],
+ "source": [
+ "train(train_dataset, EPOCHS, noise_dim)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "P4M_vIbUi7c0"
+ },
+ "source": [
+ "# Display an image using the epoch number"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "WfO5wCdclHGL"
+ },
+ "outputs": [],
+ "source": [
+ "def display_image(epoch_no):\n",
+ " plt.figure(figsize=(15,15))\n",
+ " plt.imshow(np.array(PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))))\n",
+ " plt.axis('off')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "5x3q9_Oe5q0A"
+ },
+ "outputs": [],
+ "source": [
+ "display_image(EPOCHS)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "NywiH3nL8guF"
+ },
+ "source": [
+ "## Generate a GIF of all the saved images."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "xmO0Dmu2WICn"
+ },
+ "source": [
+ "\u003c!-- TODO(markdaoust): Remove the hack when Ipython version is updated --\u003e\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "IGKQgENQ8lEI"
+ },
+ "outputs": [],
+ "source": [
+ "with imageio.get_writer('dcgan.gif', mode='I') as writer:\n",
+ " filenames = glob.glob('image*.png')\n",
+ " filenames = sorted(filenames)\n",
+ " for filename in filenames:\n",
+ " image = imageio.imread(filename)\n",
+ " writer.append_data(image)\n",
+ " # this is a hack to display the gif inside the notebook\n",
+ " os.system('mv dcgan.gif dcgan.gif.png')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "uV0yiKpzNP1b"
+ },
+ "outputs": [],
+ "source": [
+ "display.Image(filename=\"dcgan.gif.png\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {
+ "autoexec": {
+ "startup": false,
+ "wait_interval": 0
+ }
+ },
+ "colab_type": "code",
+ "id": "4UJjSnIMOzOJ"
+ },
+ "outputs": [],
+ "source": [
+ ""
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "collapsed_sections": [],
+ "default_view": {},
+ "name": "dcgan.ipynb",
+ "private_outputs": true,
+ "provenance": [
+ {
+ "file_id": "1eb0NOTQapkYs3X0v-zL1x5_LFKgDISnp",
+ "timestamp": 1527173385672
+ }
+ ],
+ "toc_visible": true,
+ "version": "0.3.2",
+ "views": {}
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb b/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb
index 9c7004b049..1a5a186e7a 100644
--- a/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb
+++ b/tensorflow/contrib/eager/python/examples/generative_examples/image_captioning_with_attention.ipynb
@@ -1154,7 +1154,7 @@
},
"cell_type": "code",
"source": [
- "image_url = 'https://tensorflow.org/images/imcap_prediction.png'\n",
+ "image_url = 'https://tensorflow.org/images/surf.jpg'\n",
"image_extension = image_url[-4:]\n",
"image_path = tf.keras.utils.get_file('image'+image_extension, \n",
" origin=image_url)\n",
diff --git a/tensorflow/contrib/eager/python/examples/l2hmc/README.md b/tensorflow/contrib/eager/python/examples/l2hmc/README.md
new file mode 100644
index 0000000000..d6a2ff7558
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/l2hmc/README.md
@@ -0,0 +1,54 @@
+# L2HMC with TensorFlow eager execution
+
+This folder contains an implementation of [L2HMC](https://arxiv.org/pdf/1711.09268.pdf) adapted from the implementation released by the authors. It runs in both eager and graph mode.
+With eager execution enabled, longer sample chains can be handled than in graph mode, since no graph is explicitly stored; there is also no need to use a `tf.while_loop`.
+
+## What is L2HMC?
+L2HMC is an algorithm that learns a non-volume preserving transformation
+for an HMC-like sampling algorithm. More specifically, the non-volume preserving
+transformation is learned with neural nets instantiated within Normalizing Flows
+(more precisely, real-NVPs).
+
+## Content
+
+- `l2hmc.py`: Dynamics definitions and example energy functions,
+including the 2D strongly correlated Gaussian, the rough well energy function,
+and a Gaussian mixture model.
+- `l2hmc_test.py`: Unit tests and benchmarks for training a sampler on the energy functions in both eager and graph mode.
+- `neural_nets.py`: The neural net for learning the kernel on the 2D strongly correlated example.
+- `main.py`: Run to train a sampler on 2D energy landscapes (a minimal usage sketch follows this list).
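+
+The following is a minimal sketch, in eager mode, of how these pieces fit together; it mirrors the usage in `main.py`, and the hyperparameters and iteration count below are illustrative only:
+
+```python
+import tensorflow as tf
+from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
+
+tf.enable_eager_execution()
+
+# Energy function for the 2D strongly correlated Gaussian (target mean and
+# covariance are also returned but unused here).
+energy_fn, _, _ = l2hmc.get_scg_energy_fn()
+dynamics = l2hmc.Dynamics(
+    x_dim=2, minus_loglikelihood_fn=energy_fn, n_steps=10, eps=0.1)
+optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
+
+samples = tf.random_normal(shape=[200, 2])
+for _ in range(5):  # a few iterations, just to illustrate the training loop
+  loss, grads, samples, accept_prob = l2hmc.loss_and_grads(dynamics, samples)
+  optimizer.apply_gradients(zip(grads, dynamics.trainable_variables))
+```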
+
+## To run
+- Make sure you have installed TensorFlow 1.9+ or the latest `tf-nightly` or `tf-nightly-gpu` pip package.
+- Execute the command
+
+```bash
+python main.py --train_dir ${PWD}/dump --use_defun
+```
+
+Specifying the optional argument `train_dir` will store event files for
+TensorBoard and a plot of the sampled chain from the trained sampler.
+
+Specifying the optional argument `use_defun` will let the program use compiled
+graphs when running specific sections and improve the overall speed.
+
+## Boosting Performance with `defun`
+Currently, some models may experience increased overhead with eager execution enabled.
+To improve performance, we could wrap certain functions with the decorator `@tfe.defun`.
+For example, we could wrap the function that does the sampling step:
+
+```python
+@tfe.defun
+def apply_transition(old_sample):
+ new_sample = ...
+ return new_sample
+```
+
+We could also explicitly wrap the desired function with `tfe.defun`:
+
+```python
+apply_transition = tfe.defun(apply_transition)
+```
+
+## Reference
+Generalizing Hamiltonian Monte Carlo with Neural Networks. Levy, Daniel, Hoffman, Matthew D., and Sohl-Dickstein, Jascha. International Conference on Learning Representations (ICLR), 2018.
diff --git a/tensorflow/contrib/eager/python/examples/l2hmc/l2hmc.py b/tensorflow/contrib/eager/python/examples/l2hmc/l2hmc.py
index 729d8525fa..14b8324e48 100644
--- a/tensorflow/contrib/eager/python/examples/l2hmc/l2hmc.py
+++ b/tensorflow/contrib/eager/python/examples/l2hmc/l2hmc.py
@@ -32,20 +32,28 @@ from tensorflow.contrib.eager.python.examples.l2hmc import neural_nets
class Dynamics(tf.keras.Model):
- """Dynamics engine of naive L2HMC sampler.
-
- Args:
- x_dim: dimensionality of observed data
- loglikelihood_fn: log-likelihood function of conditional probability
- n_steps: number of leapfrog steps within each transition
- eps: initial value learnable scale of step size
- """
-
- def __init__(self, x_dim, loglikelihood_fn, n_steps=25, eps=.1):
+ """Dynamics engine of naive L2HMC sampler."""
+
+ def __init__(self,
+ x_dim,
+ minus_loglikelihood_fn,
+ n_steps=25,
+ eps=.1,
+ np_seed=1):
+ """Initialization.
+
+ Args:
+ x_dim: dimensionality of observed data
+ minus_loglikelihood_fn: negative log-likelihood function of conditional probability
+ n_steps: number of leapfrog steps within each transition
+ eps: initial value of the learnable step size scale
+ np_seed: Random seed for numpy; used to control sampled masks.
+ """
super(Dynamics, self).__init__()
+ npr.seed(np_seed)
self.x_dim = x_dim
- self.potential = loglikelihood_fn
+ self.potential = minus_loglikelihood_fn
self.n_steps = n_steps
self._construct_time()
@@ -54,7 +62,7 @@ class Dynamics(tf.keras.Model):
self.position_fn = neural_nets.GenericNet(x_dim, factor=2.)
self.momentum_fn = neural_nets.GenericNet(x_dim, factor=1.)
- self.eps = tfe.Variable(
+ self.eps = tf.Variable(
initial_value=eps, name="eps", dtype=tf.float32, trainable=True)
def apply_transition(self, position):
@@ -68,8 +76,8 @@ class Dynamics(tf.keras.Model):
position, forward=False)
# Decide direction uniformly
- forward_mask = tf.cast(
- tf.random_uniform(shape=[tf.shape(position)[0]]) > .5, tf.float32)
+ batch_size = tf.shape(position)[0]
+ forward_mask = tf.cast(tf.random_uniform((batch_size,)) > .5, tf.float32)
backward_mask = 1. - forward_mask
# Obtain proposed states
@@ -108,7 +116,6 @@ class Dynamics(tf.keras.Model):
position_post, momentum_post, logdet = lf_fn(position_post, momentum_post,
i)
sumlogdet += logdet
-
accept_prob = self._compute_accept_prob(position, momentum, position_post,
momentum_post, sumlogdet)
@@ -125,17 +132,17 @@ class Dynamics(tf.keras.Model):
sumlogdet += logdet
position, logdet = self._update_position_forward(position, momentum, t,
- mask)
+ mask, mask_inv)
sumlogdet += logdet
position, logdet = self._update_position_forward(position, momentum, t,
- mask_inv)
+ mask_inv, mask)
sumlogdet += logdet
momentum, logdet = self._update_momentum_forward(position, momentum, t)
sumlogdet += logdet
- return position, momentum, tf.reduce_sum(sumlogdet, axis=1)
+ return position, momentum, sumlogdet
def _backward_lf(self, position, momentum, i):
"""One backward augmented leapfrog step. See Appendix A in paper."""
@@ -149,17 +156,17 @@ class Dynamics(tf.keras.Model):
sumlogdet += logdet
position, logdet = self._update_position_backward(position, momentum, t,
- mask)
+ mask_inv, mask)
sumlogdet += logdet
position, logdet = self._update_position_backward(position, momentum, t,
- mask_inv)
+ mask, mask_inv)
sumlogdet += logdet
momentum, logdet = self._update_momentum_backward(position, momentum, t)
sumlogdet += logdet
- return position, momentum, tf.reduce_sum(sumlogdet, axis=1)
+ return position, momentum, sumlogdet
def _update_momentum_forward(self, position, momentum, t):
"""Update v in the forward leapfrog step."""
@@ -172,12 +179,11 @@ class Dynamics(tf.keras.Model):
momentum * tf.exp(scale) -
.5 * self.eps * (tf.exp(transformed) * grad - translation))
- return momentum, scale
+ return momentum, tf.reduce_sum(scale, axis=1)
- def _update_position_forward(self, position, momentum, t, mask):
+ def _update_position_forward(self, position, momentum, t, mask, mask_inv):
"""Update x in the forward leapfrog step."""
- mask_inv = 1. - mask
scale, translation, transformed = self.position_fn(
[momentum, mask * position, t])
scale *= self.eps
@@ -186,8 +192,7 @@ class Dynamics(tf.keras.Model):
mask * position +
mask_inv * (position * tf.exp(scale) + self.eps *
(tf.exp(transformed) * momentum + translation)))
-
- return position, mask_inv * scale
+ return position, tf.reduce_sum(mask_inv * scale, axis=1)
def _update_momentum_backward(self, position, momentum, t):
"""Update v in the backward leapfrog step. Inverting the forward update."""
@@ -200,21 +205,20 @@ class Dynamics(tf.keras.Model):
tf.exp(scale) * (momentum + .5 * self.eps *
(tf.exp(transformed) * grad - translation)))
- return momentum, scale
+ return momentum, tf.reduce_sum(scale, axis=1)
- def _update_position_backward(self, position, momentum, t, mask):
+ def _update_position_backward(self, position, momentum, t, mask, mask_inv):
"""Update x in the backward leapfrog step. Inverting the forward update."""
- mask_inv = 1. - mask
scale, translation, transformed = self.position_fn(
- [momentum, mask_inv * position, t])
+ [momentum, mask * position, t])
scale *= -self.eps
transformed *= self.eps
position = (
- mask_inv * position + mask * tf.exp(scale) *
- (position - self.eps * tf.exp(transformed) * momentum + translation))
+ mask * position + mask_inv * tf.exp(scale) *
+ (position - self.eps * (tf.exp(transformed) * momentum + translation)))
- return position, mask * scale
+ return position, tf.reduce_sum(mask_inv * scale, axis=1)
def _compute_accept_prob(self, position, momentum, position_post,
momentum_post, sumlogdet):
@@ -222,8 +226,10 @@ class Dynamics(tf.keras.Model):
old_hamil = self.hamiltonian(position, momentum)
new_hamil = self.hamiltonian(position_post, momentum_post)
+ prob = tf.exp(tf.minimum(old_hamil - new_hamil + sumlogdet, 0.))
- return tf.exp(tf.minimum(old_hamil - new_hamil + sumlogdet, 0.))
+ # Ensure numerical stability as well as correct gradients
+ return tf.where(tf.is_finite(prob), prob, tf.zeros_like(prob))
def _construct_time(self):
"""Convert leapfrog step index into sinusoidal time."""
@@ -248,6 +254,8 @@ class Dynamics(tf.keras.Model):
self.masks = []
for _ in range(self.n_steps):
+ # Need to use npr here because tf would generate different random
+ # values across different `sess.run`
idx = npr.permutation(np.arange(self.x_dim))[:self.x_dim // 2]
mask = np.zeros((self.x_dim,))
mask[idx] = 1.
@@ -273,19 +281,15 @@ class Dynamics(tf.keras.Model):
def grad_potential(self, position, check_numerics=True):
"""Get gradient of potential function at current location."""
- if not tf.executing_eagerly():
- # TODO(lxuechen): Change this to tfe.gradients_function when it works
- grad = tf.gradients(self.potential(position), position)[0]
- else:
+ if tf.executing_eagerly():
grad = tfe.gradients_function(self.potential)(position)[0]
-
- if check_numerics:
- return tf.check_numerics(grad, message="gradient of potential")
+ else:
+ grad = tf.gradients(self.potential(position), position)[0]
return grad
-# Examples of unnormalized log density/probabilities
+# Examples of unnormalized log densities
def get_scg_energy_fn():
"""Get energy function for 2d strongly correlated Gaussian."""
@@ -295,32 +299,53 @@ def get_scg_energy_fn():
sigma_inv = tf.matrix_inverse(sigma)
def energy(x):
- """Unnormalized log density/energy of 2d strongly correlated Gaussian."""
+ """Unnormalized minus log density of 2d strongly correlated Gaussian."""
xmmu = x - mu
return .5 * tf.diag_part(
tf.matmul(tf.matmul(xmmu, sigma_inv), tf.transpose(xmmu)))
- return energy
+ return energy, mu, sigma
-def get_multivariate_gaussian_energy_fn(x_dim=2):
- """Get energy function for 2d strongly correlated Gaussian."""
-
- mu = tf.random_normal(shape=[x_dim])
- # Lower triangularize and positive diagonal
- l = tf.sigmoid(
- tf.matrix_band_part(tf.random_normal(shape=[x_dim, x_dim]), -1, 0))
- # Exploit Cholesky decomposition
- sigma = tf.matmul(l, tf.transpose(l))
- sigma *= 100. # Small covariance causes extreme numerical instability
- sigma_inv = tf.matrix_inverse(sigma)
+def get_rw_energy_fn():
+ """Get energy function for rough well distribution."""
+ # For small eta, the density underlying the rough-well energy is very close to
+ # a unit Gaussian; however, the gradient is greatly affected by the small
+ # cosine perturbations
+ eta = 1e-2
+ mu = tf.constant([0., 0.])
+ sigma = tf.constant([[1., 0.], [0., 1.]])
def energy(x):
- """Unnormalized log density/energy of 2d strongly correlated Gaussian."""
+ ip = tf.reduce_sum(x**2., axis=1)
+ return .5 * ip + eta * tf.reduce_sum(tf.cos(x / eta), axis=1)
- xmmu = x - mu
- return .5 * tf.diag_part(
- tf.matmul(tf.matmul(xmmu, sigma_inv), tf.transpose(xmmu)))
+ return energy, mu, sigma
+
+
+# Loss function
+def compute_loss(dynamics, x, scale=.1, eps=1e-4):
+ """Compute loss defined in equation (8)."""
+
+ z = tf.random_normal(tf.shape(x)) # Auxiliary variable
+ x_, _, x_accept_prob, x_out = dynamics.apply_transition(x)
+ z_, _, z_accept_prob, _ = dynamics.apply_transition(z)
+
+ # Add eps for numerical stability; following released impl
+ x_loss = tf.reduce_sum((x - x_)**2, axis=1) * x_accept_prob + eps
+ z_loss = tf.reduce_sum((z - z_)**2, axis=1) * z_accept_prob + eps
+
+ loss = tf.reduce_mean(
+ (1. / x_loss + 1. / z_loss) * scale - (x_loss + z_loss) / scale, axis=0)
+
+ return loss, x_out, x_accept_prob
+
+
+def loss_and_grads(dynamics, x, loss_fn=compute_loss):
+ """Obtain loss value and gradients."""
+ with tf.GradientTape() as tape:
+ loss_val, out, accept_prob = loss_fn(dynamics, x)
+ grads = tape.gradient(loss_val, dynamics.trainable_variables)
- return energy
+ return loss_val, grads, out, accept_prob
diff --git a/tensorflow/contrib/eager/python/examples/l2hmc/l2hmc_test.py b/tensorflow/contrib/eager/python/examples/l2hmc/l2hmc_test.py
index e33b4cae4c..9557479885 100644
--- a/tensorflow/contrib/eager/python/examples/l2hmc/l2hmc_test.py
+++ b/tensorflow/contrib/eager/python/examples/l2hmc/l2hmc_test.py
@@ -37,63 +37,37 @@ def get_default_hparams():
n_warmup_iters=3)
-# Relevant functions for benchmarking
-def compute_loss(dynamics, x, scale=.1, eps=1e-4):
- """Compute loss defined in equation (8)."""
-
- z = tf.random_normal(tf.shape(x))
- x_, _, x_accept_prob, x_out = dynamics.apply_transition(x)
- z_, _, z_accept_prob, _ = dynamics.apply_transition(z)
-
- # Add eps for numerical stability; following released impl
- x_loss = tf.reduce_sum((x - x_)**2, axis=1) * x_accept_prob + eps
- z_loss = tf.reduce_sum((z - z_)**2, axis=1) * z_accept_prob + eps
-
- loss = tf.reduce_mean(
- (1. / x_loss + 1. / z_loss) * scale - (x_loss + z_loss) / scale, axis=0)
-
- return loss, x_out
-
-
-def loss_and_grads(dynamics, x, loss_fn=compute_loss):
- """Obtain loss value and gradients."""
-
- with tf.GradientTape() as tape:
- loss_val, x_out = loss_fn(dynamics, x)
- grads = tape.gradient(loss_val, dynamics.variables)
-
- return loss_val, grads, x_out
-
-
-def warmup(dynamics, optimizer, n_iters=1, n_samples=200, loss_fn=compute_loss):
+def warmup(dynamics,
+ optimizer,
+ n_iters=1,
+ n_samples=200,
+ loss_fn=l2hmc.compute_loss):
"""Warmup optimization to reduce overhead."""
samples = tf.random_normal(
shape=[n_samples, dynamics.x_dim], dtype=tf.float32)
for _ in range(n_iters):
- _, grads, samples = loss_and_grads(dynamics, samples, loss_fn=loss_fn)
+ _, grads, samples, _ = l2hmc.loss_and_grads(
+ dynamics, samples, loss_fn=loss_fn)
optimizer.apply_gradients(zip(grads, dynamics.variables))
def fit(dynamics,
samples,
optimizer,
- loss_fn=compute_loss,
+ loss_fn=l2hmc.compute_loss,
n_iters=5000,
verbose=True,
- logdir=None,
- decay_lr=True):
+ logdir=None):
"""Fit L2HMC sampler with given log-likelihood function."""
if logdir:
summary_writer = tf.contrib.summary.create_file_writer(logdir)
for i in range(n_iters):
- loss, grads, samples = loss_and_grads(dynamics, samples, loss_fn=loss_fn)
- # TODO(lxuechen): Proper learning rate decay
- if decay_lr:
- grads = [grad * .96**(i // 1000) for grad in grads]
+ loss, grads, samples, _ = l2hmc.loss_and_grads(
+ dynamics, samples, loss_fn=loss_fn)
optimizer.apply_gradients(zip(grads, dynamics.variables))
if verbose:
print("Iteration %d: loss %.4f" % (i, loss))
@@ -112,9 +86,10 @@ class L2hmcTest(tf.test.TestCase):
# Eager mode testing
hparams = get_default_hparams()
+ energy_fn, _, _ = l2hmc.get_scg_energy_fn()
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
- loglikelihood_fn=l2hmc.get_scg_energy_fn(),
+ minus_loglikelihood_fn=energy_fn,
n_steps=hparams.n_steps,
eps=hparams.eps)
samples = tf.random_normal(shape=[hparams.n_samples, hparams.x_dim])
@@ -127,9 +102,10 @@ class L2hmcTest(tf.test.TestCase):
# Graph mode testing
with tf.Graph().as_default():
+ energy_fn, _, _ = l2hmc.get_scg_energy_fn()
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
- loglikelihood_fn=l2hmc.get_scg_energy_fn(),
+ minus_loglikelihood_fn=energy_fn,
n_steps=hparams.n_steps,
eps=hparams.eps)
x = tf.placeholder(tf.float32, shape=[None, hparams.x_dim])
@@ -150,32 +126,20 @@ class L2hmcTest(tf.test.TestCase):
class L2hmcBenchmark(tf.test.Benchmark):
"""Eager and graph benchmarks for l2hmc."""
- def _get_energy_fn(self):
- """Get specific energy function according to FLAGS."""
-
- if FLAGS.energy_fn == "scg":
- energy_fn = l2hmc.get_scg_energy_fn()
- elif FLAGS.energy_fn == "multivariate_gaussian":
- energy_fn = l2hmc.get_multivariate_gaussian_energy_fn(x_dim=FLAGS.x_dim)
- else:
- raise ValueError("No such energy function %s" % FLAGS.energy_fn)
-
- return energy_fn
-
def benchmark_graph(self):
"""Benchmark Graph performance."""
hparams = get_default_hparams()
tf.reset_default_graph()
with tf.Graph().as_default():
- energy_fn = self._get_energy_fn()
+ energy_fn, _, _ = l2hmc.get_scg_energy_fn()
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
- loglikelihood_fn=energy_fn,
+ minus_loglikelihood_fn=energy_fn,
n_steps=hparams.n_steps,
eps=hparams.eps)
x = tf.placeholder(tf.float32, shape=[None, hparams.x_dim])
- loss, x_out = compute_loss(dynamics, x)
+ loss, x_out, _ = l2hmc.compute_loss(dynamics, x)
global_step = tf.Variable(0., name="global_step", trainable=False)
learning_rate = tf.train.exponential_decay(
@@ -183,7 +147,11 @@ class L2hmcBenchmark(tf.test.Benchmark):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
- with tf.Session() as sess:
+ # Single thread; fairer comparison against eager
+ session_conf = tf.ConfigProto(
+ intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
+
+ with tf.Session(config=session_conf) as sess:
sess.run(tf.global_variables_initializer())
# Warmup to reduce initialization effect when timing
@@ -218,14 +186,14 @@ class L2hmcBenchmark(tf.test.Benchmark):
"""Benchmark Eager performance."""
hparams = get_default_hparams()
- energy_fn = self._get_energy_fn()
+ energy_fn, _, _ = l2hmc.get_scg_energy_fn()
dynamics = l2hmc.Dynamics(
x_dim=hparams.x_dim,
- loglikelihood_fn=energy_fn,
+ minus_loglikelihood_fn=energy_fn,
n_steps=hparams.n_steps,
eps=hparams.eps)
optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
- loss_fn = tfe.defun(compute_loss) if defun else compute_loss
+ loss_fn = tfe.defun(l2hmc.compute_loss) if defun else l2hmc.compute_loss
# Warmup to reduce initialization effect when timing
warmup(dynamics, optimizer, n_iters=hparams.n_warmup_iters, loss_fn=loss_fn)
@@ -234,12 +202,7 @@ class L2hmcBenchmark(tf.test.Benchmark):
samples = tf.random_normal(
shape=[hparams.n_samples, hparams.x_dim], dtype=tf.float32)
start_time = time.time()
- fit(dynamics,
- samples,
- optimizer,
- loss_fn=loss_fn,
- n_iters=hparams.n_iters,
- decay_lr=True)
+ fit(dynamics, samples, optimizer, loss_fn=loss_fn, n_iters=hparams.n_iters)
wall_time = time.time() - start_time
examples_per_sec = hparams.n_samples / wall_time
@@ -251,14 +214,8 @@ class L2hmcBenchmark(tf.test.Benchmark):
wall_time=wall_time)
del dynamics
- del loss_fn
if __name__ == "__main__":
- tf.flags.DEFINE_string("energy_fn", "scg",
- ("The energy function/unnormalized log-probability. "
- "Either be `scg` or `multivariate_gaussian`"))
- tf.flags.DEFINE_integer("x_dim", 2, "Dimensionality of observation space.")
- FLAGS = tf.flags.FLAGS
tf.enable_eager_execution()
tf.test.main()
diff --git a/tensorflow/contrib/eager/python/examples/l2hmc/main.py b/tensorflow/contrib/eager/python/examples/l2hmc/main.py
new file mode 100644
index 0000000000..45e1f98429
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/l2hmc/main.py
@@ -0,0 +1,235 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""L2HMC on simple Gaussian mixture model with TensorFlow eager."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+
+from absl import flags
+import numpy as np
+import tensorflow as tf
+from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
+try:
+ import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
+ HAS_MATPLOTLIB = True
+except ImportError:
+ HAS_MATPLOTLIB = False
+tfe = tf.contrib.eager
+
+
+def main(_):
+ tf.enable_eager_execution()
+ global_step = tf.train.get_or_create_global_step()
+ global_step.assign(1)
+
+ energy_fn, mean, covar = {
+ "scg": l2hmc.get_scg_energy_fn(),
+ "rw": l2hmc.get_rw_energy_fn()
+ }[FLAGS.energy_fn]
+
+ x_dim = 2
+ train_iters = 5000
+ eval_iters = 2000
+ eps = 0.1
+ n_steps = 10 # Chain length
+ n_samples = 200
+ record_loss_every = 100
+
+ dynamics = l2hmc.Dynamics(
+ x_dim=x_dim, minus_loglikelihood_fn=energy_fn, n_steps=n_steps, eps=eps)
+ learning_rate = tf.train.exponential_decay(
+ 1e-3, global_step, 1000, 0.96, staircase=True)
+ optimizer = tf.train.AdamOptimizer(learning_rate)
+ checkpointer = tf.train.Checkpoint(
+ optimizer=optimizer, dynamics=dynamics, global_step=global_step)
+
+ if FLAGS.train_dir:
+ summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
+ if FLAGS.restore:
+ latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
+ checkpointer.restore(latest_path)
+ print("Restored latest checkpoint at path:\"{}\" ".format(latest_path))
+ sys.stdout.flush()
+
+ if not FLAGS.restore:
+ # Training
+ if FLAGS.use_defun:
+ # Use `tfe.defun` to boost performance when there are lots of small ops
+ loss_fn = tfe.defun(l2hmc.compute_loss)
+ else:
+ loss_fn = l2hmc.compute_loss
+
+ samples = tf.random_normal(shape=[n_samples, x_dim])
+ for i in range(1, train_iters + 1):
+ loss, samples, accept_prob = train_one_iter(
+ dynamics,
+ samples,
+ optimizer,
+ loss_fn=loss_fn,
+ global_step=global_step)
+
+ if i % record_loss_every == 0:
+ print("Iteration {}, loss {:.4f}, x_accept_prob {:.4f}".format(
+ i, loss.numpy(),
+ accept_prob.numpy().mean()))
+ if FLAGS.train_dir:
+ with summary_writer.as_default():
+ with tf.contrib.summary.always_record_summaries():
+ tf.contrib.summary.scalar("Training loss", loss, step=global_step)
+ print("Training complete.")
+ sys.stdout.flush()
+
+ if FLAGS.train_dir:
+ saved_path = checkpointer.save(
+ file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
+ print("Saved checkpoint at path: \"{}\" ".format(saved_path))
+ sys.stdout.flush()
+
+ # Evaluation
+ if FLAGS.use_defun:
+ # Use tfe.defun to boost performance when there are lots of small ops
+ apply_transition = tfe.defun(dynamics.apply_transition)
+ else:
+ apply_transition = dynamics.apply_transition
+
+ samples = tf.random_normal(shape=[n_samples, x_dim])
+ samples_history = []
+ for i in range(eval_iters):
+ samples_history.append(samples.numpy())
+ _, _, _, samples = apply_transition(samples)
+ samples_history = np.array(samples_history)
+ print("Sampling complete.")
+ sys.stdout.flush()
+
+ # Mean and covariance of target distribution
+ mean = mean.numpy()
+ covar = covar.numpy()
+ ac_spectrum = compute_ac_spectrum(samples_history, mean, covar)
+ print("First 25 entries of the auto-correlation spectrum: {}".format(
+ ac_spectrum[:25]))
+ ess = compute_ess(ac_spectrum)
+ print("Effective sample size per Metropolis-Hastings step: {}".format(ess))
+ sys.stdout.flush()
+
+ if FLAGS.train_dir:
+ # Plot autocorrelation spectrum in tensorboard
+ plot_step = tfe.Variable(1, trainable=False, dtype=tf.int64)
+
+ for ac in ac_spectrum:
+ with summary_writer.as_default():
+ with tf.contrib.summary.always_record_summaries():
+ tf.contrib.summary.scalar("Autocorrelation", ac, step=plot_step)
+ plot_step.assign(plot_step + n_steps)
+
+ if HAS_MATPLOTLIB:
+ # Choose a single chain and plot the trajectory
+ single_chain = samples_history[:, 0, :]
+ xs = single_chain[:100, 0]
+ ys = single_chain[:100, 1]
+ plt.figure()
+ plt.plot(xs, ys, color="orange", marker="o", alpha=0.6) # Trained chain
+ plt.savefig(os.path.join(FLAGS.train_dir, "single_chain.png"))
+
+
+def train_one_iter(dynamics,
+ x,
+ optimizer,
+ loss_fn=l2hmc.compute_loss,
+ global_step=None):
+ """Train the sampler for one iteration."""
+ loss, grads, out, accept_prob = l2hmc.loss_and_grads(
+ dynamics, x, loss_fn=loss_fn)
+ optimizer.apply_gradients(
+ zip(grads, dynamics.trainable_variables), global_step=global_step)
+
+ return loss, out, accept_prob
+
+
+def compute_ac_spectrum(samples_history, target_mean, target_covar):
+ """Compute autocorrelation spectrum.
+
+ Follows equation 15 from the L2HMC paper.
+
+ Args:
+ samples_history: Numpy array of shape [T, B, D], where T is the total
+ number of time steps, B is the batch size, and D is the dimensionality
+ of sample space.
+ target_mean: 1D Numpy array of the mean of target(true) distribution.
+ target_covar: 2D Numpy array representing a symmetric matrix for variance.
+ Returns:
+ Autocorrelation spectrum, Numpy array of shape [T-1].
+ """
+
+ # Using numpy here since eager is a bit slow due to the loop
+ time_steps = samples_history.shape[0]
+ trace = np.trace(target_covar)
+
+ rhos = []
+ for t in range(time_steps - 1):
+ rho_t = 0.
+ for tau in range(time_steps - t):
+ v_tau = samples_history[tau, :, :] - target_mean
+ v_tau_plus_t = samples_history[tau + t, :, :] - target_mean
+ # Take dot product over observation dims and take mean over batch dims
+ rho_t += np.mean(np.sum(v_tau * v_tau_plus_t, axis=1))
+
+ rho_t /= trace * (time_steps - t)
+ rhos.append(rho_t)
+
+ return np.array(rhos)
+
+
+def compute_ess(ac_spectrum):
+ """Compute the effective sample size based on autocorrelation spectrum.
+
+ This follows equation 16 from the L2HMC paper.
+
+ Args:
+ ac_spectrum: Autocorrelation spectrum
+ Returns:
+ The effective sample size
+ """
+ # Cutoff from the first value less than 0.05
+ cutoff = np.argmax(ac_spectrum[1:] < .05)
+ if cutoff == 0:
+ cutoff = len(ac_spectrum)
+ ess = 1. / (1. + 2. * np.sum(ac_spectrum[1:cutoff]))
+ return ess
+
+
+if __name__ == "__main__":
+ flags.DEFINE_string(
+ "train_dir",
+ default=None,
+ help="[Optional] Directory to store the training information")
+ flags.DEFINE_boolean(
+ "restore",
+ default=False,
+ help="[Optional] Restore the latest checkpoint from `train_dir` if True")
+ flags.DEFINE_boolean(
+ "use_defun",
+ default=False,
+ help="[Optional] Use `tfe.defun` to boost performance")
+ flags.DEFINE_string(
+ "energy_fn",
+ default="scg",
+ help="[Optional] The energy function used for experimentation"
+ "Other options include `rw`")
+ FLAGS = flags.FLAGS
+ tf.app.run(main)
diff --git a/tensorflow/contrib/eager/python/examples/l2hmc/neural_nets.py b/tensorflow/contrib/eager/python/examples/l2hmc/neural_nets.py
index e230ad5e25..68e0bc3123 100644
--- a/tensorflow/contrib/eager/python/examples/l2hmc/neural_nets.py
+++ b/tensorflow/contrib/eager/python/examples/l2hmc/neural_nets.py
@@ -25,7 +25,6 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
-import tensorflow.contrib.eager as tfe
class GenericNet(tf.keras.Model):
@@ -47,13 +46,13 @@ class GenericNet(tf.keras.Model):
# Scale
self.scale_layer = _custom_dense(x_dim, .001)
- self.coeff_scale = tfe.Variable(
+ self.coeff_scale = tf.Variable(
initial_value=tf.zeros([1, x_dim]), name='coeff_scale', trainable=True)
# Translation
self.translation_layer = _custom_dense(x_dim, factor=.001)
# Transformation
self.transformation_layer = _custom_dense(x_dim, .001)
- self.coeff_transformation = tfe.Variable(
+ self.coeff_transformation = tf.Variable(
initial_value=tf.zeros([1, x_dim]),
name='coeff_transformation',
trainable=True)
diff --git a/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb b/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb
index 591e2d0c85..5f1b48fa0d 100644
--- a/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb
+++ b/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb
@@ -118,7 +118,6 @@
"cell_type": "code",
"source": [
"import tensorflow as tf\n",
- "tfe = tf.contrib.eager # Shorthand for some symbols\n",
"\n",
"tf.enable_eager_execution()"
],
@@ -184,7 +183,7 @@
},
"cell_type": "code",
"source": [
- "v = tfe.Variable(1.0)\n",
+ "v = tf.Variable(1.0)\n",
"assert v.numpy() == 1.0\n",
"\n",
"# Re-assign the value\n",
@@ -258,8 +257,8 @@
" def __init__(self):\n",
" # Initialize variable to (5.0, 0.0)\n",
" # In practice, these should be initialized to random values.\n",
- " self.W = tfe.Variable(5.0)\n",
- " self.b = tfe.Variable(0.0)\n",
+ " self.W = tf.Variable(5.0)\n",
+ " self.b = tf.Variable(0.0)\n",
" \n",
" def __call__(self, x):\n",
" return self.W * x + self.b\n",
diff --git a/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py b/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py
index b14ef1df8f..07d8788882 100644
--- a/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py
+++ b/tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py
@@ -29,6 +29,7 @@ import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.resnet50 import resnet50
from tensorflow.contrib.summary import summary_test_util
from tensorflow.python.client import device_lib
+from tensorflow.python.eager import tape
def device_and_data_format():
@@ -49,13 +50,21 @@ def random_batch(batch_size, data_format):
return images, one_hot
-def compute_gradients(model, images, labels):
- with tf.GradientTape() as tape:
+def compute_gradients(model, images, labels, num_replicas=1):
+ with tf.GradientTape() as grad_tape:
logits = model(images, training=True)
loss = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
tf.contrib.summary.scalar(name='loss', tensor=loss)
- return tape.gradient(loss, model.variables)
+ if num_replicas != 1:
+ loss /= num_replicas
+
+ # TODO(b/110991947): We can mistakenly trace the gradient call in a
+ # multi-threaded environment. Explicitly disable recording until
+ # this is fixed.
+ with tape.stop_recording():
+ grads = grad_tape.gradient(loss, model.variables)
+ return grads
def apply_gradients(model, optimizer, gradients):
@@ -188,11 +197,14 @@ class ResNet50Benchmarks(tf.test.Benchmark):
return (32,)
return (16, 32)
- def _report(self, label, start, num_iters, device, batch_size, data_format):
+ def _report(self, label, start, num_iters, device, batch_size, data_format,
+ num_replicas=1):
avg_time = (time.time() - start) / num_iters
dev = tf.DeviceSpec.from_string(device).device_type.lower()
- name = '%s_%s_batch_%d_%s' % (label, dev, batch_size, data_format)
- extras = {'examples_per_sec': batch_size / avg_time}
+ replica_str = '' if num_replicas == 1 else 'replicas_%d_' % num_replicas
+ name = '%s_%s_batch_%d_%s%s' % (label, dev, batch_size,
+ replica_str, data_format)
+ extras = {'examples_per_sec': (num_replicas * batch_size) / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
diff --git a/tensorflow/contrib/eager/python/examples/revnet/BUILD b/tensorflow/contrib/eager/python/examples/revnet/BUILD
index 0c0e4c0eb9..3316dc1114 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/BUILD
+++ b/tensorflow/contrib/eager/python/examples/revnet/BUILD
@@ -113,3 +113,39 @@ py_binary(
"//tensorflow:tensorflow_py",
],
)
+
+py_binary(
+ name = "main_estimator",
+ srcs = ["main_estimator.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":cifar_input",
+ ":main",
+ ":revnet",
+ "//tensorflow:tensorflow_py",
+ ],
+)
+
+py_library(
+ name = "main_estimator_lib",
+ srcs = ["main_estimator.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":cifar_input",
+ ":main",
+ ":revnet",
+ "//tensorflow:tensorflow_py",
+ ],
+)
+
+py_library(
+ name = "main_estimator_tpu_lib",
+ srcs = ["main_estimator_tpu.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":cifar_input",
+ ":main",
+ ":revnet",
+ "//tensorflow:tensorflow_py",
+ ],
+)
diff --git a/tensorflow/contrib/eager/python/examples/revnet/blocks.py b/tensorflow/contrib/eager/python/examples/revnet/blocks.py
index 306096e9f8..8a530b0d71 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/blocks.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/blocks.py
@@ -24,6 +24,9 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import functools
+import operator
+
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import ops
@@ -45,7 +48,7 @@ class RevBlock(tf.keras.Model):
bottleneck=False,
fused=True,
dtype=tf.float32):
- """Initialize RevBlock.
+ """Initialization.
Args:
n_res: number of residual blocks
@@ -99,7 +102,6 @@ class RevBlock(tf.keras.Model):
if i == 0:
# First block usually contains downsampling that can't be reversed
with tf.GradientTape() as tape:
- x = tf.identity(x)
tape.watch(x)
y = block(x, training=training)
@@ -121,16 +123,6 @@ class _Residual(tf.keras.Model):
"""Single residual block contained in a _RevBlock. Each `_Residual` object has
two _ResidualInner objects, corresponding to the `F` and `G` functions in the
paper.
-
- Args:
- filters: output filter size
- strides: length 2 list/tuple of integers for height and width strides
- input_shape: length 3 list/tuple of integers
- batch_norm_first: whether to apply activation and batch norm before conv
- data_format: tensor data format, "NCHW"/"NHWC",
- bottleneck: use bottleneck residual if True
- fused: use fused batch normalization if True
- dtype: float16, float32, or float64
"""
def __init__(self,
@@ -142,6 +134,18 @@ class _Residual(tf.keras.Model):
bottleneck=False,
fused=True,
dtype=tf.float32):
+ """Initialization.
+
+ Args:
+ filters: output filter size
+ strides: length 2 list/tuple of integers for height and width strides
+ input_shape: length 3 list/tuple of integers
+ batch_norm_first: whether to apply activation and batch norm before conv
+ data_format: tensor data format, "NCHW"/"NHWC",
+ bottleneck: use bottleneck residual if True
+ fused: use fused batch normalization if True
+ dtype: float16, float32, or float64
+ """
super(_Residual, self).__init__()
self.filters = filters
@@ -196,23 +200,20 @@ class _Residual(tf.keras.Model):
dy1, dy2 = tf.split(dy, num_or_size_splits=2, axis=self.axis)
with tf.GradientTape(persistent=True) as tape:
- y = tf.identity(y)
tape.watch(y)
y1, y2 = tf.split(y, num_or_size_splits=2, axis=self.axis)
- z1 = y1
- gz1 = self.g(z1, training=training)
- x2 = y2 - gz1
+ gy1 = self.g(y1, training=training)
+ x2 = y2 - gy1
fx2 = self.f(x2, training=training)
- x1 = z1 - fx2
+ x1 = y1 - fx2
grads_combined = tape.gradient(
- gz1, [z1] + self.g.trainable_variables, output_gradients=dy2)
- dz1 = dy1 + grads_combined[0]
+ gy1, [y1] + self.g.trainable_variables, output_gradients=dy2)
dg = grads_combined[1:]
- dx1 = dz1
+ dx1 = dy1 + grads_combined[0]
grads_combined = tape.gradient(
- fx2, [x2] + self.f.trainable_variables, output_gradients=dz1)
+ fx2, [x2] + self.f.trainable_variables, output_gradients=dx1)
dx2 = dy2 + grads_combined[0]
df = grads_combined[1:]
@@ -227,131 +228,250 @@ class _Residual(tf.keras.Model):
return x, dx, grads, vars_
-def _BottleneckResidualInner(filters,
- strides,
- input_shape,
- batch_norm_first=True,
- data_format="channels_first",
- fused=True,
- dtype=tf.float32):
+# Ideally, the following should be wrapped in `tf.keras.Sequential`; however,
+# there are subtle issues with its placeholder insertion policy and batch norm
+class _BottleneckResidualInner(tf.keras.Model):
"""Single bottleneck residual inner function contained in _Resdual.
Corresponds to the `F`/`G` functions in the paper.
Suitable for training on ImageNet dataset.
-
- Args:
- filters: output filter size
- strides: length 2 list/tuple of integers for height and width strides
- input_shape: length 3 list/tuple of integers
- batch_norm_first: whether to apply activation and batch norm before conv
- data_format: tensor data format, "NCHW"/"NHWC"
- fused: use fused batch normalization if True
- dtype: float16, float32, or float64
-
- Returns:
- A keras model
"""
- axis = 1 if data_format == "channels_first" else 3
- model = tf.keras.Sequential()
- if batch_norm_first:
- model.add(
- tf.keras.layers.BatchNormalization(
- axis=axis, input_shape=input_shape, fused=fused, dtype=dtype))
- model.add(tf.keras.layers.Activation("relu"))
- model.add(
- tf.keras.layers.Conv2D(
- filters=filters // 4,
- kernel_size=1,
- strides=strides,
- input_shape=input_shape,
- data_format=data_format,
- use_bias=False,
- padding="SAME",
- dtype=dtype))
-
- model.add(
- tf.keras.layers.BatchNormalization(axis=axis, fused=fused, dtype=dtype))
- model.add(tf.keras.layers.Activation("relu"))
- model.add(
- tf.keras.layers.Conv2D(
- filters=filters // 4,
- kernel_size=3,
- strides=(1, 1),
- data_format=data_format,
- use_bias=False,
- padding="SAME",
- dtype=dtype))
-
- model.add(
- tf.keras.layers.BatchNormalization(axis=axis, fused=fused, dtype=dtype))
- model.add(tf.keras.layers.Activation("relu"))
- model.add(
- tf.keras.layers.Conv2D(
- filters=filters,
- kernel_size=1,
- strides=(1, 1),
- data_format=data_format,
- use_bias=False,
- padding="SAME",
- dtype=dtype))
+ def __init__(self,
+ filters,
+ strides,
+ input_shape,
+ batch_norm_first=True,
+ data_format="channels_first",
+ fused=True,
+ dtype=tf.float32):
+ """Initialization.
+
+ Args:
+ filters: output filter size
+ strides: length 2 list/tuple of integers for height and width strides
+ input_shape: length 3 list/tuple of integers
+ batch_norm_first: whether to apply activation and batch norm before conv
+ data_format: tensor data format, "NCHW"/"NHWC"
+ fused: use fused batch normalization if True
+ dtype: float16, float32, or float64
+ """
+ super(_BottleneckResidualInner, self).__init__()
+ axis = 1 if data_format == "channels_first" else 3
+ if batch_norm_first:
+ self.batch_norm_0 = tf.keras.layers.BatchNormalization(
+ axis=axis, input_shape=input_shape, fused=fused, dtype=dtype)
+ self.conv2d_1 = tf.keras.layers.Conv2D(
+ filters=filters // 4,
+ kernel_size=1,
+ strides=strides,
+ input_shape=input_shape,
+ data_format=data_format,
+ use_bias=False,
+ padding="SAME",
+ dtype=dtype)
+
+ self.batch_norm_1 = tf.keras.layers.BatchNormalization(
+ axis=axis, fused=fused, dtype=dtype)
+ self.conv2d_2 = tf.keras.layers.Conv2D(
+ filters=filters // 4,
+ kernel_size=3,
+ strides=(1, 1),
+ data_format=data_format,
+ use_bias=False,
+ padding="SAME",
+ dtype=dtype)
+
+ self.batch_norm_2 = tf.keras.layers.BatchNormalization(
+ axis=axis, fused=fused, dtype=dtype)
+ self.conv2d_3 = tf.keras.layers.Conv2D(
+ filters=filters,
+ kernel_size=1,
+ strides=(1, 1),
+ data_format=data_format,
+ use_bias=False,
+ padding="SAME",
+ dtype=dtype)
+
+ self.batch_norm_first = batch_norm_first
+
+ def call(self, x, training=True):
+ net = x
+ if self.batch_norm_first:
+ net = self.batch_norm_0(net, training=training)
+ net = tf.nn.relu(net)
+ net = self.conv2d_1(net)
+
+ net = self.batch_norm_1(net, training=training)
+ net = tf.nn.relu(net)
+ net = self.conv2d_2(net)
- return model
+ net = self.batch_norm_2(net, training=training)
+ net = tf.nn.relu(net)
+ net = self.conv2d_3(net)
+ return net
-def _ResidualInner(filters,
- strides,
- input_shape,
- batch_norm_first=True,
- data_format="channels_first",
- fused=True,
- dtype=tf.float32):
+
+class _ResidualInner(tf.keras.Model):
"""Single residual inner function contained in _ResdualBlock.
Corresponds to the `F`/`G` functions in the paper.
-
- Args:
- filters: output filter size
- strides: length 2 list/tuple of integers for height and width strides
- input_shape: length 3 list/tuple of integers
- batch_norm_first: whether to apply activation and batch norm before conv
- data_format: tensor data format, "NCHW"/"NHWC"
- fused: use fused batch normalization if True
- dtype: float16, float32, or float64
-
- Returns:
- A keras model
"""
- axis = 1 if data_format == "channels_first" else 3
- model = tf.keras.Sequential()
- if batch_norm_first:
- model.add(
- tf.keras.layers.BatchNormalization(
- axis=axis, input_shape=input_shape, fused=fused, dtype=dtype))
- model.add(tf.keras.layers.Activation("relu"))
- model.add(
- tf.keras.layers.Conv2D(
- filters=filters,
- kernel_size=3,
- strides=strides,
- input_shape=input_shape,
- data_format=data_format,
- use_bias=False,
- padding="SAME",
- dtype=dtype))
-
- model.add(
- tf.keras.layers.BatchNormalization(axis=axis, fused=fused, dtype=dtype))
- model.add(tf.keras.layers.Activation("relu"))
- model.add(
- tf.keras.layers.Conv2D(
- filters=filters,
- kernel_size=3,
- strides=(1, 1),
- data_format=data_format,
- use_bias=False,
+ def __init__(self,
+ filters,
+ strides,
+ input_shape,
+ batch_norm_first=True,
+ data_format="channels_first",
+ fused=True,
+ dtype=tf.float32):
+ """Initialization.
+
+ Args:
+ filters: output filter size
+ strides: length 2 list/tuple of integers for height and width strides
+ input_shape: length 3 list/tuple of integers
+ batch_norm_first: whether to apply activation and batch norm before conv
+ data_format: tensor data format, "NCHW"/"NHWC"
+ fused: use fused batch normalization if True
+ dtype: float16, float32, or float64
+ """
+ super(_ResidualInner, self).__init__()
+ axis = 1 if data_format == "channels_first" else 3
+ if batch_norm_first:
+ self.batch_norm_0 = tf.keras.layers.BatchNormalization(
+ axis=axis, input_shape=input_shape, fused=fused, dtype=dtype)
+ self.conv2d_1 = tf.keras.layers.Conv2D(
+ filters=filters,
+ kernel_size=3,
+ strides=strides,
+ input_shape=input_shape,
+ data_format=data_format,
+ use_bias=False,
+ padding="SAME",
+ dtype=dtype)
+
+ self.batch_norm_1 = tf.keras.layers.BatchNormalization(
+ axis=axis, fused=fused, dtype=dtype)
+ self.conv2d_2 = tf.keras.layers.Conv2D(
+ filters=filters,
+ kernel_size=3,
+ strides=(1, 1),
+ data_format=data_format,
+ use_bias=False,
+ padding="SAME",
+ dtype=dtype)
+
+ self.batch_norm_first = batch_norm_first
+
+ def call(self, x, training=True):
+ net = x
+ if self.batch_norm_first:
+ net = self.batch_norm_0(net, training=training)
+ net = tf.nn.relu(net)
+ net = self.conv2d_1(net)
+
+ net = self.batch_norm_1(net, training=training)
+ net = tf.nn.relu(net)
+ net = self.conv2d_2(net)
+
+ return net
+
+
+class InitBlock(tf.keras.Model):
+ """Initial block of RevNet."""
+
+ def __init__(self, config):
+ """Initialization.
+
+ Args:
+ config: tf.contrib.training.HParams object; specifies hyperparameters
+ """
+ super(InitBlock, self).__init__()
+ self.config = config
+ self.axis = 1 if self.config.data_format == "channels_first" else 3
+ self.conv2d = tf.keras.layers.Conv2D(
+ filters=self.config.init_filters,
+ kernel_size=self.config.init_kernel,
+ strides=(self.config.init_stride, self.config.init_stride),
+ data_format=self.config.data_format,
+ use_bias=False,
+ padding="SAME",
+ input_shape=self.config.input_shape,
+ dtype=self.config.dtype)
+ self.batch_norm = tf.keras.layers.BatchNormalization(
+ axis=self.axis, fused=self.config.fused, dtype=self.config.dtype)
+ self.activation = tf.keras.layers.Activation("relu")
+
+ if self.config.init_max_pool:
+ self.max_pool = tf.keras.layers.MaxPooling2D(
+ pool_size=(3, 3),
+ strides=(2, 2),
padding="SAME",
- dtype=dtype))
+ data_format=self.config.data_format,
+ dtype=self.config.dtype)
+
+ def call(self, x, training=True):
+ net = x
+ net = self.conv2d(net)
+ net = self.batch_norm(net, training=training)
+ net = self.activation(net)
+
+ if self.config.init_max_pool:
+ net = self.max_pool(net)
+
+ return net
+
- return model
+class FinalBlock(tf.keras.Model):
+ """Final block of RevNet."""
+
+ def __init__(self, config):
+ """Initialization.
+
+ Args:
+ config: tf.contrib.training.HParams object; specifies hyperparameters
+
+ Raises:
+ ValueError: Unsupported data format
+ """
+ super(FinalBlock, self).__init__()
+ self.config = config
+ self.axis = 1 if self.config.data_format == "channels_first" else 3
+
+ f = self.config.filters[-1] # Number of filters
+ r = functools.reduce(operator.mul, self.config.strides, 1) # Reduce ratio
+ r *= self.config.init_stride
+ if self.config.init_max_pool:
+ r *= 2
+
+ if self.config.data_format == "channels_first":
+ w, h = self.config.input_shape[1], self.config.input_shape[2]
+ input_shape = (f, w // r, h // r)
+ elif self.config.data_format == "channels_last":
+ w, h = self.config.input_shape[0], self.config.input_shape[1]
+ input_shape = (w // r, h // r, f)
+ else:
+ raise ValueError("Data format should be either `channels_first`"
+ " or `channels_last`")
+ self.batch_norm = tf.keras.layers.BatchNormalization(
+ axis=self.axis,
+ input_shape=input_shape,
+ fused=self.config.fused,
+ dtype=self.config.dtype)
+ self.activation = tf.keras.layers.Activation("relu")
+ self.global_avg_pool = tf.keras.layers.GlobalAveragePooling2D(
+ data_format=self.config.data_format, dtype=self.config.dtype)
+ self.dense = tf.keras.layers.Dense(
+ self.config.n_classes, dtype=self.config.dtype)
+
+ def call(self, x, training=True):
+ net = x
+ net = self.batch_norm(net, training=training)
+ net = self.activation(net)
+ net = self.global_avg_pool(net)
+ net = self.dense(net)
+
+ return net
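A minimal sketch of exercising the refactored blocks under eager execution; the shapes and hyperparameters below are illustrative assumptions rather than values from the RevNet configs (channels_last is chosen so the convolutions also run on CPU), and `_ResidualInner` is a module-private helper.

    import tensorflow as tf
    from tensorflow.contrib.eager.python.examples.revnet import blocks

    tf.enable_eager_execution()

    inner = blocks._ResidualInner(  # pylint: disable=protected-access
        filters=32,
        strides=(1, 1),
        input_shape=(32, 32, 3),      # assumed toy HWC shape
        batch_norm_first=True,
        data_format="channels_last",
        fused=False,
        dtype=tf.float32)

    x = tf.random_normal((8, 32, 32, 3))   # batch of 8 NHWC images
    y = inner(x, training=True)            # BN -> relu -> conv, applied twice
    print(y.shape)                         # (8, 32, 32, 32)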
diff --git a/tensorflow/contrib/eager/python/examples/revnet/cifar_input.py b/tensorflow/contrib/eager/python/examples/revnet/cifar_input.py
index b6d4c35bfd..e9672f13e1 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/cifar_input.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/cifar_input.py
@@ -111,6 +111,6 @@ def get_ds_from_tfrecords(data_dir,
}[split]
dataset = dataset.shuffle(size)
- dataset = dataset.batch(batch_size)
+ dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset
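A tiny sketch (assuming eager execution and a TF build where `drop_remainder` is available, as it is in this change) of what the change above buys: the final partial batch is discarded, so every batch has a fully static shape, which the defun and TPU paths introduced later in this change rely on.

    import tensorflow as tf

    tf.enable_eager_execution()

    ds = tf.data.Dataset.range(10).batch(4, drop_remainder=True)
    for batch in ds:
      print(batch.shape)   # (4,) twice; the trailing batch of 2 is dropped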
diff --git a/tensorflow/contrib/eager/python/examples/revnet/config.py b/tensorflow/contrib/eager/python/examples/revnet/config.py
index 3d93fa955a..821a4878c1 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/config.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/config.py
@@ -27,17 +27,17 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf
-tfe = tf.contrib.eager
def get_hparams_cifar_38():
"""RevNet-38 configurations for CIFAR-10/CIFAR-100."""
config = tf.contrib.training.HParams()
+ config.add_hparam("num_train_images", 50000)
+ config.add_hparam("num_eval_images", 10000)
config.add_hparam("init_filters", 32)
config.add_hparam("init_kernel", 3)
config.add_hparam("init_stride", 1)
- config.add_hparam("n_classes", 10)
config.add_hparam("n_rev_blocks", 3)
config.add_hparam("n_res", [3, 3, 3])
config.add_hparam("filters", [32, 64, 112])
@@ -46,7 +46,7 @@ def get_hparams_cifar_38():
config.add_hparam("bottleneck", False)
config.add_hparam("fused", True)
config.add_hparam("init_max_pool", False)
- if tfe.num_gpus() > 0:
+ if tf.test.is_gpu_available():
config.add_hparam("input_shape", (3, 32, 32))
config.add_hparam("data_format", "channels_first")
else:
@@ -68,9 +68,21 @@ def get_hparams_cifar_38():
config.add_hparam("div255", True)
# This is imprecise, when training with validation set,
# we only have 40k images in training data
- config.add_hparam("iters_per_epoch", 50000 // config.batch_size)
+ config.add_hparam("iters_per_epoch",
+ config.num_train_images // config.batch_size)
config.add_hparam("epochs", config.max_train_iter // config.iters_per_epoch)
+  # TPU-specific hyperparameters; the batch size differs because of TPU
+  # architecture constraints.
+  # The suggested batch sizes reduce overhead from excessive tensor padding:
+  # https://cloud.google.com/tpu/docs/troubleshooting
+ config.add_hparam("tpu_batch_size", 1024)
+ config.add_hparam("tpu_eval_batch_size", 1024)
+ config.add_hparam("tpu_iters_per_epoch",
+ config.num_train_images // config.tpu_batch_size)
+ config.add_hparam("tpu_epochs",
+ config.max_train_iter // config.tpu_iters_per_epoch)
+
return config
@@ -98,15 +110,18 @@ def get_hparams_imagenet_56():
"""RevNet-56 configurations for ImageNet."""
config = tf.contrib.training.HParams()
+ config.add_hparam("n_classes", 1000)
+ config.add_hparam("dataset", "ImageNet")
+ config.add_hparam("num_train_images", 1281167)
+ config.add_hparam("num_eval_images", 50000)
config.add_hparam("init_filters", 128)
config.add_hparam("init_kernel", 7)
config.add_hparam("init_stride", 2)
- config.add_hparam("n_classes", 1000)
config.add_hparam("n_rev_blocks", 4)
config.add_hparam("n_res", [2, 2, 2, 2])
config.add_hparam("filters", [128, 256, 512, 832])
config.add_hparam("strides", [1, 2, 2, 2])
- config.add_hparam("batch_size", 16)
+ config.add_hparam("batch_size", 256)
config.add_hparam("bottleneck", True)
config.add_hparam("fused", True)
config.add_hparam("init_max_pool", True)
@@ -116,6 +131,9 @@ def get_hparams_imagenet_56():
else:
config.add_hparam("input_shape", (224, 224, 3))
config.add_hparam("data_format", "channels_last")
+ # Due to bottleneck residual blocks
+ filters = [f * 4 for f in config.filters]
+ config.filters = filters
# Training details
config.add_hparam("weight_decay", 1e-4)
@@ -125,16 +143,31 @@ def get_hparams_imagenet_56():
config.add_hparam("max_train_iter", 600000)
config.add_hparam("seed", 1234)
config.add_hparam("shuffle", True)
- config.add_hparam("log_every", 50)
- config.add_hparam("save_every", 50)
+ config.add_hparam("log_every", 500)
+ config.add_hparam("save_every", 500)
config.add_hparam("dtype", tf.float32)
- config.add_hparam("eval_batch_size", 1000)
+ config.add_hparam("eval_batch_size", 256)
config.add_hparam("div255", True)
- # TODO(lxuechen): Update this according to ImageNet data
- config.add_hparam("iters_per_epoch", 50000 // config.batch_size)
+ config.add_hparam("iters_per_epoch",
+ config.num_train_images // config.batch_size)
config.add_hparam("epochs", config.max_train_iter // config.iters_per_epoch)
- # Due to bottleneck residual blocks
- filters = [f * 4 for f in config.filters]
- config.filters = filters
+
+  # TPU-specific hyperparameters; the batch size differs because of TPU
+  # architecture constraints.
+  # The suggested batch sizes reduce overhead from excessive tensor padding:
+  # https://cloud.google.com/tpu/docs/troubleshooting
+ config.add_hparam("tpu_batch_size", 1024)
+ config.add_hparam("tpu_eval_batch_size", 1024)
+ config.add_hparam("tpu_iters_per_epoch",
+ config.num_train_images // config.tpu_batch_size)
+ config.add_hparam("tpu_epochs",
+ config.max_train_iter // config.tpu_iters_per_epoch)
+
+ return config
+
+
+def get_hparams_imagenet_104():
+  """RevNet-104 configurations for ImageNet."""
+  config = get_hparams_imagenet_56()
+ config.n_res = [2, 2, 11, 2]
return config
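Back-of-the-envelope arithmetic behind the derived hyperparameters above. Only num_train_images and tpu_batch_size are taken from this change; the CIFAR batch size and max_train_iter below are assumptions for illustration.

    num_train_images = 50000   # from the config above
    tpu_batch_size = 1024      # from the config above
    batch_size = 128           # assumed CIFAR GPU/CPU batch size
    max_train_iter = 80000     # assumed training length

    iters_per_epoch = num_train_images // batch_size          # 390
    epochs = max_train_iter // iters_per_epoch                # 205
    tpu_iters_per_epoch = num_train_images // tpu_batch_size  # 48
    tpu_epochs = max_train_iter // tpu_iters_per_epoch        # 1666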
diff --git a/tensorflow/contrib/eager/python/examples/revnet/main.py b/tensorflow/contrib/eager/python/examples/revnet/main.py
index e2f43b03f9..dcd4e1697f 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/main.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/main.py
@@ -31,8 +31,11 @@ tfe = tf.contrib.eager
def main(_):
"""Eager execution workflow with RevNet trained on CIFAR-10."""
- config = get_config()
- ds_train, ds_train_one_shot, ds_validation, ds_test = get_datasets(config)
+ tf.enable_eager_execution()
+
+ config = get_config(config_name=FLAGS.config, dataset=FLAGS.dataset)
+ ds_train, ds_train_one_shot, ds_validation, ds_test = get_datasets(
+ data_dir=FLAGS.data_dir, config=config)
model = revnet.RevNet(config=config)
global_step = tf.train.get_or_create_global_step() # Ensure correct summary
global_step.assign(1)
@@ -43,6 +46,9 @@ def main(_):
checkpointer = tf.train.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=global_step)
+ if FLAGS.use_defun:
+ model.call = tfe.defun(model.call)
+
if FLAGS.train_dir:
summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
if FLAGS.restore:
@@ -52,46 +58,37 @@ def main(_):
"with global_step: {}".format(latest_path, global_step.numpy()))
sys.stdout.flush()
- if FLAGS.manual_grad:
- print("Using manual gradients.")
- else:
- print("Not using manual gradients.")
- sys.stdout.flush()
-
for x, y in ds_train:
train_one_iter(model, x, y, optimizer, global_step=global_step)
if global_step.numpy() % config.log_every == 0:
- it_train = ds_train_one_shot.make_one_shot_iterator()
it_test = ds_test.make_one_shot_iterator()
- acc_train, loss_train = evaluate(model, it_train)
acc_test, loss_test = evaluate(model, it_test)
if FLAGS.validate:
+ it_train = ds_train_one_shot.make_one_shot_iterator()
it_validation = ds_validation.make_one_shot_iterator()
+ acc_train, loss_train = evaluate(model, it_train)
acc_validation, loss_validation = evaluate(model, it_validation)
print("Iter {}, "
"training set accuracy {:.4f}, loss {:.4f}; "
- "validation set accuracy {:.4f}, loss {:4.f}"
+ "validation set accuracy {:.4f}, loss {:.4f}; "
"test accuracy {:.4f}, loss {:.4f}".format(
global_step.numpy(), acc_train, loss_train, acc_validation,
loss_validation, acc_test, loss_test))
else:
- print("Iter {}, "
- "training set accuracy {:.4f}, loss {:.4f}; "
- "test accuracy {:.4f}, loss {:.4f}".format(
- global_step.numpy(), acc_train, loss_train, acc_test,
- loss_test))
+ print("Iter {}, test accuracy {:.4f}, loss {:.4f}".format(
+ global_step.numpy(), acc_test, loss_test))
sys.stdout.flush()
if FLAGS.train_dir:
with summary_writer.as_default():
with tf.contrib.summary.always_record_summaries():
- tf.contrib.summary.scalar("Training accuracy", acc_train)
tf.contrib.summary.scalar("Test accuracy", acc_test)
- tf.contrib.summary.scalar("Training loss", loss_train)
tf.contrib.summary.scalar("Test loss", loss_test)
if FLAGS.validate:
+ tf.contrib.summary.scalar("Training accuracy", acc_train)
+ tf.contrib.summary.scalar("Training loss", loss_train)
tf.contrib.summary.scalar("Validation accuracy", acc_validation)
tf.contrib.summary.scalar("Validation loss", loss_validation)
@@ -103,34 +100,38 @@ def main(_):
sys.stdout.flush()
-def get_config():
+def get_config(config_name="revnet-38", dataset="cifar-10"):
"""Return configuration."""
- print("Config: {}".format(FLAGS.config))
+ print("Config: {}".format(config_name))
sys.stdout.flush()
config = {
"revnet-38": config_.get_hparams_cifar_38(),
"revnet-110": config_.get_hparams_cifar_110(),
"revnet-164": config_.get_hparams_cifar_164(),
- }[FLAGS.config]
+ }[config_name]
- if FLAGS.dataset == "cifar-100":
- config.n_classes = 100
+ if dataset == "cifar-10":
+ config.add_hparam("n_classes", 10)
+ config.add_hparam("dataset", "cifar-10")
+ else:
+ config.add_hparam("n_classes", 100)
+ config.add_hparam("dataset", "cifar-100")
return config
-def get_datasets(config):
+def get_datasets(data_dir, config):
"""Return dataset."""
- if FLAGS.data_dir is None:
+ if data_dir is None:
raise ValueError("No supplied data directory")
- if not os.path.exists(FLAGS.data_dir):
- raise ValueError("Data directory {} does not exist".format(FLAGS.data_dir))
- if FLAGS.dataset not in ["cifar-10", "cifar-100"]:
- raise ValueError("Unknown dataset {}".format(FLAGS.dataset))
+ if not os.path.exists(data_dir):
+ raise ValueError("Data directory {} does not exist".format(data_dir))
+ if config.dataset not in ["cifar-10", "cifar-100"]:
+ raise ValueError("Unknown dataset {}".format(config.dataset))
- print("Training on {} dataset.".format(FLAGS.dataset))
+ print("Training on {} dataset.".format(config.dataset))
sys.stdout.flush()
- data_dir = os.path.join(FLAGS.data_dir, FLAGS.dataset)
+ data_dir = os.path.join(data_dir, config.dataset)
if FLAGS.validate:
# 40k Training set
ds_train = cifar_input.get_ds_from_tfrecords(
@@ -168,7 +169,7 @@ def get_datasets(config):
prefetch=config.batch_size)
ds_validation = None
- # Always compute loss and accuracy on whole training and test set
+ # Always compute loss and accuracy on whole test set
ds_train_one_shot = cifar_input.get_ds_from_tfrecords(
data_dir=data_dir,
split="train_all",
@@ -196,19 +197,11 @@ def get_datasets(config):
def train_one_iter(model, inputs, labels, optimizer, global_step=None):
"""Train for one iteration."""
- if FLAGS.manual_grad:
- grads, vars_, loss = model.compute_gradients(inputs, labels, training=True)
- optimizer.apply_gradients(zip(grads, vars_), global_step=global_step)
- else: # For correctness validation
- with tf.GradientTape() as tape:
- logits, _ = model(inputs, training=True)
- loss = model.compute_loss(logits=logits, labels=labels)
- tf.logging.info("Logits are placed on device: {}".format(logits.device))
- grads = tape.gradient(loss, model.trainable_variables)
- optimizer.apply_gradients(
- zip(grads, model.trainable_variables), global_step=global_step)
+ grads, vars_, logits, loss = model.compute_gradients(
+ inputs, labels, training=True)
+ optimizer.apply_gradients(zip(grads, vars_), global_step=global_step)
- return loss.numpy()
+ return logits, loss
def evaluate(model, iterator):
@@ -241,16 +234,18 @@ if __name__ == "__main__":
"validate",
default=False,
help="[Optional] Use the validation set or not for hyperparameter search")
- flags.DEFINE_boolean(
- "manual_grad",
- default=False,
- help="[Optional] Use manual gradient graph to save memory")
flags.DEFINE_string(
"dataset",
default="cifar-10",
help="[Optional] The dataset used; either `cifar-10` or `cifar-100`")
flags.DEFINE_string(
- "config", default="revnet-38", help="[Optional] Architecture of network.")
+ "config",
+ default="revnet-38",
+ help="[Optional] Architecture of network. "
+ "Other options include `revnet-110` and `revnet-164`")
+ flags.DEFINE_boolean(
+ "use_defun",
+ default=False,
+ help="[Optional] Use `tfe.defun` to boost performance.")
FLAGS = flags.FLAGS
- tf.enable_eager_execution()
tf.app.run(main)
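A short sketch of the refactored `get_config`, which now takes the architecture and dataset explicitly (so the Estimator scripts below can reuse it) and adds `n_classes`/`dataset` as hparams instead of overwriting a preset value. Likewise, `train_one_iter` now returns `(logits, loss)` instead of a bare numpy loss.

    from tensorflow.contrib.eager.python.examples.revnet import main as main_

    config = main_.get_config(config_name="revnet-38", dataset="cifar-100")
    print(config.n_classes)   # 100
    print(config.dataset)     # "cifar-100"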
diff --git a/tensorflow/contrib/eager/python/examples/revnet/main_estimator.py b/tensorflow/contrib/eager/python/examples/revnet/main_estimator.py
new file mode 100644
index 0000000000..4868f1931f
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/revnet/main_estimator.py
@@ -0,0 +1,200 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Estimator workflow with RevNet train on CIFAR-10."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+from absl import flags
+import tensorflow as tf
+from tensorflow.contrib.eager.python.examples.revnet import cifar_input
+from tensorflow.contrib.eager.python.examples.revnet import main as main_
+from tensorflow.contrib.eager.python.examples.revnet import revnet
+
+
+def model_fn(features, labels, mode, params):
+ """Function specifying the model that is required by the `tf.estimator` API.
+
+ Args:
+ features: Input images
+ labels: Labels of images
+    mode: One of `ModeKeys.TRAIN`, `ModeKeys.EVAL`, or `ModeKeys.PREDICT`
+    params: A dictionary of extra parameters that might be passed
+
+ Returns:
+ An instance of `tf.estimator.EstimatorSpec`
+ """
+
+ inputs = features
+ if isinstance(inputs, dict):
+ inputs = features["image"]
+
+ config = params["config"]
+ model = revnet.RevNet(config=config)
+
+ if mode == tf.estimator.ModeKeys.TRAIN:
+ global_step = tf.train.get_or_create_global_step()
+ learning_rate = tf.train.piecewise_constant(
+ global_step, config.lr_decay_steps, config.lr_list)
+ optimizer = tf.train.MomentumOptimizer(
+ learning_rate, momentum=config.momentum)
+ grads, vars_, logits, loss = model.compute_gradients(
+ inputs, labels, training=True)
+ train_op = optimizer.apply_gradients(
+ zip(grads, vars_), global_step=global_step)
+
+ return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
+ else:
+ logits, _ = model(inputs, training=False)
+ predictions = tf.argmax(logits, axis=1)
+ probabilities = tf.nn.softmax(logits)
+
+ if mode == tf.estimator.ModeKeys.EVAL:
+ loss = model.compute_loss(labels=labels, logits=logits)
+ return tf.estimator.EstimatorSpec(
+ mode=mode,
+ loss=loss,
+ eval_metric_ops={
+ "accuracy":
+ tf.metrics.accuracy(labels=labels, predictions=predictions)
+ })
+
+ else: # mode == tf.estimator.ModeKeys.PREDICT
+ result = {
+ "classes": predictions,
+ "probabilities": probabilities,
+ }
+
+ return tf.estimator.EstimatorSpec(
+ mode=mode,
+ predictions=predictions,
+ export_outputs={
+ "classify": tf.estimator.export.PredictOutput(result)
+ })
+
+
+def get_input_fn(config, data_dir, split):
+ """Get the input function that is required by the `tf.estimator` API.
+
+ Args:
+ config: Customized hyperparameters
+ data_dir: Directory where the data is stored
+    split: One of `train`, `validation`, `train_all`, or `test`
+
+ Returns:
+ Input function required by the `tf.estimator` API
+ """
+
+ data_dir = os.path.join(data_dir, config.dataset)
+ # Fix split-dependent hyperparameters
+ if split == "train_all" or split == "train":
+ data_aug = True
+ batch_size = config.batch_size
+ epochs = config.epochs
+ shuffle = True
+ prefetch = config.batch_size
+ else:
+ data_aug = False
+ batch_size = config.eval_batch_size
+ epochs = 1
+ shuffle = False
+ prefetch = config.eval_batch_size
+
+ def input_fn():
+ """Input function required by the `tf.estimator.Estimator` API."""
+ return cifar_input.get_ds_from_tfrecords(
+ data_dir=data_dir,
+ split=split,
+ data_aug=data_aug,
+ batch_size=batch_size,
+ epochs=epochs,
+ shuffle=shuffle,
+ prefetch=prefetch,
+ data_format=config.data_format)
+
+ return input_fn
+
+
+def main(argv):
+ FLAGS = argv[0] # pylint:disable=invalid-name,redefined-outer-name
+ tf.logging.set_verbosity(tf.logging.INFO)
+
+ # RevNet specific configuration
+ config = main_.get_config(config_name=FLAGS.config, dataset=FLAGS.dataset)
+
+ # Estimator specific configuration
+ run_config = tf.estimator.RunConfig(
+ model_dir=FLAGS.train_dir, # Directory for storing checkpoints
+ tf_random_seed=config.seed,
+ save_summary_steps=config.log_every,
+ save_checkpoints_steps=config.log_every,
+ session_config=None, # Using default
+ keep_checkpoint_max=100,
+ keep_checkpoint_every_n_hours=10000, # Using default
+ log_step_count_steps=config.log_every,
+      train_distribute=None  # Not using a distribution strategy by default
+ )
+
+ # Construct estimator
+ revnet_estimator = tf.estimator.Estimator(
+ model_fn=model_fn,
+ model_dir=FLAGS.train_dir,
+ config=run_config,
+ params={"config": config})
+
+ # Construct input functions
+ train_input_fn = get_input_fn(
+ config=config, data_dir=FLAGS.data_dir, split="train_all")
+ eval_input_fn = get_input_fn(
+ config=config, data_dir=FLAGS.data_dir, split="test")
+
+ # Train and evaluate estimator
+ revnet_estimator.train(input_fn=train_input_fn)
+ revnet_estimator.evaluate(input_fn=eval_input_fn)
+
+ if FLAGS.export:
+ input_shape = (None,) + config.input_shape
+ inputs = tf.placeholder(tf.float32, shape=input_shape)
+ input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
+ "image": inputs
+ })
+ revnet_estimator.export_savedmodel(FLAGS.train_dir, input_fn)
+
+
+if __name__ == "__main__":
+ flags.DEFINE_string(
+ "data_dir", default=None, help="Directory to load tfrecords")
+ flags.DEFINE_string(
+ "train_dir",
+ default=None,
+ help="[Optional] Directory to store the training information")
+ flags.DEFINE_string(
+ "dataset",
+ default="cifar-10",
+ help="[Optional] The dataset used; either `cifar-10` or `cifar-100`")
+ flags.DEFINE_boolean(
+ "export",
+ default=False,
+ help="[Optional] Export the model for serving if True")
+ flags.DEFINE_string(
+ "config",
+ default="revnet-38",
+ help="[Optional] Architecture of network. "
+ "Other options include `revnet-110` and `revnet-164`")
+ FLAGS = flags.FLAGS
+ tf.app.run(main=main, argv=[FLAGS])
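As a usage sketch (assuming a trained checkpoint under FLAGS.train_dir), the evaluate call above returns a plain dict, so the accuracy metric defined in model_fn can be read back directly alongside the loss and global step:

    eval_results = revnet_estimator.evaluate(input_fn=eval_input_fn)
    print(eval_results["accuracy"], eval_results["loss"],
          eval_results["global_step"])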
diff --git a/tensorflow/contrib/eager/python/examples/revnet/main_estimator_tpu.py b/tensorflow/contrib/eager/python/examples/revnet/main_estimator_tpu.py
new file mode 100644
index 0000000000..d809bcd287
--- /dev/null
+++ b/tensorflow/contrib/eager/python/examples/revnet/main_estimator_tpu.py
@@ -0,0 +1,328 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Cloud TPU Estimator workflow with RevNet train on CIFAR-10."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import time
+
+from absl import flags
+import tensorflow as tf
+from tensorflow.contrib.eager.python.examples.revnet import cifar_input
+from tensorflow.contrib.eager.python.examples.revnet import main as main_
+from tensorflow.contrib.eager.python.examples.revnet import revnet
+from tensorflow.contrib.training.python.training import evaluation
+from tensorflow.python.estimator import estimator as estimator_
+
+
+def model_fn(features, labels, mode, params):
+ """Model function required by the `tf.contrib.tpu.TPUEstimator` API.
+
+ Args:
+ features: Input images
+ labels: Labels of images
+    mode: One of `ModeKeys.TRAIN`, `ModeKeys.EVAL`, or `ModeKeys.PREDICT`
+    params: A dictionary of extra parameters that might be passed
+
+ Returns:
+ An instance of `tf.contrib.tpu.TPUEstimatorSpec`
+ """
+
+ inputs = features
+ if isinstance(inputs, dict):
+ inputs = features["image"]
+
+ FLAGS = params["FLAGS"] # pylint:disable=invalid-name,redefined-outer-name
+ config = params["config"]
+ model = revnet.RevNet(config=config)
+
+ if mode == tf.estimator.ModeKeys.TRAIN:
+ global_step = tf.train.get_or_create_global_step()
+ learning_rate = tf.train.piecewise_constant(
+ global_step, config.lr_decay_steps, config.lr_list)
+ optimizer = tf.train.MomentumOptimizer(
+ learning_rate, momentum=config.momentum)
+
+ if FLAGS.use_tpu:
+ optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
+
+ # Define gradients
+ grads, vars_, logits, loss = model.compute_gradients(
+ inputs, labels, training=True)
+ train_op = optimizer.apply_gradients(
+ zip(grads, vars_), global_step=global_step)
+
+ names = [v.name for v in model.variables]
+ tf.logging.warn("{}".format(names))
+
+ return tf.contrib.tpu.TPUEstimatorSpec(
+ mode=tf.estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op)
+
+ elif mode == tf.estimator.ModeKeys.EVAL:
+ logits, _ = model(inputs, training=False)
+ loss = model.compute_loss(labels=labels, logits=logits)
+
+ def metric_fn(labels, logits):
+ predictions = tf.argmax(logits, axis=1)
+ accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions)
+ return {
+ "accuracy": accuracy,
+ }
+
+ return tf.contrib.tpu.TPUEstimatorSpec(
+ mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits]))
+
+ else: # Predict or export
+ logits, _ = model(inputs, training=False)
+ predictions = {
+ "classes": tf.argmax(logits, axis=1),
+ "probabilities": tf.nn.softmax(logits),
+ }
+
+ return tf.contrib.tpu.TPUEstimatorSpec(
+ mode=mode,
+ predictions=predictions,
+ export_outputs={
+ "classify": tf.estimator.export.PredictOutput(predictions)
+ })
+
+
+def get_input_fn(config, data_dir, split):
+ """Get the input function required by the `tf.contrib.tpu.TPUEstimator` API.
+
+ Args:
+ config: Customized hyperparameters
+ data_dir: Directory where the data is stored
+    split: One of `train`, `validation`, `train_all`, or `test`
+
+ Returns:
+ Input function required by the `tf.contrib.tpu.TPUEstimator` API
+ """
+
+ data_dir = os.path.join(data_dir, config.dataset)
+ # Fix split-dependent hyperparameters
+ if split == "train_all" or split == "train":
+ data_aug = True
+ epochs = config.tpu_epochs
+ shuffle = True
+ else:
+ data_aug = False
+ epochs = 1
+ shuffle = False
+
+ def input_fn(params):
+ """Input function required by the `tf.contrib.tpu.TPUEstimator` API."""
+ batch_size = params["batch_size"]
+ return cifar_input.get_ds_from_tfrecords(
+ data_dir=data_dir,
+ split=split,
+ data_aug=data_aug,
+ batch_size=batch_size, # per-shard batch size
+ epochs=epochs,
+ shuffle=shuffle,
+ prefetch=batch_size, # per-shard batch size
+ data_format=config.data_format)
+
+ return input_fn
+
+
+def main(argv):
+ FLAGS = argv[0] # pylint:disable=invalid-name,redefined-outer-name
+ tf.logging.set_verbosity(tf.logging.INFO)
+
+ # RevNet specific configuration
+ config = main_.get_config(config_name=FLAGS.config, dataset=FLAGS.dataset)
+
+ if FLAGS.use_tpu:
+ tf.logging.info("Using TPU.")
+ tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
+ FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
+ else:
+ tpu_cluster_resolver = None
+
+ # TPU specific configuration
+ tpu_config = tf.contrib.tpu.TPUConfig(
+      # Recommended to be the number of global steps until the next checkpoint
+ iterations_per_loop=FLAGS.iterations_per_loop,
+ num_shards=FLAGS.num_shards)
+
+ # Estimator specific configuration
+ run_config = tf.contrib.tpu.RunConfig(
+ cluster=tpu_cluster_resolver,
+ model_dir=FLAGS.model_dir,
+ session_config=tf.ConfigProto(
+ allow_soft_placement=True, log_device_placement=False),
+ tpu_config=tpu_config,
+ )
+
+ # Construct TPU Estimator
+ estimator = tf.contrib.tpu.TPUEstimator(
+ model_fn=model_fn,
+ use_tpu=FLAGS.use_tpu,
+ train_batch_size=config.tpu_batch_size,
+ eval_batch_size=config.tpu_eval_batch_size,
+ config=run_config,
+ params={
+ "FLAGS": FLAGS,
+ "config": config,
+ })
+
+ # Construct input functions
+ train_input_fn = get_input_fn(
+ config=config, data_dir=FLAGS.data_dir, split="train_all")
+ eval_input_fn = get_input_fn(
+ config=config, data_dir=FLAGS.data_dir, split="test")
+
+ # Disabling a range within an else block currently doesn't work
+ # due to https://github.com/PyCQA/pylint/issues/872
+ # pylint: disable=protected-access
+ if FLAGS.mode == "eval":
+ # TPUEstimator.evaluate *requires* a steps argument.
+ # Note that the number of examples used during evaluation is
+ # --eval_steps * --batch_size.
+ # So if you change --batch_size then change --eval_steps too.
+ eval_steps = 10000 // config.tpu_eval_batch_size
+
+ # Run evaluation when there's a new checkpoint
+ for ckpt in evaluation.checkpoints_iterator(
+ FLAGS.model_dir, timeout=FLAGS.eval_timeout):
+ tf.logging.info("Starting to evaluate.")
+ try:
+ start_timestamp = time.time() # This time will include compilation time
+ eval_results = estimator.evaluate(
+ input_fn=eval_input_fn, steps=eval_steps, checkpoint_path=ckpt)
+ elapsed_time = int(time.time() - start_timestamp)
+ tf.logging.info("Eval results: %s. Elapsed seconds: %d" %
+ (eval_results, elapsed_time))
+
+ # Terminate eval job when final checkpoint is reached
+ current_step = int(os.path.basename(ckpt).split("-")[1])
+ if current_step >= config.max_train_iter:
+ tf.logging.info(
+ "Evaluation finished after training step %d" % current_step)
+ break
+
+ except tf.errors.NotFoundError:
+ # Since the coordinator is on a different job than the TPU worker,
+ # sometimes the TPU worker does not finish initializing until long after
+ # the CPU job tells it to start evaluating. In this case, the checkpoint
+ # file could have been deleted already.
+ tf.logging.info(
+ "Checkpoint %s no longer exists, skipping checkpoint" % ckpt)
+
+ else: # FLAGS.mode == 'train' or FLAGS.mode == 'train_and_eval'
+ current_step = estimator_._load_global_step_from_checkpoint_dir(
+ FLAGS.model_dir)
+ tf.logging.info("Training for %d steps . Current"
+ " step %d." % (config.max_train_iter, current_step))
+
+ start_timestamp = time.time() # This time will include compilation time
+ if FLAGS.mode == "train":
+ estimator.train(input_fn=train_input_fn, max_steps=config.max_train_iter)
+ else:
+ eval_steps = 10000 // config.tpu_eval_batch_size
+ assert FLAGS.mode == "train_and_eval"
+ while current_step < config.max_train_iter:
+ # Train for up to steps_per_eval number of steps.
+ # At the end of training, a checkpoint will be written to --model_dir.
+ next_checkpoint = min(current_step + FLAGS.steps_per_eval,
+ config.max_train_iter)
+ estimator.train(input_fn=train_input_fn, max_steps=next_checkpoint)
+ current_step = next_checkpoint
+
+ # Evaluate the model on the most recent model in --model_dir.
+ # Since evaluation happens in batches of --eval_batch_size, some images
+ # may be consistently excluded modulo the batch size.
+ tf.logging.info("Starting to evaluate.")
+ eval_results = estimator.evaluate(
+ input_fn=eval_input_fn, steps=eval_steps)
+ tf.logging.info("Eval results: %s" % eval_results)
+
+ elapsed_time = int(time.time() - start_timestamp)
+ tf.logging.info("Finished training up to step %d. Elapsed seconds %d." %
+ (config.max_train_iter, elapsed_time))
+ # pylint: enable=protected-access
+
+
+if __name__ == "__main__":
+ # Cloud TPU Cluster Resolver flags
+ flags.DEFINE_string(
+ "tpu",
+ default=None,
+ help="The Cloud TPU to use for training. This should be either the name "
+ "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
+ "url.")
+ flags.DEFINE_string(
+ "tpu_zone",
+ default=None,
+ help="[Optional] GCE zone where the Cloud TPU is located in. If not "
+ "specified, we will attempt to automatically detect the GCE project from "
+ "metadata.")
+ flags.DEFINE_string(
+ "gcp_project",
+ default=None,
+ help="[Optional] Project name for the Cloud TPU-enabled project. If not "
+ "specified, we will attempt to automatically detect the GCE project from "
+ "metadata.")
+
+ # Model specific parameters
+ flags.DEFINE_string(
+ "data_dir", default=None, help="Directory to load tfrecords")
+ flags.DEFINE_string(
+ "model_dir",
+ default=None,
+ help="[Optional] Directory to store the model information")
+ flags.DEFINE_string(
+ "dataset",
+ default="cifar-10",
+ help="[Optional] The dataset used; either `cifar-10` or `cifar-100`")
+ flags.DEFINE_string(
+ "config",
+ default="revnet-38",
+ help="[Optional] Architecture of network. "
+ "Other options include `revnet-110` and `revnet-164`")
+ flags.DEFINE_boolean(
+ "use_tpu", default=True, help="[Optional] Whether to use TPU")
+ flags.DEFINE_integer(
+ "num_shards", default=8, help="Number of shards (TPU chips).")
+ flags.DEFINE_integer(
+ "iterations_per_loop",
+ default=100,
+ help=(
+ "Number of steps to run on TPU before feeding metrics to the CPU."
+ " If the number of iterations in the loop would exceed the number of"
+ " train steps, the loop will exit before reaching"
+ " --iterations_per_loop. The larger this value is, the higher the"
+ " utilization on the TPU."))
+ flags.DEFINE_string(
+ "mode",
+ default="train_and_eval",
+ help="[Optional] Mode to run: train, eval, train_and_eval")
+ flags.DEFINE_integer(
+ "eval_timeout", 60 * 60 * 24,
+ "Maximum seconds between checkpoints before evaluation terminates.")
+ flags.DEFINE_integer(
+ "steps_per_eval",
+ default=1000,
+ help=(
+ "Controls how often evaluation is performed. Since evaluation is"
+ " fairly expensive, it is advised to evaluate as infrequently as"
+ " possible (i.e. up to --train_steps, which evaluates the model only"
+ " after finishing the entire training regime)."))
+ FLAGS = flags.FLAGS
+ tf.app.run(main=main, argv=[FLAGS])
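A quick sanity check of the eval_steps arithmetic used in both branches above, with the CIFAR numbers from config.py; it also makes the caveat about whole-batch evaluation concrete.

    num_eval_images = 10000        # num_eval_images in config.py
    tpu_eval_batch_size = 1024     # tpu_eval_batch_size in config.py

    eval_steps = num_eval_images // tpu_eval_batch_size        # 9
    print(eval_steps * tpu_eval_batch_size)                    # 9216 images evaluated
    print(num_eval_images - eval_steps * tpu_eval_batch_size)  # 784 images skipped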
diff --git a/tensorflow/contrib/eager/python/examples/revnet/revnet.py b/tensorflow/contrib/eager/python/examples/revnet/revnet.py
index af0d20fa72..b1cb312b74 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/revnet.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/revnet.py
@@ -24,9 +24,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-import functools
-import operator
-
import six
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.revnet import blocks
@@ -45,71 +42,9 @@ class RevNet(tf.keras.Model):
self.axis = 1 if config.data_format == "channels_first" else 3
self.config = config
- self._init_block = self._construct_init_block()
+ self._init_block = blocks.InitBlock(config=self.config)
+ self._final_block = blocks.FinalBlock(config=self.config)
self._block_list = self._construct_intermediate_blocks()
- self._final_block = self._construct_final_block()
-
- def _construct_init_block(self):
- init_block = tf.keras.Sequential(
- [
- tf.keras.layers.Conv2D(
- filters=self.config.init_filters,
- kernel_size=self.config.init_kernel,
- strides=(self.config.init_stride, self.config.init_stride),
- data_format=self.config.data_format,
- use_bias=False,
- padding="SAME",
- input_shape=self.config.input_shape,
- dtype=self.config.dtype),
- tf.keras.layers.BatchNormalization(
- axis=self.axis,
- fused=self.config.fused,
- dtype=self.config.dtype),
- tf.keras.layers.Activation("relu"),
- ],
- name="init")
- if self.config.init_max_pool:
- init_block.add(
- tf.keras.layers.MaxPooling2D(
- pool_size=(3, 3),
- strides=(2, 2),
- padding="SAME",
- data_format=self.config.data_format,
- dtype=self.config.dtype))
- return init_block
-
- def _construct_final_block(self):
- f = self.config.filters[-1] # Number of filters
- r = functools.reduce(operator.mul, self.config.strides, 1) # Reduce ratio
- r *= self.config.init_stride
- if self.config.init_max_pool:
- r *= 2
-
- if self.config.data_format == "channels_first":
- w, h = self.config.input_shape[1], self.config.input_shape[2]
- input_shape = (f, w // r, h // r)
- elif self.config.data_format == "channels_last":
- w, h = self.config.input_shape[0], self.config.input_shape[1]
- input_shape = (w // r, h // r, f)
- else:
- raise ValueError("Data format should be either `channels_first`"
- " or `channels_last`")
-
- final_block = tf.keras.Sequential(
- [
- tf.keras.layers.BatchNormalization(
- axis=self.axis,
- input_shape=input_shape,
- fused=self.config.fused,
- dtype=self.config.dtype),
- tf.keras.layers.Activation("relu"),
- tf.keras.layers.GlobalAveragePooling2D(
- data_format=self.config.data_format, dtype=self.config.dtype),
- tf.keras.layers.Dense(
- self.config.n_classes, dtype=self.config.dtype)
- ],
- name="final")
- return final_block
def _construct_intermediate_blocks(self):
# Precompute input shape after initial block
@@ -206,13 +141,20 @@ class RevNet(tf.keras.Model):
l2_reg: Apply l2 regularization
Returns:
- list of tuples each being (grad, var) for optimizer to use
+ A tuple with the first entry being a list of all gradients, the second
+ entry being a list of respective variables, the third being the logits,
+      and the fourth being the loss
"""
- # Run forward pass to record hidden states; avoid updating running averages
+ # Run forward pass to record hidden states
vars_and_vals = self.get_moving_stats()
- _, saved_hidden = self.call(inputs, training=training)
- self.restore_moving_stats(vars_and_vals)
+ _, saved_hidden = self(inputs, training=training) # pylint:disable=not-callable
+ if tf.executing_eagerly():
+ # Restore moving averages when executing eagerly to avoid updating twice
+ self.restore_moving_stats(vars_and_vals)
+ else:
+ # Fetch batch norm updates in graph mode
+ updates = self.get_updates_for(inputs)
grads_all = []
vars_all = []
@@ -220,9 +162,8 @@ class RevNet(tf.keras.Model):
# Manually backprop through last block
x = saved_hidden[-1]
with tf.GradientTape() as tape:
- x = tf.identity(x)
tape.watch(x)
- # Running stats updated below
+ # Running stats updated here
logits = self._final_block(x, training=training)
loss = self.compute_loss(logits, labels)
@@ -236,6 +177,7 @@ class RevNet(tf.keras.Model):
for block in reversed(self._block_list):
y = saved_hidden.pop()
x = saved_hidden[-1]
+ # Running stats updated here
dy, grads, vars_ = block.backward_grads_and_vars(
x, y, dy, training=training)
grads_all += grads
@@ -247,8 +189,7 @@ class RevNet(tf.keras.Model):
assert not saved_hidden # Cleared after backprop
with tf.GradientTape() as tape:
- x = tf.identity(x)
- # Running stats updated below
+ # Running stats updated here
y = self._init_block(x, training=training)
grads_all += tape.gradient(
@@ -259,7 +200,13 @@ class RevNet(tf.keras.Model):
if l2_reg:
grads_all = self._apply_weight_decay(grads_all, vars_all)
- return grads_all, vars_all, loss
+ if not tf.executing_eagerly():
+ # Force updates to be executed before gradient computation in graph mode
+ # This does nothing when the function is wrapped in defun
+ with tf.control_dependencies(updates):
+ grads_all[0] = tf.identity(grads_all[0])
+
+ return grads_all, vars_all, logits, loss
def _apply_weight_decay(self, grads, vars_):
"""Update gradients to reflect weight decay."""
@@ -284,8 +231,10 @@ class RevNet(tf.keras.Model):
n = v.name
return n.endswith("moving_mean:0") or n.endswith("moving_variance:0")
- for v in filter(_is_moving_var, self.variables):
- vars_and_vals[v] = v.read_value()
+ device = "/gpu:0" if tf.test.is_gpu_available() else "/cpu:0"
+ with tf.device(device):
+ for v in filter(_is_moving_var, self.variables):
+ vars_and_vals[v] = v.read_value()
return vars_and_vals
@@ -297,5 +246,8 @@ class RevNet(tf.keras.Model):
Args:
vars_and_vals: The dictionary mapping variables to their previous values.
"""
- for var_, val in six.iteritems(vars_and_vals):
- var_.assign(val)
+ device = "/gpu:0" if tf.test.is_gpu_available() else "/cpu:0"
+ with tf.device(device):
+ for var_, val in six.iteritems(vars_and_vals):
+ # `assign` causes a copy to GPU (if variable is already on GPU)
+ var_.assign(val)
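The graph-mode branch of compute_gradients above relies on a standard idiom: fetch the batch-norm update ops for the inputs and tie them to a tensor the training step must compute. A minimal, self-contained sketch of that idiom in plain TF 1.x graph mode, independent of RevNet:

    import tensorflow as tf

    x = tf.random_normal((4, 8))
    bn = tf.keras.layers.BatchNormalization()
    y = bn(x, training=True)
    loss = tf.reduce_sum(y)
    grads = tf.gradients(loss, bn.trainable_variables)

    updates = bn.get_updates_for(x)      # moving mean/variance assign ops
    with tf.control_dependencies(updates):
      grads[0] = tf.identity(grads[0])   # fetching grads now also runs updates

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(grads)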
diff --git a/tensorflow/contrib/eager/python/examples/revnet/revnet_test.py b/tensorflow/contrib/eager/python/examples/revnet/revnet_test.py
index b2ac4b67c9..26b0847523 100644
--- a/tensorflow/contrib/eager/python/examples/revnet/revnet_test.py
+++ b/tensorflow/contrib/eager/python/examples/revnet/revnet_test.py
@@ -31,10 +31,11 @@ tfe = tf.contrib.eager
def train_one_iter(model, inputs, labels, optimizer, global_step=None):
"""Train for one iteration."""
- grads, vars_, loss = model.compute_gradients(inputs, labels, training=True)
+ grads, vars_, logits, loss = model.compute_gradients(
+ inputs, labels, training=True)
optimizer.apply_gradients(zip(grads, vars_), global_step=global_step)
- return loss
+ return logits, loss
class RevNetTest(tf.test.TestCase):
@@ -42,6 +43,8 @@ class RevNetTest(tf.test.TestCase):
def setUp(self):
super(RevNetTest, self).setUp()
config = config_.get_hparams_cifar_38()
+ config.add_hparam("n_classes", 10)
+ config.add_hparam("dataset", "cifar-10")
# Reconstruction could cause numerical error, use double precision for tests
config.dtype = tf.float64
config.fused = False # Fused batch norm does not support tf.float64
@@ -94,7 +97,7 @@ class RevNetTest(tf.test.TestCase):
def test_compute_gradients(self):
"""Test `compute_gradients` function."""
self.model(self.x, training=False) # Initialize model
- grads, vars_, loss = self.model.compute_gradients(
+ grads, vars_, logits, loss = self.model.compute_gradients(
inputs=self.x, labels=self.t, training=True, l2_reg=True)
self.assertTrue(isinstance(grads, list))
self.assertTrue(isinstance(vars_, list))
@@ -119,7 +122,7 @@ class RevNetTest(tf.test.TestCase):
def test_compute_gradients_defun(self):
"""Test `compute_gradients` function with defun."""
compute_gradients = tfe.defun(self.model.compute_gradients)
- grads, vars_, _ = compute_gradients(self.x, self.t, training=True)
+ grads, vars_, _, _ = compute_gradients(self.x, self.t, training=True)
self.assertTrue(isinstance(grads, list))
self.assertTrue(isinstance(vars_, list))
self.assertEqual(len(grads), len(vars_))
@@ -131,6 +134,9 @@ class RevNetTest(tf.test.TestCase):
"""Test model training in graph mode."""
with tf.Graph().as_default():
config = config_.get_hparams_cifar_38()
+ config.add_hparam("n_classes", 10)
+ config.add_hparam("dataset", "cifar-10")
+
x = tf.random_normal(
shape=(self.config.batch_size,) + self.config.input_shape)
t = tf.random_uniform(
@@ -138,17 +144,12 @@ class RevNetTest(tf.test.TestCase):
minval=0,
maxval=self.config.n_classes,
dtype=tf.int32)
- global_step = tfe.Variable(0., trainable=False)
+ global_step = tf.Variable(0., trainable=False)
model = revnet.RevNet(config=config)
- model(x)
- updates = model.get_updates_for(x)
-
- x_ = tf.identity(x)
- grads_all, vars_all, _ = model.compute_gradients(x_, t, training=True)
+ grads_all, vars_all, _, _ = model.compute_gradients(x, t, training=True)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
- with tf.control_dependencies(updates):
- train_op = optimizer.apply_gradients(
- zip(grads_all, vars_all), global_step=global_step)
+ train_op = optimizer.apply_gradients(
+ zip(grads_all, vars_all), global_step=global_step)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
diff --git a/tensorflow/contrib/eager/python/examples/rnn_ptb/rnn_ptb.py b/tensorflow/contrib/eager/python/examples/rnn_ptb/rnn_ptb.py
index c2340a293a..15776c694e 100644
--- a/tensorflow/contrib/eager/python/examples/rnn_ptb/rnn_ptb.py
+++ b/tensorflow/contrib/eager/python/examples/rnn_ptb/rnn_ptb.py
@@ -310,12 +310,12 @@ def main(_):
with tf.device("/device:GPU:0" if have_gpu else None):
# Make learning_rate a Variable so it can be included in the checkpoint
# and we can resume training with the last saved learning_rate.
- learning_rate = tfe.Variable(20.0, name="learning_rate")
+ learning_rate = tf.Variable(20.0, name="learning_rate")
model = PTBModel(corpus.vocab_size(), FLAGS.embedding_dim,
FLAGS.hidden_dim, FLAGS.num_layers, FLAGS.dropout,
use_cudnn_rnn)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
- checkpoint = tfe.Checkpoint(
+ checkpoint = tf.train.Checkpoint(
learning_rate=learning_rate, model=model,
# GradientDescentOptimizer has no state to checkpoint, but noting it
# here lets us swap in an optimizer that does.
diff --git a/tensorflow/contrib/eager/python/examples/sagan/sagan.py b/tensorflow/contrib/eager/python/examples/sagan/sagan.py
index 561be36c91..8130414985 100644
--- a/tensorflow/contrib/eager/python/examples/sagan/sagan.py
+++ b/tensorflow/contrib/eager/python/examples/sagan/sagan.py
@@ -62,7 +62,7 @@ class SelfAttentionModule(tf.keras.Model):
kernel_size=1,
strides=(1, 1),
data_format=data_format)
- self.scale = tfe.Variable(0., trainable=True)
+ self.scale = tf.Variable(0., trainable=True)
def call(self, x):
f = self.f(x)
diff --git a/tensorflow/contrib/eager/python/examples/workshop/2_models.ipynb b/tensorflow/contrib/eager/python/examples/workshop/2_models.ipynb
index 4f1410e00b..f3a65f5aab 100644
--- a/tensorflow/contrib/eager/python/examples/workshop/2_models.ipynb
+++ b/tensorflow/contrib/eager/python/examples/workshop/2_models.ipynb
@@ -69,7 +69,7 @@
"cell_type": "code",
"source": [
"# Creating variables\n",
- "v = tfe.Variable(1.0)\n",
+ "v = tf.Variable(1.0)\n",
"v"
],
"execution_count": 2,
diff --git a/tensorflow/contrib/eager/python/metrics_impl.py b/tensorflow/contrib/eager/python/metrics_impl.py
index efa6ba0626..6efafccd6b 100644
--- a/tensorflow/contrib/eager/python/metrics_impl.py
+++ b/tensorflow/contrib/eager/python/metrics_impl.py
@@ -291,8 +291,6 @@ class Metric(checkpointable.CheckpointableBase):
class Mean(Metric):
"""Computes the (weighted) mean of the given values."""
- # TODO(josh11b): Maybe have a dtype argument that defaults to tf.float64?
- # Or defaults to type of the input if it is tf.float32, else tf.float64?
def __init__(self, name=None, dtype=dtypes.float64,
use_global_variables=False):
@@ -377,7 +375,7 @@ class Accuracy(Mean):
array_ops.shape(labels), array_ops.shape(predictions),
message="Shapes of labels and predictions are unequal")
matches = math_ops.equal(labels, predictions)
- matches = math_ops.cast(matches, dtypes.float64)
+ matches = math_ops.cast(matches, self.dtype)
super(Accuracy, self).call(matches, weights=weights)
if weights is None:
return labels, predictions
@@ -421,7 +419,7 @@ class CategoricalAccuracy(Mean):
labels = math_ops.argmax(labels, axis=-1)
predictions = math_ops.argmax(predictions, axis=-1)
matches = math_ops.equal(labels, predictions)
- matches = math_ops.cast(matches, dtypes.float64)
+ matches = math_ops.cast(matches, self.dtype)
super(CategoricalAccuracy, self).call(matches, weights=weights)
if weights is None:
return labels, predictions
@@ -472,7 +470,7 @@ class BinaryAccuracy(Mean):
predictions = ops.convert_to_tensor(predictions)
predictions = predictions > self.threshold
matches = math_ops.equal(labels, predictions)
- matches = math_ops.cast(matches, dtypes.float64)
+ matches = math_ops.cast(matches, self.dtype)
super(BinaryAccuracy, self).call(matches, weights=weights)
if weights is None:
return labels, predictions
@@ -520,7 +518,7 @@ class SparseAccuracy(Mean):
predictions = math_ops.argmax(predictions, axis=-1)
labels = math_ops.cast(labels, dtypes.int64)
matches = math_ops.equal(labels, predictions)
- matches = math_ops.cast(matches, dtypes.float64)
+ matches = math_ops.cast(matches, self.dtype)
super(SparseAccuracy, self).call(matches, weights=weights)
if weights is None:
return labels, predictions
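A small eager-mode sketch of the effect of casting matches to self.dtype above: a metric constructed with a non-default dtype now does its bookkeeping in that dtype rather than in hard-coded float64. The dtype and values here are illustrative assumptions.

    import tensorflow as tf

    tfe = tf.contrib.eager
    tf.enable_eager_execution()

    accuracy = tfe.metrics.Accuracy(dtype=tf.float32)
    accuracy(tf.constant([1, 2, 3]), tf.constant([1, 2, 0]))
    result = accuracy.result()
    print(result.dtype, result.numpy())   # float32, 0.6666667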
diff --git a/tensorflow/contrib/eager/python/saver.py b/tensorflow/contrib/eager/python/saver.py
index fdaca90fd1..d709308647 100644
--- a/tensorflow/contrib/eager/python/saver.py
+++ b/tensorflow/contrib/eager/python/saver.py
@@ -125,8 +125,8 @@ class Saver(object):
Args:
var_list: The list of variables that will be saved and restored. Either a
- list of `tfe.Variable` objects, or a dictionary mapping names to
- `tfe.Variable` objects.
+ list of `tf.Variable` objects, or a dictionary mapping names to
+ `tf.Variable` objects.
Raises:
RuntimeError: if invoked when eager execution has not been enabled.
diff --git a/tensorflow/contrib/eager/python/tfe_test.py b/tensorflow/contrib/eager/python/tfe_test.py
index db50b33af2..4454abfb96 100644
--- a/tensorflow/contrib/eager/python/tfe_test.py
+++ b/tensorflow/contrib/eager/python/tfe_test.py
@@ -27,7 +27,6 @@ from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
-from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.summary.writer import writer
@@ -45,12 +44,6 @@ class TFETest(test_util.TensorFlowTestCase):
r'indices = 7 is not in \[0, 3\)'):
array_ops.gather([0, 1, 2], 7)
- def testVariableError(self):
- with self.assertRaisesRegexp(
- RuntimeError,
- r'Variable not supported when eager execution is enabled'):
- variables.Variable(initial_value=1.0)
-
def testGradients(self):
def square(x):
diff --git a/tensorflow/contrib/estimator/BUILD b/tensorflow/contrib/estimator/BUILD
index 11d40f5982..1aa3df8d8d 100644
--- a/tensorflow/contrib/estimator/BUILD
+++ b/tensorflow/contrib/estimator/BUILD
@@ -28,7 +28,7 @@ py_library(
":multi_head",
":replicate_model_fn",
":rnn",
- "//tensorflow/python:util",
+ "//tensorflow:tensorflow_py_no_contrib",
],
)
@@ -54,22 +54,10 @@ py_test(
deps = [
":baseline",
":head",
- "//tensorflow/python:check_ops",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:control_flow_ops",
- "//tensorflow/python:dtypes",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:math_ops",
- "//tensorflow/python:platform",
- "//tensorflow/python:session",
- "//tensorflow/python:summary",
- "//tensorflow/python:training",
- "//tensorflow/python:variables",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:export_export",
"//tensorflow/python/estimator:metric_keys",
"//tensorflow/python/estimator:numpy_io",
- "//tensorflow/python/feature_column",
- "//tensorflow/python/ops/losses",
"//third_party/py/numpy",
"@six_archive//:six",
],
@@ -96,11 +84,8 @@ py_test(
],
deps = [
":boosted_trees",
- "//tensorflow/python:dtypes",
- "//tensorflow/python:framework_test_lib",
- "//tensorflow/python:training",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:numpy_io",
- "//tensorflow/python/feature_column",
"//third_party/py/numpy",
],
)
@@ -110,7 +95,7 @@ py_library(
srcs = ["python/estimator/dnn.py"],
srcs_version = "PY2AND3",
deps = [
- "//tensorflow/python:nn",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator",
"//tensorflow/python/estimator:dnn",
],
@@ -129,16 +114,11 @@ py_test(
deps = [
":dnn",
":head",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:platform",
- "//tensorflow/python:summary",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:dnn_testing_utils",
"//tensorflow/python/estimator:export_export",
"//tensorflow/python/estimator:numpy_io",
"//tensorflow/python/estimator:prediction_keys",
- "//tensorflow/python/feature_column",
- "//tensorflow/python/ops/losses",
"//third_party/py/numpy",
"@six_archive//:six",
],
@@ -149,7 +129,7 @@ py_library(
srcs = ["python/estimator/dnn_linear_combined.py"],
srcs_version = "PY2AND3",
deps = [
- "//tensorflow/python:nn",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator",
"//tensorflow/python/estimator:dnn_linear_combined",
],
@@ -168,18 +148,12 @@ py_test(
deps = [
":dnn_linear_combined",
":head",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:nn",
- "//tensorflow/python:platform",
- "//tensorflow/python:summary",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:dnn_testing_utils",
"//tensorflow/python/estimator:export_export",
"//tensorflow/python/estimator:linear_testing_utils",
"//tensorflow/python/estimator:numpy_io",
"//tensorflow/python/estimator:prediction_keys",
- "//tensorflow/python/feature_column",
- "//tensorflow/python/ops/losses",
"//third_party/py/numpy",
"@six_archive//:six",
],
@@ -192,10 +166,7 @@ py_library(
],
srcs_version = "PY2AND3",
deps = [
- "//tensorflow/python:clip_ops",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:sparse_tensor",
- "//tensorflow/python:training",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator",
"//tensorflow/python/estimator:model_fn",
"//tensorflow/python/estimator:util",
@@ -211,18 +182,11 @@ py_test(
tags = ["notsan"], # b/62863147
deps = [
":extenders",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/contrib/data/python/ops:dataset_ops",
"//tensorflow/contrib/predictor",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:constant_op",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:metrics",
- "//tensorflow/python:sparse_tensor",
- "//tensorflow/python:training",
- "//tensorflow/python:variables",
"//tensorflow/python/estimator:estimator_py",
"//tensorflow/python/estimator:linear",
- "//tensorflow/python/feature_column",
"//third_party/py/numpy",
],
)
@@ -246,21 +210,11 @@ py_test(
tags = ["notsan"], # b/62863147
deps = [
":export",
- "//tensorflow/python:array_ops",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:metrics",
- "//tensorflow/python:parsing_ops",
- "//tensorflow/python:session",
- "//tensorflow/python:state_ops",
- "//tensorflow/python:training",
- "//tensorflow/python:util",
- "//tensorflow/python:variables",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator",
"//tensorflow/python/estimator:export_export",
"//tensorflow/python/estimator:export_output",
"//tensorflow/python/estimator:model_fn",
- "//tensorflow/python/saved_model:loader",
- "//tensorflow/python/saved_model:tag_constants",
],
)
@@ -271,25 +225,12 @@ py_library(
],
srcs_version = "PY2AND3",
deps = [
- "//tensorflow/python:array_ops",
- "//tensorflow/python:check_ops",
- "//tensorflow/python:dtypes",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:lookup_ops",
- "//tensorflow/python:math_ops",
- "//tensorflow/python:metrics",
- "//tensorflow/python:nn",
- "//tensorflow/python:sparse_ops",
- "//tensorflow/python:sparse_tensor",
- "//tensorflow/python:summary",
- "//tensorflow/python:training",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:export_output",
"//tensorflow/python/estimator:head",
"//tensorflow/python/estimator:metric_keys",
"//tensorflow/python/estimator:model_fn",
"//tensorflow/python/estimator:prediction_keys",
- "//tensorflow/python/ops/losses",
- "//tensorflow/python/saved_model:signature_constants",
],
)
@@ -300,25 +241,10 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":head",
- "//tensorflow/core:protos_all_py",
- "//tensorflow/python:array_ops",
- "//tensorflow/python:check_ops",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:constant_op",
- "//tensorflow/python:control_flow_ops",
- "//tensorflow/python:dtypes",
- "//tensorflow/python:errors",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:math_ops",
- "//tensorflow/python:sparse_tensor",
- "//tensorflow/python:string_ops",
- "//tensorflow/python:training",
- "//tensorflow/python:variables",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:metric_keys",
"//tensorflow/python/estimator:model_fn",
"//tensorflow/python/estimator:prediction_keys",
- "//tensorflow/python/ops/losses",
- "//tensorflow/python/saved_model:signature_constants",
"//third_party/py/numpy",
"@six_archive//:six",
],
@@ -331,8 +257,7 @@ py_library(
],
srcs_version = "PY2AND3",
deps = [
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:training",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:estimator_py",
],
)
@@ -345,10 +270,7 @@ py_test(
tags = ["notsan"],
deps = [
":hooks",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:training",
- "//tensorflow/python/data/ops:dataset_ops",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:estimator_py",
"//third_party/py/numpy",
"@six_archive//:six",
@@ -377,16 +299,11 @@ py_test(
deps = [
":head",
":linear",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:platform",
- "//tensorflow/python:summary",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:export_export",
"//tensorflow/python/estimator:linear_testing_utils",
"//tensorflow/python/estimator:numpy_io",
"//tensorflow/python/estimator:prediction_keys",
- "//tensorflow/python/feature_column",
- "//tensorflow/python/ops/losses",
"//third_party/py/numpy",
"@six_archive//:six",
],
@@ -399,8 +316,7 @@ py_library(
],
srcs_version = "PY2AND3",
deps = [
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:util",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:dnn",
"//tensorflow/python/estimator:linear",
],
@@ -413,9 +329,7 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":logit_fns",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:constant_op",
- "//tensorflow/python:session",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:model_fn",
],
)
@@ -427,18 +341,11 @@ py_library(
],
srcs_version = "PY2AND3",
deps = [
- "//tensorflow/python:array_ops",
- "//tensorflow/python:control_flow_ops",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:math_ops",
- "//tensorflow/python:metrics",
- "//tensorflow/python:summary",
- "//tensorflow/python:training",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:export_output",
"//tensorflow/python/estimator:head",
"//tensorflow/python/estimator:metric_keys",
"//tensorflow/python/estimator:model_fn",
- "//tensorflow/python/saved_model:signature_constants",
"@six_archive//:six",
],
)
@@ -451,15 +358,10 @@ py_test(
deps = [
":head",
":multi_head",
- "//tensorflow/core:protos_all_py",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:constant_op",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:string_ops",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:metric_keys",
"//tensorflow/python/estimator:model_fn",
"//tensorflow/python/estimator:prediction_keys",
- "//tensorflow/python/saved_model:signature_constants",
"//third_party/py/numpy",
"@six_archive//:six",
],
@@ -472,24 +374,10 @@ py_library(
],
srcs_version = "PY2AND3",
deps = [
- "//tensorflow/core:protos_all_py",
- "//tensorflow/python:array_ops",
- "//tensorflow/python:control_flow_ops",
- "//tensorflow/python:device",
- "//tensorflow/python:device_lib",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:math_ops",
- "//tensorflow/python:platform",
- "//tensorflow/python:sparse_ops",
- "//tensorflow/python:sparse_tensor",
- "//tensorflow/python:state_ops",
- "//tensorflow/python:training",
- "//tensorflow/python:util",
- "//tensorflow/python:variable_scope",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator:export_output",
"//tensorflow/python/estimator:model_fn",
"//tensorflow/python/estimator:util",
- "//tensorflow/python/ops/losses",
"@six_archive//:six",
],
)
@@ -500,6 +388,7 @@ cuda_py_test(
srcs = ["python/estimator/replicate_model_fn_test.py"],
additional_deps = [
"@absl_py//absl/testing:parameterized",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator",
"//tensorflow/python/estimator:dnn",
"//tensorflow/python/estimator:export_export",
@@ -508,21 +397,6 @@ cuda_py_test(
"//tensorflow/python/estimator:numpy_io",
"//tensorflow/python/estimator:optimizers",
"//tensorflow/python/estimator:prediction_keys",
- "//tensorflow/python/feature_column",
- "//tensorflow/python/ops/losses",
- "//tensorflow/python/saved_model:signature_constants",
- "//tensorflow/python:array_ops",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:control_flow_ops",
- "//tensorflow/python:framework_for_generated_wrappers",
- "//tensorflow/python:framework_test_lib",
- "//tensorflow/python:math_ops",
- "//tensorflow/python:metrics",
- "//tensorflow/python:platform",
- "//tensorflow/python:summary",
- "//tensorflow/python:training",
- "//tensorflow/python:variable_scope",
- "//tensorflow/python:variables",
":replicate_model_fn",
],
tags = [
@@ -538,22 +412,11 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":extenders",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/contrib/feature_column:feature_column_py",
- "//tensorflow/python:array_ops",
- "//tensorflow/python:check_ops",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:init_ops",
- "//tensorflow/python:layers",
- "//tensorflow/python:partitioned_variables",
- "//tensorflow/python:rnn",
- "//tensorflow/python:rnn_cell",
- "//tensorflow/python:summary",
- "//tensorflow/python:training",
- "//tensorflow/python:variable_scope",
"//tensorflow/python/estimator",
"//tensorflow/python/estimator:head",
"//tensorflow/python/estimator:optimizers",
- "//tensorflow/python/feature_column",
"@six_archive//:six",
],
)
@@ -572,21 +435,10 @@ py_test(
deps = [
":head",
":rnn",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/contrib/data",
- "//tensorflow/core:protos_all_py",
- "//tensorflow/python:check_ops",
- "//tensorflow/python:client_testlib",
- "//tensorflow/python:dtypes",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:lib",
- "//tensorflow/python:math_ops",
- "//tensorflow/python:state_ops",
- "//tensorflow/python:summary",
- "//tensorflow/python:training",
- "//tensorflow/python:variables",
"//tensorflow/python/estimator:numpy_io",
"//tensorflow/python/estimator:parsing_utils",
- "//tensorflow/python/feature_column",
"//third_party/py/numpy",
"@six_archive//:six",
],
@@ -597,13 +449,7 @@ py_library(
srcs = ["python/estimator/early_stopping.py"],
srcs_version = "PY2AND3",
deps = [
- "//tensorflow/python:dtypes",
- "//tensorflow/python:framework_ops",
- "//tensorflow/python:init_ops",
- "//tensorflow/python:platform",
- "//tensorflow/python:state_ops",
- "//tensorflow/python:summary",
- "//tensorflow/python:training",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator",
],
)
@@ -614,7 +460,7 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":early_stopping",
- "//tensorflow/python:client_testlib",
+ "//tensorflow:tensorflow_py_no_contrib",
"//tensorflow/python/estimator",
"@absl_py//absl/testing:parameterized",
],
diff --git a/tensorflow/contrib/estimator/python/estimator/early_stopping.py b/tensorflow/contrib/estimator/python/estimator/early_stopping.py
index af4855e91e..3eab21d5ac 100644
--- a/tensorflow/contrib/estimator/python/estimator/early_stopping.py
+++ b/tensorflow/contrib/estimator/python/estimator/early_stopping.py
@@ -394,10 +394,11 @@ def _summaries(eval_dir):
Yields:
`tensorflow.Event` object read from the event files.
"""
- for event_file in gfile.Glob(
- os.path.join(eval_dir, _EVENT_FILE_GLOB_PATTERN)):
- for event in summary_iterator.summary_iterator(event_file):
- yield event
+ if gfile.Exists(eval_dir):
+ for event_file in gfile.Glob(
+ os.path.join(eval_dir, _EVENT_FILE_GLOB_PATTERN)):
+ for event in summary_iterator.summary_iterator(event_file):
+ yield event
def _get_or_create_stop_var():
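
As a minimal illustration of the guard introduced above, the same "empty result instead of an error" behaviour can be sketched with the standard library (glob/os are used here as stand-ins for gfile and summary_iterator; that substitution is an assumption for illustration only):

import glob
import os

EVENT_FILE_GLOB_PATTERN = 'events.out.tfevents.*'  # assumed pattern, illustration only

def iter_event_files(eval_dir):
  # Yield nothing, rather than raising, when the eval directory is absent.
  if os.path.isdir(eval_dir):
    for event_file in glob.glob(
        os.path.join(eval_dir, EVENT_FILE_GLOB_PATTERN)):
      yield event_file

# A missing directory now produces an empty iterator, not an error.
assert list(iter_event_files('/nonexistent/eval_dir')) == []
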
diff --git a/tensorflow/contrib/estimator/python/estimator/early_stopping_test.py b/tensorflow/contrib/estimator/python/estimator/early_stopping_test.py
index b5eee818fa..e4bfd4b446 100644
--- a/tensorflow/contrib/estimator/python/estimator/early_stopping_test.py
+++ b/tensorflow/contrib/estimator/python/estimator/early_stopping_test.py
@@ -92,6 +92,19 @@ class ReadEvalMetricsTest(test.TestCase):
},
}, early_stopping.read_eval_metrics(eval_dir))
+ def test_read_eval_metrics_when_no_events(self):
+ eval_dir = tempfile.mkdtemp()
+ self.assertTrue(os.path.exists(eval_dir))
+
+ # No error should be raised when eval directory exists with no event files.
+ self.assertEqual({}, early_stopping.read_eval_metrics(eval_dir))
+
+ os.rmdir(eval_dir)
+ self.assertFalse(os.path.exists(eval_dir))
+
+ # No error should be raised when eval directory does not exist.
+ self.assertEqual({}, early_stopping.read_eval_metrics(eval_dir))
+
class EarlyStoppingHooksTest(test.TestCase, parameterized.TestCase):
diff --git a/tensorflow/contrib/estimator/python/estimator/head.py b/tensorflow/contrib/estimator/python/estimator/head.py
index c9d86ef4ab..34f765d565 100644
--- a/tensorflow/contrib/estimator/python/estimator/head.py
+++ b/tensorflow/contrib/estimator/python/estimator/head.py
@@ -943,20 +943,30 @@ class _MultiLabelHead(head_lib._Head): # pylint:disable=protected-access
class_probabilities = array_ops.slice(
probabilities, begin=begin, size=size)
class_labels = array_ops.slice(labels, begin=begin, size=size)
- prob_key = keys.PROBABILITY_MEAN_AT_CLASS % class_id
+ if self._label_vocabulary is None:
+ prob_key = keys.PROBABILITY_MEAN_AT_CLASS % class_id
+ else:
+ prob_key = (
+ keys.PROBABILITY_MEAN_AT_NAME % self._label_vocabulary[class_id])
metric_ops[head_lib._summary_key(self._name, prob_key)] = ( # pylint:disable=protected-access
head_lib._predictions_mean( # pylint:disable=protected-access
predictions=class_probabilities,
weights=weights,
name=prob_key))
- auc_key = keys.AUC_AT_CLASS % class_id
+ if self._label_vocabulary is None:
+ auc_key = keys.AUC_AT_CLASS % class_id
+ else:
+ auc_key = keys.AUC_AT_NAME % self._label_vocabulary[class_id]
metric_ops[head_lib._summary_key(self._name, auc_key)] = ( # pylint:disable=protected-access
head_lib._auc( # pylint:disable=protected-access
labels=class_labels,
predictions=class_probabilities,
weights=weights,
name=auc_key))
- auc_pr_key = keys.AUC_PR_AT_CLASS % class_id
+ if self._label_vocabulary is None:
+ auc_pr_key = keys.AUC_PR_AT_CLASS % class_id
+ else:
+ auc_pr_key = keys.AUC_PR_AT_NAME % self._label_vocabulary[class_id]
metric_ops[head_lib._summary_key(self._name, auc_pr_key)] = ( # pylint:disable=protected-access
head_lib._auc( # pylint:disable=protected-access
labels=class_labels,
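
A hypothetical helper distilling the key-selection logic introduced above; the format strings below are placeholders for illustration, not the real values in `metric_keys.MetricKeys`:

PROBABILITY_MEAN_AT_CLASS = 'probability_mean/class%d'   # placeholder value
PROBABILITY_MEAN_AT_NAME = 'probability_mean/label/%s'   # placeholder value

def probability_mean_key(class_id, label_vocabulary=None):
  # With a label vocabulary, per-class metrics are keyed by class name;
  # without one, they fall back to the numeric class id.
  if label_vocabulary is None:
    return PROBABILITY_MEAN_AT_CLASS % class_id
  return PROBABILITY_MEAN_AT_NAME % label_vocabulary[class_id]

print(probability_mean_key(0))               # probability_mean/class0
print(probability_mean_key(0, ['a', 'b']))   # probability_mean/label/a

The same pattern is applied to the AUC and AUC-PR keys in the hunk above.
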
diff --git a/tensorflow/contrib/estimator/python/estimator/head_test.py b/tensorflow/contrib/estimator/python/estimator/head_test.py
index 7b884402d4..2d367adb47 100644
--- a/tensorflow/contrib/estimator/python/estimator/head_test.py
+++ b/tensorflow/contrib/estimator/python/estimator/head_test.py
@@ -694,12 +694,14 @@ class MultiLabelHead(test.TestCase):
# this assert tests that the algorithm remains consistent.
keys.AUC: 0.3333,
keys.AUC_PR: 0.7639,
- keys.PROBABILITY_MEAN_AT_CLASS % 0: np.sum(_sigmoid(logits[:, 0])) / 2.,
- keys.AUC_AT_CLASS % 0: 0.,
- keys.AUC_PR_AT_CLASS % 0: 1.,
- keys.PROBABILITY_MEAN_AT_CLASS % 1: np.sum(_sigmoid(logits[:, 1])) / 2.,
- keys.AUC_AT_CLASS % 1: 1.,
- keys.AUC_PR_AT_CLASS % 1: 1.,
+ keys.PROBABILITY_MEAN_AT_NAME % 'a':
+ np.sum(_sigmoid(logits[:, 0])) / 2.,
+ keys.AUC_AT_NAME % 'a': 0.,
+ keys.AUC_PR_AT_NAME % 'a': 1.,
+ keys.PROBABILITY_MEAN_AT_NAME % 'b':
+ np.sum(_sigmoid(logits[:, 1])) / 2.,
+ keys.AUC_AT_NAME % 'b': 1.,
+ keys.AUC_PR_AT_NAME % 'b': 1.,
}
self._test_eval(
diff --git a/tensorflow/contrib/estimator/python/estimator/hooks.py b/tensorflow/contrib/estimator/python/estimator/hooks.py
index ddd6aa442f..caadafdfa6 100644
--- a/tensorflow/contrib/estimator/python/estimator/hooks.py
+++ b/tensorflow/contrib/estimator/python/estimator/hooks.py
@@ -189,7 +189,7 @@ class InMemoryEvaluatorHook(training.SessionRunHook):
init_fn=feed_variables, copy_from_scaffold=self._scaffold)
with self._graph.as_default():
- return self._estimator._evaluate_run(
+ self._estimator._evaluate_run(
checkpoint_path=None,
scaffold=scaffold,
update_op=self._update_op,
diff --git a/tensorflow/contrib/estimator/python/estimator/hooks_test.py b/tensorflow/contrib/estimator/python/estimator/hooks_test.py
index 95ae971852..ee88d5ecf5 100644
--- a/tensorflow/contrib/estimator/python/estimator/hooks_test.py
+++ b/tensorflow/contrib/estimator/python/estimator/hooks_test.py
@@ -102,6 +102,7 @@ class InMemoryEvaluatorHookTest(test.TestCase):
self.assertTrue(os.path.isdir(estimator.eval_dir()))
step_keyword_to_value = summary_step_keyword_to_value_mapping(
estimator.eval_dir())
+
# 4.5 = sum(range(10))/10
# before training
self.assertEqual(4.5, step_keyword_to_value[0]['mean_of_features'])
@@ -110,6 +111,7 @@ class InMemoryEvaluatorHookTest(test.TestCase):
self.assertEqual(4.5, step_keyword_to_value[8]['mean_of_features'])
# end
self.assertEqual(4.5, step_keyword_to_value[10]['mean_of_features'])
+ self.assertEqual(set([0, 4, 8, 10]), set(step_keyword_to_value.keys()))
def test_uses_latest_variable_value(self):
diff --git a/tensorflow/contrib/framework/python/ops/variables.py b/tensorflow/contrib/framework/python/ops/variables.py
index e8e3180019..322d5c335e 100644
--- a/tensorflow/contrib/framework/python/ops/variables.py
+++ b/tensorflow/contrib/framework/python/ops/variables.py
@@ -34,6 +34,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
+from tensorflow.python.ops import variables
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
@@ -199,10 +200,20 @@ def global_variable(initial_value,
@contrib_add_arg_scope
-def variable(name, shape=None, dtype=None, initializer=None,
- regularizer=None, trainable=True, collections=None,
- caching_device=None, device=None,
- partitioner=None, custom_getter=None, use_resource=None):
+def variable(name,
+ shape=None,
+ dtype=None,
+ initializer=None,
+ regularizer=None,
+ trainable=True,
+ collections=None,
+ caching_device=None,
+ device=None,
+ partitioner=None,
+ custom_getter=None,
+ use_resource=None,
+ synchronization=variables.VariableSynchronization.AUTO,
+ aggregation=variables.VariableAggregation.NONE):
"""Gets an existing variable with these parameters or creates a new one.
Args:
@@ -228,6 +239,15 @@ def variable(name, shape=None, dtype=None, initializer=None,
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
+    synchronization: Indicates when a distributed variable will be
+ aggregated. Accepted values are constants defined in the class
+ @{tf.VariableSynchronization}. By default the synchronization is set to
+ `AUTO` and the current `DistributionStrategy` chooses
+ when to synchronize. If `synchronization` is set to `ON_READ`,
+ `trainable` must not be set to `True`.
+ aggregation: Indicates how a distributed variable will be aggregated.
+ Accepted values are constants defined in the class
+ @{tf.VariableAggregation}.
Returns:
The created or existing variable.
@@ -242,21 +262,36 @@ def variable(name, shape=None, dtype=None, initializer=None,
getter = functools.partial(custom_getter,
reuse=variable_scope.get_variable_scope().reuse)
with ops.device(device or ''):
- return getter(name, shape=shape, dtype=dtype,
- initializer=initializer,
- regularizer=regularizer,
- trainable=trainable,
- collections=collections,
- caching_device=caching_device,
- partitioner=partitioner,
- use_resource=use_resource)
+ return getter(
+ name,
+ shape=shape,
+ dtype=dtype,
+ initializer=initializer,
+ regularizer=regularizer,
+ trainable=trainable,
+ collections=collections,
+ caching_device=caching_device,
+ partitioner=partitioner,
+ use_resource=use_resource,
+ synchronization=synchronization,
+ aggregation=aggregation)
@contrib_add_arg_scope
-def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
- regularizer=None, trainable=True, collections=None,
- caching_device=None, device=None, partitioner=None,
- custom_getter=None, use_resource=None):
+def model_variable(name,
+ shape=None,
+ dtype=dtypes.float32,
+ initializer=None,
+ regularizer=None,
+ trainable=True,
+ collections=None,
+ caching_device=None,
+ device=None,
+ partitioner=None,
+ custom_getter=None,
+ use_resource=None,
+ synchronization=variables.VariableSynchronization.AUTO,
+ aggregation=variables.VariableAggregation.NONE):
"""Gets an existing model variable with these parameters or creates a new one.
Args:
@@ -283,18 +318,36 @@ def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
+    synchronization: Indicates when a distributed variable will be
+ aggregated. Accepted values are constants defined in the class
+ @{tf.VariableSynchronization}. By default the synchronization is set to
+ `AUTO` and the current `DistributionStrategy` chooses
+ when to synchronize. If `synchronization` is set to `ON_READ`,
+ `trainable` must not be set to `True`.
+ aggregation: Indicates how a distributed variable will be aggregated.
+ Accepted values are constants defined in the class
+ @{tf.VariableAggregation}.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
collections += [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.MODEL_VARIABLES]
- var = variable(name, shape=shape, dtype=dtype,
- initializer=initializer, regularizer=regularizer,
- trainable=trainable, collections=collections,
- caching_device=caching_device, device=device,
- partitioner=partitioner, custom_getter=custom_getter,
- use_resource=use_resource)
+ var = variable(
+ name,
+ shape=shape,
+ dtype=dtype,
+ initializer=initializer,
+ regularizer=regularizer,
+ trainable=trainable,
+ collections=collections,
+ caching_device=caching_device,
+ device=device,
+ partitioner=partitioner,
+ custom_getter=custom_getter,
+ use_resource=use_resource,
+ synchronization=synchronization,
+ aggregation=aggregation)
return var
diff --git a/tensorflow/contrib/framework/python/ops/variables_test.py b/tensorflow/contrib/framework/python/ops/variables_test.py
index 7e0c7dbec1..3c44630a51 100644
--- a/tensorflow/contrib/framework/python/ops/variables_test.py
+++ b/tensorflow/contrib/framework/python/ops/variables_test.py
@@ -106,8 +106,9 @@ class LocalVariableTest(test.TestCase):
def testResourceVariable(self):
a = variables_lib2.local_variable(0)
b = variables_lib2.local_variable(0, use_resource=True)
- self.assertEqual(type(a), variables_lib.Variable)
- self.assertEqual(type(b), resource_variable_ops.ResourceVariable)
+ self.assertTrue(isinstance(a, variables_lib.Variable))
+ self.assertFalse(isinstance(a, resource_variable_ops.ResourceVariable))
+ self.assertTrue(isinstance(b, resource_variable_ops.ResourceVariable))
class GlobalVariableTest(test.TestCase):
@@ -176,8 +177,9 @@ class GlobalVariableTest(test.TestCase):
def testResourceVariable(self):
a = variables_lib2.global_variable(0)
b = variables_lib2.global_variable(0, use_resource=True)
- self.assertEqual(type(a), variables_lib.Variable)
- self.assertEqual(type(b), resource_variable_ops.ResourceVariable)
+ self.assertTrue(isinstance(a, variables_lib.Variable))
+ self.assertFalse(isinstance(a, resource_variable_ops.ResourceVariable))
+ self.assertTrue(isinstance(b, resource_variable_ops.ResourceVariable))
class GlobalStepTest(test.TestCase):
diff --git a/tensorflow/contrib/fused_conv/kernels/fused_conv2d_bias_activation_op.cc b/tensorflow/contrib/fused_conv/kernels/fused_conv2d_bias_activation_op.cc
index 2458f7554a..0ccb4583ab 100644
--- a/tensorflow/contrib/fused_conv/kernels/fused_conv2d_bias_activation_op.cc
+++ b/tensorflow/contrib/fused_conv/kernels/fused_conv2d_bias_activation_op.cc
@@ -135,9 +135,12 @@ class FusedConv2DBiasActivationOp : public OpKernel {
context->GetAttr("activation_mode", &activation_mode_str));
OP_REQUIRES_OK(context, GetActivationModeFromString(activation_mode_str,
&activation_mode_));
- OP_REQUIRES(context, activation_mode_ == ActivationMode::RELU,
- errors::InvalidArgument("Current implementation only supports "
- "RELU as the activation function."));
+ OP_REQUIRES(context,
+ activation_mode_ == ActivationMode::RELU ||
+ activation_mode_ == ActivationMode::NONE,
+ errors::InvalidArgument(
+ "Current implementation only supports RELU or NONE "
+ "as the activation function."));
cudnn_use_autotune_ = CudnnUseAutotune();
}
@@ -440,6 +443,8 @@ void LaunchFusedConv2DBiasActivationOp<GPUDevice, T, BiasType, ScaleType>::
: dnn::DataLayout::kBatchDepthYX;
constexpr auto filter_layout = is_int8x4 ? dnn::FilterLayout::kOutputInputYX4
: dnn::FilterLayout::kOutputInputYX;
+ constexpr auto compute_data_format =
+ is_int8x4 ? FORMAT_NCHW_VECT_C : FORMAT_NCHW;
dnn::BatchDescriptor conv_input_desc;
conv_input_desc.set_count(batch_size)
@@ -526,6 +531,7 @@ void LaunchFusedConv2DBiasActivationOp<GPUDevice, T, BiasType, ScaleType>::
batch_size,
conv_input_depth,
{{conv_input_rows, conv_input_cols}},
+ compute_data_format,
output_depth,
{{filter_rows, filter_cols}},
// TODO(yangzihao): Add support for arbitrary dilations for fused conv.
@@ -538,6 +544,18 @@ void LaunchFusedConv2DBiasActivationOp<GPUDevice, T, BiasType, ScaleType>::
activation_mode,
};
+ dnn::ActivationMode dnn_activation_mode;
+ switch (activation_mode) {
+ case ActivationMode::NONE:
+ dnn_activation_mode = dnn::ActivationMode::kNone;
+ break;
+ case ActivationMode::RELU:
+ dnn_activation_mode = dnn::ActivationMode::kRelu;
+ break;
+ default:
+ LOG(FATAL) << "Activation mode " << activation_mode << " not supported";
+ }
+
dnn::AlgorithmConfig algorithm_config;
if (cudnn_use_autotune && !AutoTuneConvBiasActivation::GetInstance()->Find(
fused_conv_parameters, &algorithm_config)) {
@@ -558,10 +576,9 @@ void LaunchFusedConv2DBiasActivationOp<GPUDevice, T, BiasType, ScaleType>::
->ThenFusedConvolveWithAlgorithm(
conv_input_desc, conv_input_ptr, conv_input_scale,
filter_desc, filter_ptr, conv_desc, side_input_ptr,
- side_input_scale, bias_desc, bias_ptr,
- dnn::ActivationMode::kRelu, output_desc, &output_ptr,
- &scratch_allocator, dnn::AlgorithmConfig(profile_algorithm),
- &profile_result)
+ side_input_scale, bias_desc, bias_ptr, dnn_activation_mode,
+ output_desc, &output_ptr, &scratch_allocator,
+ dnn::AlgorithmConfig(profile_algorithm), &profile_result)
.ok();
if (cudnn_launch_status) {
if (profile_result.is_valid()) {
@@ -597,7 +614,7 @@ void LaunchFusedConv2DBiasActivationOp<GPUDevice, T, BiasType, ScaleType>::
->ThenFusedConvolveWithAlgorithm(
conv_input_desc, conv_input_ptr, conv_input_scale, filter_desc,
filter_ptr, conv_desc, side_input_ptr, side_input_scale,
- bias_desc, bias_ptr, dnn::ActivationMode::kRelu, output_desc,
+ bias_desc, bias_ptr, dnn_activation_mode, output_desc,
&output_ptr, &scratch_allocator, algorithm_config,
/*output_profile_result=*/nullptr)
.ok();
diff --git a/tensorflow/contrib/fused_conv/kernels/fused_conv_ops_gpu.h b/tensorflow/contrib/fused_conv/kernels/fused_conv_ops_gpu.h
index ba52697679..b9c131a2e9 100644
--- a/tensorflow/contrib/fused_conv/kernels/fused_conv_ops_gpu.h
+++ b/tensorflow/contrib/fused_conv/kernels/fused_conv_ops_gpu.h
@@ -29,13 +29,13 @@ namespace tensorflow {
class FusedConvParameters : public ConvParameters {
public:
FusedConvParameters(int64 batch, int64 in_depths, const SpatialArray& in,
- int64 out_depths, const SpatialArray& filter,
- const SpatialArray& dilation, const SpatialArray& stride,
- const SpatialArray& padding, DataType dtype,
- int device_id, bool has_side_input,
+ TensorFormat data_format, int64 out_depths,
+ const SpatialArray& filter, const SpatialArray& dilation,
+ const SpatialArray& stride, const SpatialArray& padding,
+ DataType dtype, int device_id, bool has_side_input,
ActivationMode activation_mode)
- : ConvParameters(batch, in_depths, in, out_depths, filter, dilation,
- stride, padding, dtype, device_id),
+ : ConvParameters(batch, in_depths, in, data_format, out_depths, filter,
+ dilation, stride, padding, dtype, device_id),
activation_mode_(activation_mode),
has_side_input_(has_side_input) {
hash_code_ = Hash64Combine(hash_code_, has_side_input);
diff --git a/tensorflow/contrib/fused_conv/ops/fused_conv2d_bias_activation_op.cc b/tensorflow/contrib/fused_conv/ops/fused_conv2d_bias_activation_op.cc
index bafd1d5941..410571f378 100644
--- a/tensorflow/contrib/fused_conv/ops/fused_conv2d_bias_activation_op.cc
+++ b/tensorflow/contrib/fused_conv/ops/fused_conv2d_bias_activation_op.cc
@@ -44,7 +44,7 @@ REGISTER_OP("FusedConv2DBiasActivation")
.Attr(GetPaddingAttrString())
.Attr("data_format: {'NHWC', 'NCHW', 'NCHW_VECT_C'} = 'NHWC'")
.Attr("filter_format: {'HWIO', 'OIHW', 'OIHW_VECT_I'} = 'HWIO'")
- .Attr("activation_mode: {'Relu'} = 'Relu'")
+ .Attr("activation_mode: {'Relu', 'None'} = 'Relu'")
.Attr("dilations: list(int) = [1, 1, 1, 1]")
.SetShapeFn([](shape_inference::InferenceContext* c) {
using shape_inference::ShapeHandle;
@@ -144,7 +144,7 @@ REGISTER_OP("FusedConv2DBiasActivation")
`qint8 [ output_channels, input_channels / 4,
kernel_height, kernel_width, input_channels % 4 ]`
activation_mode: The activation applied to the output.
- Currently must be "Relu".
+ Must be "Relu" or "None".
dilations: 1-D tensor of length 4. The dilation factor for each dimension
of `input`. If set to k > 1, there will be k-1 skipped cells between
each filter element on that dimension. The dimension order is determined
diff --git a/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op.py b/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op.py
index 983b6dc8e5..cdc07b935d 100644
--- a/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op.py
+++ b/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op.py
@@ -66,8 +66,10 @@ def fused_conv2d_bias_activation(conv_input,
This is optional and defaults to 0.
side_input: A `Tensor` of the format specified by `data_format`.
This is useful for implementing ResNet blocks.
- activation_mode: (optional) currently must be the default "Relu".
- Note that in qint8 mode, it also clips to 127, so acts like ReluX.
+    activation_mode: (optional) currently supports the default "Relu" or the
+      "None" activation function.
+ Note: in qint8 mode, "None" actually clips to the range [-128, 127],
+ while "Relu" clips to the range [0, 127].
data_format: Specifies the data format.
Possible values are:
"NHWC" float [batch, height, width, channels]
diff --git a/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py b/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
index 4d62ac65ff..0185ef662c 100644
--- a/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
+++ b/tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test.py
@@ -622,7 +622,7 @@ def HwioToOihw(in_tensor):
def SimulateFusedConv2dBiasActivationInt8(conv_input_scale, conv_input, kernel,
padding, strides, side_input_scale,
- side_input, biases):
+ side_input, biases, apply_relu):
"""Simulates the int8 fused 2-D convolution op using separate float ops.
The arguments and return values have the same format, meanings and
@@ -636,6 +636,9 @@ def SimulateFusedConv2dBiasActivationInt8(conv_input_scale, conv_input, kernel,
side_input_scale: A scalar 'float'.
side_input: A `Tensor` of type `qint8` in NCHW_VECT_C layout.
biases: A `Tensor` of type `float32` in NCHW layout.
+    apply_relu: A boolean specifying whether to apply the "Relu" activation
+      function, which clips outputs to the range [0, 127], or the "None"
+      activation, which clips to the range [-128, 127].
Returns:
A `Tensor` of type `qint8` in NCHW_VECT_C layout.
"""
@@ -649,10 +652,12 @@ def SimulateFusedConv2dBiasActivationInt8(conv_input_scale, conv_input, kernel,
conv_and_side_inputs = conv_result + side_input_scale * NchwVectCToNchw(
gen_array_ops.dequantize(side_input, -128, 127))
- logit = nn_ops.bias_add(conv_and_side_inputs, biases, data_format="NCHW")
+ output = nn_ops.bias_add(conv_and_side_inputs, biases, data_format="NCHW")
+ if apply_relu:
+ output = nn_ops.relu(output)
result, _, _ = gen_array_ops.quantize_v2(
- NchwToNchwVectC(nn_ops.relu(logit)), -128, 127, dtypes.qint8)
+ NchwToNchwVectC(output), -128, 127, dtypes.qint8)
return result
@@ -795,7 +800,7 @@ class FusedConvInt8Tests(test.TestCase):
},
]
- def runTest(self, test_param):
+ def runTest(self, test_param, apply_relu):
batch_size = test_param["batch_size"]
input_channels = test_param["input_channels"]
output_channels = test_param["output_channels"]
@@ -831,8 +836,8 @@ class FusedConvInt8Tests(test.TestCase):
vertical_stride, padding_type)
output_width = CalculateConvolvedOutputDim(input_width, filter_width,
horizontal_stride, padding_type)
- tf_logging.info("output_height=", output_height, ", output_width=",
- output_width)
+ tf_logging.info("output_height=", output_height, ", output_width=",
+ output_width)
side_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
@@ -858,12 +863,13 @@ class FusedConvInt8Tests(test.TestCase):
conv_input_scale=conv_input_scale,
side_input_scale=side_input_scale,
side_input=side_input,
+ activation_mode="Relu" if apply_relu else "None",
data_format="NCHW_VECT_C",
filter_format="OIHW_VECT_I")
expected = SimulateFusedConv2dBiasActivationInt8(
conv_input_scale, conv_input, kernel, padding_type, strides,
- side_input_scale, side_input, biases)
+ side_input_scale, side_input, biases, apply_relu)
with self.test_session(use_gpu=True) as sess:
actual_y, expected_y = sess.run([actual, expected])
@@ -877,8 +883,9 @@ class FusedConvInt8Tests(test.TestCase):
tf_logging.info("int8 test skipped because not run with --config=cuda or "
"no GPUs with compute capability >= 6.1 are available.")
return
- for test_param in self._test_params:
- self.runTest(test_param)
+ for apply_relu in [True, False]:
+ for test_param in self._test_params:
+ self.runTest(test_param, apply_relu)
if __name__ == "__main__":
diff --git a/tensorflow/contrib/gan/BUILD b/tensorflow/contrib/gan/BUILD
index 10a8796bcb..7e6cb72485 100644
--- a/tensorflow/contrib/gan/BUILD
+++ b/tensorflow/contrib/gan/BUILD
@@ -42,8 +42,10 @@ py_library(
"//tensorflow/contrib/training:training_py",
"//tensorflow/python:array_ops",
"//tensorflow/python:check_ops",
+ "//tensorflow/python:dtypes",
"//tensorflow/python:framework_ops",
"//tensorflow/python:init_ops",
+ "//tensorflow/python:random_ops",
"//tensorflow/python:training",
"//tensorflow/python:training_util",
"//tensorflow/python:variable_scope",
@@ -55,20 +57,22 @@ py_library(
py_test(
name = "train_test",
srcs = ["python/train_test.py"],
+ shard_count = 50,
srcs_version = "PY2AND3",
tags = ["notsan"],
deps = [
- ":features",
":namedtuples",
":random_tensor_pool",
":train",
"//tensorflow/contrib/framework:framework_py",
+ "//tensorflow/contrib/layers:layers_py",
"//tensorflow/contrib/slim:learning",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:constant_op",
"//tensorflow/python:dtypes",
"//tensorflow/python:framework_ops",
+ "//tensorflow/python:math_ops",
"//tensorflow/python:random_ops",
"//tensorflow/python:random_seed",
"//tensorflow/python:training",
@@ -77,6 +81,7 @@ py_test(
"//tensorflow/python:variables",
"//tensorflow/python/ops/distributions",
"//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
],
)
@@ -252,12 +257,15 @@ py_library(
py_test(
name = "random_tensor_pool_test",
srcs = ["python/features/python/random_tensor_pool_test.py"],
+ shard_count = 6,
srcs_version = "PY2AND3",
deps = [
":random_tensor_pool",
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
+ "//tensorflow/python:constant_op",
"//tensorflow/python:dtypes",
+ "//tensorflow/python:framework_ops",
"//third_party/py/numpy",
],
)
diff --git a/tensorflow/contrib/gan/python/features/python/random_tensor_pool_impl.py b/tensorflow/contrib/gan/python/features/python/random_tensor_pool_impl.py
index 9e4ec59e70..ca2d724b49 100644
--- a/tensorflow/contrib/gan/python/features/python/random_tensor_pool_impl.py
+++ b/tensorflow/contrib/gan/python/features/python/random_tensor_pool_impl.py
@@ -36,16 +36,15 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
+from tensorflow.python.util import nest
__all__ = [
'tensor_pool',
]
-def _to_tuple(x):
- if isinstance(x, (list, tuple)):
- return tuple(x)
- return (x,)
+def _to_list(x):
+ return [x] if isinstance(x, ops.Tensor) else list(x)
def tensor_pool(input_values,
@@ -63,8 +62,8 @@ def tensor_pool(input_values,
`pool_size` = 0 or `pooling_probability` = 0.
Args:
- input_values: A `Tensor`, or a list or tuple of `Tensor`s from which to read
- values to be pooled.
+ input_values: An arbitrarily nested structure of `tf.Tensors`, from which to
+ read values to be pooled.
pool_size: An integer specifying the maximum size of the pool. Defaults to
50.
pooling_probability: A float `Tensor` specifying the probability of getting
@@ -72,9 +71,10 @@ def tensor_pool(input_values,
name: A string prefix for the name scope for all tensorflow ops.
Returns:
- A `Tensor`, or a list or tuple of `Tensor`s (according to the type ofx
- `input_values`) which is with given probability either the `input_values` or
- a randomly chosen sample that was previously inserted in the pool.
+ A nested structure of `Tensor` objects with the same structure as
+ `input_values`. With the given probability, the Tensor values are either the
+ same as in `input_values` or a randomly chosen sample that was previously
+ inserted in the pool.
Raises:
ValueError: If `pool_size` is negative.
@@ -86,11 +86,10 @@ def tensor_pool(input_values,
return input_values
original_input_values = input_values
- input_values = _to_tuple(input_values)
+ input_values = nest.flatten(input_values)
- with ops.name_scope(
- '{}_pool_queue'.format(name),
- values=input_values + (pooling_probability,)):
+ with ops.name_scope('{}_pool_queue'.format(name),
+ values=input_values + [pooling_probability]):
pool_queue = data_flow_ops.RandomShuffleQueue(
capacity=pool_size,
min_after_dequeue=0,
@@ -112,10 +111,10 @@ def tensor_pool(input_values,
def _get_input_value_pooled():
enqueue_op = pool_queue.enqueue(input_values)
with ops.control_dependencies([enqueue_op]):
- return tuple(array_ops.identity(v) for v in input_values)
+ return [array_ops.identity(v) for v in input_values]
def _get_random_pool_value_and_enqueue_input():
- dequeue_values = _to_tuple(pool_queue.dequeue())
+ dequeue_values = _to_list(pool_queue.dequeue())
with ops.control_dependencies(dequeue_values):
enqueue_op = pool_queue.enqueue(input_values)
with ops.control_dependencies([enqueue_op]):
@@ -124,7 +123,7 @@ def tensor_pool(input_values,
return control_flow_ops.cond(prob, lambda: dequeue_values,
lambda: input_values)
- output_values = _to_tuple(control_flow_ops.cond(
+ output_values = _to_list(control_flow_ops.cond(
pool_queue.size() < pool_size, _get_input_value_pooled,
_get_random_pool_value_and_enqueue_input))
@@ -132,8 +131,4 @@ def tensor_pool(input_values,
for input_value, output_value in zip(input_values, output_values):
output_value.set_shape(input_value.shape)
- if isinstance(original_input_values, list):
- return list(output_values)
- elif isinstance(original_input_values, tuple):
- return output_values
- return output_values[0]
+ return nest.pack_sequence_as(original_input_values, output_values)
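
The structural round-trip that the rewrite relies on can be exercised in isolation with `nest`; plain Python leaves are used here purely for illustration:

from tensorflow.python.util import nest

structure = [[1, 2, 3], (4, 5), 6]
flat = nest.flatten(structure)                    # [1, 2, 3, 4, 5, 6]
rebuilt = nest.pack_sequence_as(structure, flat)  # same nesting restored
assert rebuilt == [[1, 2, 3], (4, 5), 6]
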
diff --git a/tensorflow/contrib/gan/python/features/python/random_tensor_pool_test.py b/tensorflow/contrib/gan/python/features/python/random_tensor_pool_test.py
index d8cf549cf7..08584dcd65 100644
--- a/tensorflow/contrib/gan/python/features/python/random_tensor_pool_test.py
+++ b/tensorflow/contrib/gan/python/features/python/random_tensor_pool_test.py
@@ -21,7 +21,9 @@ from __future__ import print_function
import numpy as np
from tensorflow.contrib.gan.python.features.python.random_tensor_pool_impl import tensor_pool
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@@ -111,6 +113,23 @@ class TensorPoolTest(test.TestCase):
self.assertEqual(len(outs), len(input_values))
self.assertEqual(outs[1] - outs[0], 1)
+ def test_pool_preserves_shape(self):
+ t = constant_op.constant(1)
+ input_values = [[t, t, t], (t, t), t]
+ output_values = tensor_pool(input_values, pool_size=5)
+ print('stuff: ', output_values)
+ # Overall shape.
+ self.assertIsInstance(output_values, list)
+ self.assertEqual(3, len(output_values))
+ # Shape of first element.
+ self.assertIsInstance(output_values[0], list)
+ self.assertEqual(3, len(output_values[0]))
+ # Shape of second element.
+ self.assertIsInstance(output_values[1], tuple)
+ self.assertEqual(2, len(output_values[1]))
+ # Shape of third element.
+ self.assertIsInstance(output_values[2], ops.Tensor)
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/contrib/gan/python/losses/python/losses_impl.py b/tensorflow/contrib/gan/python/losses/python/losses_impl.py
index 1ba3a64167..d389748374 100644
--- a/tensorflow/contrib/gan/python/losses/python/losses_impl.py
+++ b/tensorflow/contrib/gan/python/losses/python/losses_impl.py
@@ -949,6 +949,11 @@ def cycle_consistency_loss(data_x,
* loss = (loss_x2x + loss_y2y) / 2
where `loss` is the final result.
+  For the L1-norm, we follow the original implementation
+  (https://github.com/junyanz/CycleGAN/blob/master/models/cycle_gan_model.lua)
+  and use the L1-norm of the pixel-wise error normalized by data size, so that
+  `cycle_loss_weight` can be specified independent of image size.
+
See https://arxiv.org/abs/1703.10593 for more details.
Args:
@@ -965,19 +970,12 @@ def cycle_consistency_loss(data_x,
A scalar `Tensor` of cycle consistency loss.
"""
- def _partial_cycle_consistency_loss(data, reconstructed_data):
- # Following the original implementation
- # https://github.com/junyanz/CycleGAN/blob/master/models/cycle_gan_model.lua
- # use L1-norm of pixel-wise error normalized by data size so that
- # `cycle_loss_weight` can be specified independent of image size.
- return math_ops.reduce_mean(math_ops.abs(data - reconstructed_data))
-
with ops.name_scope(
scope,
'cycle_consistency_loss',
values=[data_x, reconstructed_data_x, data_y, reconstructed_data_y]):
- loss_x2x = _partial_cycle_consistency_loss(data_x, reconstructed_data_x)
- loss_y2y = _partial_cycle_consistency_loss(data_y, reconstructed_data_y)
+ loss_x2x = losses.absolute_difference(data_x, reconstructed_data_x)
+ loss_y2y = losses.absolute_difference(data_y, reconstructed_data_y)
loss = (loss_x2x + loss_y2y) / 2.0
if add_summaries:
summary.scalar('cycle_consistency_loss_x2x', loss_x2x)
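
A NumPy sketch of the loss described above, for illustration only; with unit weights, `losses.absolute_difference` reduces to the same per-element L1 mean:

import numpy as np

def cycle_consistency_loss(data_x, reconstructed_x, data_y, reconstructed_y):
  # Mean absolute (L1) pixel error, normalized by data size, averaged over
  # the two reconstruction directions.
  loss_x2x = np.mean(np.abs(data_x - reconstructed_x))
  loss_y2y = np.mean(np.abs(data_y - reconstructed_y))
  return (loss_x2x + loss_y2y) / 2.0
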
diff --git a/tensorflow/contrib/gan/python/namedtuples.py b/tensorflow/contrib/gan/python/namedtuples.py
index 25cfeafeec..a462b68e28 100644
--- a/tensorflow/contrib/gan/python/namedtuples.py
+++ b/tensorflow/contrib/gan/python/namedtuples.py
@@ -25,12 +25,12 @@ from __future__ import print_function
import collections
-
__all__ = [
'GANModel',
'InfoGANModel',
'ACGANModel',
'CycleGANModel',
+ 'StarGANModel',
'GANLoss',
'CycleGANLoss',
'GANTrainOps',
@@ -136,6 +136,54 @@ class CycleGANModel(
"""
+class StarGANModel(
+ collections.namedtuple('StarGANModel', (
+ 'input_data',
+ 'input_data_domain_label',
+ 'generated_data',
+ 'generated_data_domain_target',
+ 'reconstructed_data',
+ 'discriminator_input_data_source_predication',
+ 'discriminator_generated_data_source_predication',
+ 'discriminator_input_data_domain_predication',
+ 'discriminator_generated_data_domain_predication',
+ 'generator_variables',
+ 'generator_scope',
+ 'generator_fn',
+ 'discriminator_variables',
+ 'discriminator_scope',
+ 'discriminator_fn',
+ ))):
+ """A StarGANModel contains all the pieces needed for StarGAN training.
+
+ Args:
+ input_data: The real images that need to be transferred by the generator.
+ input_data_domain_label: The real domain labels associated with the real
+ images.
+ generated_data: The generated images produced by the generator. It has the
+ same shape as the input_data.
+ generated_data_domain_target: The target domain that the generated images
+ belong to. It has the same shape as the input_data_domain_label.
+    reconstructed_data: The reconstructed images produced by the generator.
+      reconstructed_data = G(G(input_data, generated_data_domain_target),
+      input_data_domain_label).
+    discriminator_input_data_source_predication: The discriminator's output for
+      predicting the source (real/generated) of input_data.
+    discriminator_generated_data_source_predication: The discriminator's output
+      for predicting the source (real/generated) of generated_data.
+ discriminator_input_data_domain_predication: The discriminator's output for
+ predicting the domain_label for the input_data.
+    discriminator_generated_data_domain_predication: The discriminator's output
+ for predicting the domain_target for the generated_data.
+ generator_variables: A list of all generator variables.
+ generator_scope: Variable scope all generator variables live in.
+ generator_fn: The generator function.
+ discriminator_variables: A list of all discriminator variables.
+ discriminator_scope: Variable scope all discriminator variables live in.
+ discriminator_fn: The discriminator function.
+ """
+
+
class GANLoss(
collections.namedtuple('GANLoss', (
'generator_loss',
diff --git a/tensorflow/contrib/gan/python/train.py b/tensorflow/contrib/gan/python/train.py
index 6fa43059f3..df603d1f18 100644
--- a/tensorflow/contrib/gan/python/train.py
+++ b/tensorflow/contrib/gan/python/train.py
@@ -36,10 +36,12 @@ from tensorflow.contrib.gan.python import losses as tfgan_losses
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.slim.python.slim import learning as slim_learning
from tensorflow.contrib.training.python.training import training
+from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.distributions import distribution as ds
from tensorflow.python.ops.losses import losses
@@ -47,12 +49,12 @@ from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
-
__all__ = [
'gan_model',
'infogan_model',
'acgan_model',
'cyclegan_model',
+ 'stargan_model',
'gan_loss',
'cyclegan_loss',
'gan_train_ops',
@@ -123,16 +125,9 @@ def gan_model(
discriminator_variables = variables_lib.get_trainable_variables(dis_scope)
return namedtuples.GANModel(
- generator_inputs,
- generated_data,
- generator_variables,
- gen_scope,
- generator_fn,
- real_data,
- discriminator_real_outputs,
- discriminator_gen_outputs,
- discriminator_variables,
- dis_scope,
+ generator_inputs, generated_data, generator_variables, gen_scope,
+ generator_fn, real_data, discriminator_real_outputs,
+ discriminator_gen_outputs, discriminator_variables, dis_scope,
discriminator_fn)
@@ -201,8 +196,7 @@ def infogan_model(
# Get model-specific variables.
generator_variables = variables_lib.get_trainable_variables(gen_scope)
- discriminator_variables = variables_lib.get_trainable_variables(
- disc_scope)
+ discriminator_variables = variables_lib.get_trainable_variables(disc_scope)
return namedtuples.InfoGANModel(
generator_inputs,
@@ -279,12 +273,12 @@ def acgan_model(
generator_inputs = _convert_tensor_or_l_or_d(generator_inputs)
generated_data = generator_fn(generator_inputs)
with variable_scope.variable_scope(discriminator_scope) as dis_scope:
- with ops.name_scope(dis_scope.name+'/generated/'):
+ with ops.name_scope(dis_scope.name + '/generated/'):
(discriminator_gen_outputs, discriminator_gen_classification_logits
) = _validate_acgan_discriminator_outputs(
discriminator_fn(generated_data, generator_inputs))
with variable_scope.variable_scope(dis_scope, reuse=True):
- with ops.name_scope(dis_scope.name+'/real/'):
+ with ops.name_scope(dis_scope.name + '/real/'):
real_data = ops.convert_to_tensor(real_data)
(discriminator_real_outputs, discriminator_real_classification_logits
) = _validate_acgan_discriminator_outputs(
@@ -297,8 +291,7 @@ def acgan_model(
# Get model-specific variables.
generator_variables = variables_lib.get_trainable_variables(gen_scope)
- discriminator_variables = variables_lib.get_trainable_variables(
- dis_scope)
+ discriminator_variables = variables_lib.get_trainable_variables(dis_scope)
return namedtuples.ACGANModel(
generator_inputs, generated_data, generator_variables, gen_scope,
@@ -379,6 +372,108 @@ def cyclegan_model(
reconstructed_y)
+def stargan_model(generator_fn,
+ discriminator_fn,
+ input_data,
+ input_data_domain_label,
+ generator_scope='Generator',
+ discriminator_scope='Discriminator'):
+  """Returns StarGAN model outputs and variables.
+
+ See https://arxiv.org/abs/1711.09020 for more details.
+
+ Args:
+ generator_fn: A python lambda that takes `inputs` and `targets` as inputs
+    generator_fn: A python lambda that takes `inputs` and `targets` as inputs
+      and returns `generated_data` as the transformed version of `inputs` based
+      on `targets`. `inputs` has shape (n, h, w, c), `targets` has shape (n,
+      num_domains), and `generated_data` has the same shape as `inputs`.
+ inputs and returns a tuple (`source_prediction`, `domain_prediction`).
+ `source_prediction` represents the source(real/generated) prediction by
+      `source_prediction` represents the source (real/generated) prediction by
+ prediction/classification by the discriminator. `source_prediction` has
+ shape (n) and `domain_prediction` has shape (n, num_domains).
+    input_data: Tensor or a list of tensors of shape (n, h, w, c) representing
+      the real input images.
+    input_data_domain_label: Tensor or a list of tensors of shape (batch_size,
+      num_domains) representing the domain labels associated with the real
+      images.
+ generator_scope: Optional generator variable scope. Useful if you want to
+ reuse a subgraph that has already been created.
+ discriminator_scope: Optional discriminator variable scope. Useful if you
+ want to reuse a subgraph that has already been created.
+
+ Returns:
+    A StarGANModel namedtuple containing the tensors that are needed to
+    compute the losses.
+
+ Raises:
+    ValueError: If the shape of `input_data_domain_label` is not rank 2 or is
+      not fully defined in every dimension.
+ """
+
+ # Convert to tensor.
+ input_data = _convert_tensor_or_l_or_d(input_data)
+ input_data_domain_label = _convert_tensor_or_l_or_d(input_data_domain_label)
+
+ # Convert list of tensor to a single tensor if applicable.
+ if isinstance(input_data, (list, tuple)):
+ input_data = array_ops.concat(
+ [ops.convert_to_tensor(x) for x in input_data], 0)
+ if isinstance(input_data_domain_label, (list, tuple)):
+ input_data_domain_label = array_ops.concat(
+ [ops.convert_to_tensor(x) for x in input_data_domain_label], 0)
+
+ # Get batch_size, num_domains from the labels.
+ input_data_domain_label.shape.assert_has_rank(2)
+ input_data_domain_label.shape.assert_is_fully_defined()
+ batch_size, num_domains = input_data_domain_label.shape.as_list()
+
+ # Transform input_data to random target domains.
+ with variable_scope.variable_scope(generator_scope) as generator_scope:
+ generated_data_domain_target = _generate_stargan_random_domain_target(
+ batch_size, num_domains)
+ generated_data = generator_fn(input_data, generated_data_domain_target)
+
+ # Transform generated_data back to the original input_data domain.
+ with variable_scope.variable_scope(generator_scope, reuse=True):
+ reconstructed_data = generator_fn(generated_data, input_data_domain_label)
+
+ # Predict source and domain for the generated_data using the discriminator.
+ with variable_scope.variable_scope(
+ discriminator_scope) as discriminator_scope:
+ disc_gen_data_source_pred, disc_gen_data_domain_pred = discriminator_fn(
+ generated_data, num_domains)
+
+ # Predict source and domain for the input_data using the discriminator.
+ with variable_scope.variable_scope(discriminator_scope, reuse=True):
+ disc_input_data_source_pred, disc_input_data_domain_pred = discriminator_fn(
+ input_data, num_domains)
+
+ # Collect trainable variables from the neural networks.
+ generator_variables = variables_lib.get_trainable_variables(generator_scope)
+ discriminator_variables = variables_lib.get_trainable_variables(
+ discriminator_scope)
+
+ # Create the StarGANModel namedtuple.
+ return namedtuples.StarGANModel(
+ input_data=input_data,
+ input_data_domain_label=input_data_domain_label,
+ generated_data=generated_data,
+ generated_data_domain_target=generated_data_domain_target,
+ reconstructed_data=reconstructed_data,
+ discriminator_input_data_source_predication=disc_input_data_source_pred,
+ discriminator_generated_data_source_predication=disc_gen_data_source_pred,
+ discriminator_input_data_domain_predication=disc_input_data_domain_pred,
+ discriminator_generated_data_domain_predication=disc_gen_data_domain_pred,
+ generator_variables=generator_variables,
+ generator_scope=generator_scope,
+ generator_fn=generator_fn,
+ discriminator_variables=discriminator_variables,
+ discriminator_scope=discriminator_scope,
+ discriminator_fn=discriminator_fn)
+
+
def _validate_aux_loss_weight(aux_loss_weight, name='aux_loss_weight'):
if isinstance(aux_loss_weight, ops.Tensor):
aux_loss_weight.shape.assert_is_compatible_with([])
@@ -419,33 +514,42 @@ def _tensor_pool_adjusted_model(model, tensor_pool_fn):
Raises:
ValueError: If tensor pool does not support the `model`.
"""
- if tensor_pool_fn is None:
- return model
-
- pooled_generated_data, pooled_generator_inputs = tensor_pool_fn(
- (model.generated_data, model.generator_inputs))
-
if isinstance(model, namedtuples.GANModel):
+ pooled_generator_inputs, pooled_generated_data = tensor_pool_fn(
+ (model.generator_inputs, model.generated_data))
with variable_scope.variable_scope(model.discriminator_scope, reuse=True):
dis_gen_outputs = model.discriminator_fn(pooled_generated_data,
pooled_generator_inputs)
- return model._replace(discriminator_gen_outputs=dis_gen_outputs)
+ return model._replace(
+ generator_inputs=pooled_generator_inputs,
+ generated_data=pooled_generated_data,
+ discriminator_gen_outputs=dis_gen_outputs)
elif isinstance(model, namedtuples.ACGANModel):
+ pooled_generator_inputs, pooled_generated_data = tensor_pool_fn(
+ (model.generator_inputs, model.generated_data))
with variable_scope.variable_scope(model.discriminator_scope, reuse=True):
- (dis_pooled_gen_outputs,
- dis_pooled_gen_classification_logits) = model.discriminator_fn(
+ (pooled_discriminator_gen_outputs,
+ pooled_discriminator_gen_classification_logits) = model.discriminator_fn(
pooled_generated_data, pooled_generator_inputs)
return model._replace(
- discriminator_gen_outputs=dis_pooled_gen_outputs,
+ generator_inputs=pooled_generator_inputs,
+ generated_data=pooled_generated_data,
+ discriminator_gen_outputs=pooled_discriminator_gen_outputs,
discriminator_gen_classification_logits=
- dis_pooled_gen_classification_logits)
+ pooled_discriminator_gen_classification_logits)
elif isinstance(model, namedtuples.InfoGANModel):
+ pooled_generator_inputs, pooled_generated_data, pooled_structured_input = (
+ tensor_pool_fn((model.generator_inputs, model.generated_data,
+ model.structured_generator_inputs)))
with variable_scope.variable_scope(model.discriminator_scope, reuse=True):
- (dis_pooled_gen_outputs,
+ (pooled_discriminator_gen_outputs,
pooled_predicted_distributions) = model.discriminator_and_aux_fn(
pooled_generated_data, pooled_generator_inputs)
return model._replace(
- discriminator_gen_outputs=dis_pooled_gen_outputs,
+ generator_inputs=pooled_generator_inputs,
+ generated_data=pooled_generated_data,
+ structured_generator_inputs=pooled_structured_input,
+ discriminator_gen_outputs=pooled_discriminator_gen_outputs,
predicted_distributions=pooled_predicted_distributions)
else:
raise ValueError('Tensor pool does not support `model`: %s.' % type(model))
@@ -512,8 +616,8 @@ def gan_loss(
`model` isn't an `InfoGANModel`.
"""
# Validate arguments.
- gradient_penalty_weight = _validate_aux_loss_weight(gradient_penalty_weight,
- 'gradient_penalty_weight')
+ gradient_penalty_weight = _validate_aux_loss_weight(
+ gradient_penalty_weight, 'gradient_penalty_weight')
mutual_information_penalty_weight = _validate_aux_loss_weight(
mutual_information_penalty_weight, 'infogan_weight')
aux_cond_generator_weight = _validate_aux_loss_weight(
@@ -537,33 +641,38 @@ def gan_loss(
'is provided, `model` must be an `ACGANModel`. Instead, was %s.' %
type(model))
+ # Optionally create pooled model.
+ pooled_model = (_tensor_pool_adjusted_model(model, tensor_pool_fn) if
+ tensor_pool_fn else model)
+
# Create standard losses.
gen_loss = generator_loss_fn(model, add_summaries=add_summaries)
- dis_loss = discriminator_loss_fn(
- _tensor_pool_adjusted_model(model, tensor_pool_fn),
- add_summaries=add_summaries)
+ dis_loss = discriminator_loss_fn(pooled_model, add_summaries=add_summaries)
# Add optional extra losses.
if _use_aux_loss(gradient_penalty_weight):
gp_loss = tfgan_losses.wasserstein_gradient_penalty(
- model,
+ pooled_model,
epsilon=gradient_penalty_epsilon,
target=gradient_penalty_target,
one_sided=gradient_penalty_one_sided,
add_summaries=add_summaries)
dis_loss += gradient_penalty_weight * gp_loss
if _use_aux_loss(mutual_information_penalty_weight):
- info_loss = tfgan_losses.mutual_information_penalty(
+ gen_info_loss = tfgan_losses.mutual_information_penalty(
model, add_summaries=add_summaries)
- dis_loss += mutual_information_penalty_weight * info_loss
- gen_loss += mutual_information_penalty_weight * info_loss
+ dis_info_loss = (gen_info_loss if tensor_pool_fn is None else
+ tfgan_losses.mutual_information_penalty(
+ pooled_model, add_summaries=add_summaries))
+ gen_loss += mutual_information_penalty_weight * gen_info_loss
+ dis_loss += mutual_information_penalty_weight * dis_info_loss
if _use_aux_loss(aux_cond_generator_weight):
ac_gen_loss = tfgan_losses.acgan_generator_loss(
model, add_summaries=add_summaries)
gen_loss += aux_cond_generator_weight * ac_gen_loss
if _use_aux_loss(aux_cond_discriminator_weight):
ac_disc_loss = tfgan_losses.acgan_discriminator_loss(
- model, add_summaries=add_summaries)
+ pooled_model, add_summaries=add_summaries)
dis_loss += aux_cond_discriminator_weight * ac_disc_loss
# Gathers auxiliary losses.
if model.generator_scope:
@@ -631,8 +740,8 @@ def cyclegan_loss(
generator_loss_fn=generator_loss_fn,
discriminator_loss_fn=discriminator_loss_fn,
**kwargs)
- return partial_loss._replace(
- generator_loss=partial_loss.generator_loss + aux_loss)
+ return partial_loss._replace(generator_loss=partial_loss.generator_loss +
+ aux_loss)
with ops.name_scope('cyclegan_loss_x2y'):
loss_x2y = _partial_loss(model.model_x2y)
@@ -822,12 +931,14 @@ def get_sequential_train_hooks(train_steps=namedtuples.GANTrainSteps(1, 1)):
Returns:
A function that takes a GANTrainOps tuple and returns a list of hooks.
"""
+
def get_hooks(train_ops):
generator_hook = RunTrainOpsHook(train_ops.generator_train_op,
train_steps.generator_train_steps)
discriminator_hook = RunTrainOpsHook(train_ops.discriminator_train_op,
train_steps.discriminator_train_steps)
return [generator_hook, discriminator_hook]
+
return get_hooks
@@ -881,23 +992,23 @@ def get_joint_train_hooks(train_steps=namedtuples.GANTrainSteps(1, 1)):
d_hook = RunTrainOpsHook(d_op, num_d_steps)
return [joint_hook, g_hook, d_hook]
+
return get_hooks
# TODO(joelshor): This function currently returns the global step. Find a
# good way for it to return the generator, discriminator, and final losses.
-def gan_train(
- train_ops,
- logdir,
- get_hooks_fn=get_sequential_train_hooks(),
- master='',
- is_chief=True,
- scaffold=None,
- hooks=None,
- chief_only_hooks=None,
- save_checkpoint_secs=600,
- save_summaries_steps=100,
- config=None):
+def gan_train(train_ops,
+ logdir,
+ get_hooks_fn=get_sequential_train_hooks(),
+ master='',
+ is_chief=True,
+ scaffold=None,
+ hooks=None,
+ chief_only_hooks=None,
+ save_checkpoint_secs=600,
+ save_summaries_steps=100,
+ config=None):
"""A wrapper around `contrib.training.train` that uses GAN hooks.
Args:
@@ -943,8 +1054,7 @@ def gan_train(
config=config)
-def get_sequential_train_steps(
- train_steps=namedtuples.GANTrainSteps(1, 1)):
+def get_sequential_train_steps(train_steps=namedtuples.GANTrainSteps(1, 1)):
"""Returns a thin wrapper around slim.learning.train_step, for GANs.
This function is to provide support for the Supervisor. For new code, please
@@ -1042,3 +1152,19 @@ def _validate_acgan_discriminator_outputs(discriminator_output):
'A discriminator function for ACGAN must output a tuple '
'consisting of (discrimination logits, classification logits).')
return a, b
+
+
+def _generate_stargan_random_domain_target(batch_size, num_domains):
+  """Generates random one-hot domain labels.
+
+ Args:
+    batch_size: (int) Number of random domain labels to generate.
+    num_domains: (int) Number of domains represented by the label.
+
+ Returns:
+    Tensor of shape (batch_size, num_domains) representing random labels.
+ """
+ domain_idx = random_ops.random_uniform(
+ [batch_size], minval=0, maxval=num_domains, dtype=dtypes.int32)
+
+ return array_ops.one_hot(domain_idx, num_domains)
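
A NumPy version of the helper, for illustration only:

import numpy as np

def generate_random_domain_target(batch_size, num_domains):
  # Uniformly sample one domain index per example and one-hot encode it,
  # mirroring the random_uniform/one_hot helper above.
  idx = np.random.randint(0, num_domains, size=batch_size)
  return np.eye(num_domains, dtype=np.float32)[idx]

print(generate_random_domain_target(3, 5).shape)  # (3, 5)
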
diff --git a/tensorflow/contrib/gan/python/train_test.py b/tensorflow/contrib/gan/python/train_test.py
index 3ebbe55d05..fa52e9cca1 100644
--- a/tensorflow/contrib/gan/python/train_test.py
+++ b/tensorflow/contrib/gan/python/train_test.py
@@ -18,8 +18,10 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from absl.testing import parameterized
import numpy as np
+from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python import train
@@ -30,6 +32,7 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
@@ -84,19 +87,47 @@ class InfoGANDiscriminator(object):
def acgan_discriminator_model(inputs, _, num_classes=10):
- return (discriminator_model(inputs, _), array_ops.one_hot(
- # TODO(haeusser): infer batch size from input
- random_ops.random_uniform([3], maxval=num_classes, dtype=dtypes.int32),
- num_classes))
+ return (
+ discriminator_model(inputs, _),
+ array_ops.one_hot(
+ # TODO(haeusser): infer batch size from input
+ random_ops.random_uniform(
+ [3], maxval=num_classes, dtype=dtypes.int32),
+ num_classes))
class ACGANDiscriminator(object):
def __call__(self, inputs, _, num_classes=10):
- return (discriminator_model(inputs, _), array_ops.one_hot(
- # TODO(haeusser): infer batch size from input
- random_ops.random_uniform([3], maxval=num_classes, dtype=dtypes.int32),
- num_classes))
+ return (
+ discriminator_model(inputs, _),
+ array_ops.one_hot(
+ # TODO(haeusser): infer batch size from input
+ random_ops.random_uniform(
+ [3], maxval=num_classes, dtype=dtypes.int32),
+ num_classes))
+
+
+def stargan_generator_model(inputs, _):
+ """Dummy generator for StarGAN."""
+
+ return variable_scope.get_variable('dummy_g', initializer=0.5) * inputs
+
+
+def stargan_discriminator_model(inputs, num_domains):
+ """Differentiable dummy discriminator for StarGAN."""
+
+ hidden = layers.flatten(inputs)
+
+ output_src = math_ops.reduce_mean(hidden, axis=1)
+
+ output_cls = layers.fully_connected(
+ inputs=hidden,
+ num_outputs=num_domains,
+ activation_fn=None,
+ normalizer_fn=None,
+ biases_initializer=None)
+ return output_src, output_cls
def get_gan_model():
@@ -122,8 +153,7 @@ def get_gan_model():
def get_callable_gan_model():
ganmodel = get_gan_model()
return ganmodel._replace(
- generator_fn=Generator(),
- discriminator_fn=Discriminator())
+ generator_fn=Generator(), discriminator_fn=Discriminator())
def create_gan_model():
@@ -248,63 +278,33 @@ def get_sync_optimizer():
replicas_to_aggregate=1)
-def get_tensor_pool_fn(pool_size):
-
- def tensor_pool_fn_impl(input_values):
- return random_tensor_pool.tensor_pool(input_values, pool_size=pool_size)
-
- return tensor_pool_fn_impl
-
-
-def get_tensor_pool_fn_for_infogan(pool_size):
-
- def tensor_pool_fn_impl(input_values):
- generated_data, generator_inputs = input_values
- output_values = random_tensor_pool.tensor_pool(
- [generated_data] + generator_inputs, pool_size=pool_size)
- return output_values[0], output_values[1:]
-
- return tensor_pool_fn_impl
-
-
-class GANModelTest(test.TestCase):
+class GANModelTest(test.TestCase, parameterized.TestCase):
"""Tests for `gan_model`."""
- def _test_output_type_helper(self, create_fn, tuple_type):
- self.assertTrue(isinstance(create_fn(), tuple_type))
-
- def test_output_type_gan(self):
- self._test_output_type_helper(get_gan_model, namedtuples.GANModel)
-
- def test_output_type_callable_gan(self):
- self._test_output_type_helper(get_callable_gan_model, namedtuples.GANModel)
-
- def test_output_type_infogan(self):
- self._test_output_type_helper(get_infogan_model, namedtuples.InfoGANModel)
-
- def test_output_type_callable_infogan(self):
- self._test_output_type_helper(
- get_callable_infogan_model, namedtuples.InfoGANModel)
-
- def test_output_type_acgan(self):
- self._test_output_type_helper(get_acgan_model, namedtuples.ACGANModel)
-
- def test_output_type_callable_acgan(self):
- self._test_output_type_helper(
- get_callable_acgan_model, namedtuples.ACGANModel)
-
- def test_output_type_cyclegan(self):
- self._test_output_type_helper(get_cyclegan_model, namedtuples.CycleGANModel)
-
- def test_output_type_callable_cyclegan(self):
- self._test_output_type_helper(get_callable_cyclegan_model,
- namedtuples.CycleGANModel)
+ @parameterized.named_parameters(
+ ('gan', get_gan_model, namedtuples.GANModel),
+ ('callable_gan', get_callable_gan_model, namedtuples.GANModel),
+ ('infogan', get_infogan_model, namedtuples.InfoGANModel),
+ ('callable_infogan', get_callable_infogan_model,
+ namedtuples.InfoGANModel),
+ ('acgan', get_acgan_model, namedtuples.ACGANModel),
+ ('callable_acgan', get_callable_acgan_model, namedtuples.ACGANModel),
+ ('cyclegan', get_cyclegan_model, namedtuples.CycleGANModel),
+ ('callable_cyclegan', get_callable_cyclegan_model,
+ namedtuples.CycleGANModel),
+ )
+ def test_output_type(self, create_fn, expected_tuple_type):
+ """Test that output type is as expected."""
+ self.assertIsInstance(create_fn(), expected_tuple_type)
def test_no_shape_check(self):
+
def dummy_generator_model(_):
return (None, None)
+
def dummy_discriminator_model(data, conditioning): # pylint: disable=unused-argument
return 1
+
with self.assertRaisesRegexp(AttributeError, 'object has no attribute'):
train.gan_model(
dummy_generator_model,
@@ -320,52 +320,182 @@ class GANModelTest(test.TestCase):
check_shapes=False)
-class GANLossTest(test.TestCase):
- """Tests for `gan_loss`."""
-
- # Test output type.
- def _test_output_type_helper(self, get_gan_model_fn):
- loss = train.gan_loss(get_gan_model_fn(), add_summaries=True)
- self.assertTrue(isinstance(loss, namedtuples.GANLoss))
- self.assertGreater(len(ops.get_collection(ops.GraphKeys.SUMMARIES)), 0)
+class StarGANModelTest(test.TestCase):
+ """Tests for `stargan_model`."""
+
+ @staticmethod
+ def create_input_and_label_tensor(batch_size, img_size, c_size, num_domains):
+ input_tensor_list = []
+ label_tensor_list = []
+ for _ in range(num_domains):
+ input_tensor_list.append(
+ random_ops.random_uniform((batch_size, img_size, img_size, c_size)))
+ domain_idx = random_ops.random_uniform(
+ [batch_size], minval=0, maxval=num_domains, dtype=dtypes.int32)
+ label_tensor_list.append(array_ops.one_hot(domain_idx, num_domains))
+ return input_tensor_list, label_tensor_list
+
+ def test_generate_stargan_random_domain_target(self):
+ batch_size = 8
+ domain_numbers = 3
+
+ target_tensor = train._generate_stargan_random_domain_target(
+ batch_size, domain_numbers)
+
+ with self.test_session() as sess:
+ targets = sess.run(target_tensor)
+ self.assertTupleEqual((batch_size, domain_numbers), targets.shape)
+ for target in targets:
+ self.assertEqual(1, np.sum(target))
+ self.assertEqual(1, np.max(target))
+
+ def test_stargan_model_output_type(self):
+ batch_size = 2
+ img_size = 16
+ c_size = 3
+ num_domains = 5
+
+ input_tensor, label_tensor = StarGANModelTest.create_input_and_label_tensor(
+ batch_size, img_size, c_size, num_domains)
+ model = train.stargan_model(
+ generator_fn=stargan_generator_model,
+ discriminator_fn=stargan_discriminator_model,
+ input_data=input_tensor,
+ input_data_domain_label=label_tensor)
+
+ self.assertIsInstance(model, namedtuples.StarGANModel)
+    self.assertIsInstance(model.discriminator_variables, list)
+    self.assertIsInstance(model.generator_variables, list)
+    self.assertIsInstance(model.discriminator_scope,
+                          variable_scope.VariableScope)
+    self.assertIsInstance(model.generator_scope,
+                          variable_scope.VariableScope)
+ self.assertTrue(callable(model.discriminator_fn))
+ self.assertTrue(callable(model.generator_fn))
+
+ def test_stargan_model_generator_output(self):
+ batch_size = 2
+ img_size = 16
+ c_size = 3
+ num_domains = 5
+
+ input_tensor, label_tensor = StarGANModelTest.create_input_and_label_tensor(
+ batch_size, img_size, c_size, num_domains)
+ model = train.stargan_model(
+ generator_fn=stargan_generator_model,
+ discriminator_fn=stargan_discriminator_model,
+ input_data=input_tensor,
+ input_data_domain_label=label_tensor)
- def test_output_type_gan(self):
- self._test_output_type_helper(get_gan_model)
+ with self.test_session(use_gpu=True) as sess:
- def test_output_type_callable_gan(self):
- self._test_output_type_helper(get_callable_gan_model)
+ sess.run(variables.global_variables_initializer())
- def test_output_type_infogan(self):
- self._test_output_type_helper(get_infogan_model)
+ input_data, generated_data, reconstructed_data = sess.run(
+ [model.input_data, model.generated_data, model.reconstructed_data])
+ self.assertTupleEqual(
+ (batch_size * num_domains, img_size, img_size, c_size),
+ input_data.shape)
+ self.assertTupleEqual(
+ (batch_size * num_domains, img_size, img_size, c_size),
+ generated_data.shape)
+ self.assertTupleEqual(
+ (batch_size * num_domains, img_size, img_size, c_size),
+ reconstructed_data.shape)
+
+ def test_stargan_model_discriminator_output(self):
+ batch_size = 2
+ img_size = 16
+ c_size = 3
+ num_domains = 5
+
+ input_tensor, label_tensor = StarGANModelTest.create_input_and_label_tensor(
+ batch_size, img_size, c_size, num_domains)
+ model = train.stargan_model(
+ generator_fn=stargan_generator_model,
+ discriminator_fn=stargan_discriminator_model,
+ input_data=input_tensor,
+ input_data_domain_label=label_tensor)
- def test_output_type_callable_infogan(self):
- self._test_output_type_helper(get_callable_infogan_model)
+ with self.test_session(use_gpu=True) as sess:
- def test_output_type_acgan(self):
- self._test_output_type_helper(get_acgan_model)
+ sess.run(variables.global_variables_initializer())
- def test_output_type_callable_acgan(self):
- self._test_output_type_helper(get_callable_acgan_model)
+ disc_input_data_source_pred, disc_gen_data_source_pred = sess.run([
+ model.discriminator_input_data_source_predication,
+ model.discriminator_generated_data_source_predication
+ ])
+ self.assertEqual(1, len(disc_input_data_source_pred.shape))
+ self.assertEqual(batch_size * num_domains,
+ disc_input_data_source_pred.shape[0])
+ self.assertEqual(1, len(disc_gen_data_source_pred.shape))
+ self.assertEqual(batch_size * num_domains,
+ disc_gen_data_source_pred.shape[0])
+
+ input_label, disc_input_label, gen_label, disc_gen_label = sess.run([
+ model.input_data_domain_label,
+ model.discriminator_input_data_domain_predication,
+ model.generated_data_domain_target,
+ model.discriminator_generated_data_domain_predication
+ ])
+ self.assertTupleEqual((batch_size * num_domains, num_domains),
+ input_label.shape)
+ self.assertTupleEqual((batch_size * num_domains, num_domains),
+ disc_input_label.shape)
+ self.assertTupleEqual((batch_size * num_domains, num_domains),
+ gen_label.shape)
+ self.assertTupleEqual((batch_size * num_domains, num_domains),
+ disc_gen_label.shape)
+
+
+class GANLossTest(test.TestCase, parameterized.TestCase):
+ """Tests for `gan_loss`."""
- def test_output_type_cyclegan(self):
- loss = train.cyclegan_loss(create_cyclegan_model(), add_summaries=True)
- self.assertIsInstance(loss, namedtuples.CycleGANLoss)
+ @parameterized.named_parameters(
+ ('gan', get_gan_model),
+ ('callable_gan', get_callable_gan_model),
+ ('infogan', get_infogan_model),
+ ('callable_infogan', get_callable_infogan_model),
+ ('acgan', get_acgan_model),
+ ('callable_acgan', get_callable_acgan_model),
+ )
+ def test_output_type(self, get_gan_model_fn):
+ """Test output type."""
+ loss = train.gan_loss(get_gan_model_fn(), add_summaries=True)
+ self.assertIsInstance(loss, namedtuples.GANLoss)
self.assertGreater(len(ops.get_collection(ops.GraphKeys.SUMMARIES)), 0)
- def test_output_type_callable_cyclegan(self):
- loss = train.cyclegan_loss(
- create_callable_cyclegan_model(), add_summaries=True)
+ @parameterized.named_parameters(
+ ('cyclegan', create_cyclegan_model),
+ ('callable_cyclegan', create_callable_cyclegan_model),
+ )
+ def test_cyclegan_output_type(self, get_gan_model_fn):
+ loss = train.cyclegan_loss(get_gan_model_fn(), add_summaries=True)
self.assertIsInstance(loss, namedtuples.CycleGANLoss)
self.assertGreater(len(ops.get_collection(ops.GraphKeys.SUMMARIES)), 0)
- # Test gradient penalty option.
- def _test_grad_penalty_helper(self, create_gan_model_fn, one_sided=False):
+ @parameterized.named_parameters(
+ ('gan', create_gan_model, False),
+ ('gan_one_sided', create_gan_model, True),
+ ('callable_gan', create_callable_gan_model, False),
+ ('callable_gan_one_sided', create_callable_gan_model, True),
+ ('infogan', create_infogan_model, False),
+ ('infogan_one_sided', create_infogan_model, True),
+ ('callable_infogan', create_callable_infogan_model, False),
+ ('callable_infogan_one_sided', create_callable_infogan_model, True),
+ ('acgan', create_acgan_model, False),
+ ('acgan_one_sided', create_acgan_model, True),
+ ('callable_acgan', create_callable_acgan_model, False),
+ ('callable_acgan_one_sided', create_callable_acgan_model, True),
+ )
+ def test_grad_penalty(self, create_gan_model_fn, one_sided):
+ """Test gradient penalty option."""
model = create_gan_model_fn()
loss = train.gan_loss(model)
- loss_gp = train.gan_loss(model,
- gradient_penalty_weight=1.0,
- gradient_penalty_one_sided=one_sided)
- self.assertTrue(isinstance(loss_gp, namedtuples.GANLoss))
+ loss_gp = train.gan_loss(
+ model,
+ gradient_penalty_weight=1.0,
+ gradient_penalty_one_sided=one_sided)
+ self.assertIsInstance(loss_gp, namedtuples.GANLoss)
# Check values.
with self.test_session(use_gpu=True) as sess:
@@ -376,58 +506,28 @@ class GANLossTest(test.TestCase):
[loss.discriminator_loss, loss_gp.discriminator_loss])
self.assertEqual(loss_gen_np, loss_gen_gp_np)
- self.assertTrue(loss_dis_np < loss_dis_gp_np)
-
- def test_grad_penalty_gan(self):
- self._test_grad_penalty_helper(create_gan_model)
-
- def test_grad_penalty_callable_gan(self):
- self._test_grad_penalty_helper(create_callable_gan_model)
-
- def test_grad_penalty_infogan(self):
- self._test_grad_penalty_helper(create_infogan_model)
-
- def test_grad_penalty_callable_infogan(self):
- self._test_grad_penalty_helper(create_callable_infogan_model)
-
- def test_grad_penalty_acgan(self):
- self._test_grad_penalty_helper(create_acgan_model)
-
- def test_grad_penalty_callable_acgan(self):
- self._test_grad_penalty_helper(create_callable_acgan_model)
-
- def test_grad_penalty_one_sided_gan(self):
- self._test_grad_penalty_helper(create_gan_model, one_sided=True)
-
- def test_grad_penalty_one_sided_callable_gan(self):
- self._test_grad_penalty_helper(create_callable_gan_model, one_sided=True)
-
- def test_grad_penalty_one_sided_infogan(self):
- self._test_grad_penalty_helper(create_infogan_model, one_sided=True)
-
- def test_grad_penalty_one_sided_callable_infogan(self):
- self._test_grad_penalty_helper(
- create_callable_infogan_model, one_sided=True)
-
- def test_grad_penalty_one_sided_acgan(self):
- self._test_grad_penalty_helper(create_acgan_model, one_sided=True)
-
- def test_grad_penalty_one_sided_callable_acgan(self):
- self._test_grad_penalty_helper(create_callable_acgan_model, one_sided=True)
-
- # Test mutual information penalty option.
- def _test_mutual_info_penalty_helper(self, create_gan_model_fn):
- train.gan_loss(create_gan_model_fn(),
- mutual_information_penalty_weight=constant_op.constant(1.0))
-
- def test_mutual_info_penalty_infogan(self):
- self._test_mutual_info_penalty_helper(get_infogan_model)
-
- def test_mutual_info_penalty_callable_infogan(self):
- self._test_mutual_info_penalty_helper(get_callable_infogan_model)
-
- # Test regularization loss.
- def _test_regularization_helper(self, get_gan_model_fn):
+ self.assertLess(loss_dis_np, loss_dis_gp_np)
+
+ @parameterized.named_parameters(
+ ('infogan', get_infogan_model),
+ ('callable_infogan', get_callable_infogan_model),
+ )
+ def test_mutual_info_penalty(self, create_gan_model_fn):
+ """Test mutual information penalty option."""
+ train.gan_loss(
+ create_gan_model_fn(),
+ mutual_information_penalty_weight=constant_op.constant(1.0))
+
+ @parameterized.named_parameters(
+ ('gan', get_gan_model),
+ ('callable_gan', get_callable_gan_model),
+ ('infogan', get_infogan_model),
+ ('callable_infogan', get_callable_infogan_model),
+ ('acgan', get_acgan_model),
+ ('callable_acgan', get_callable_acgan_model),
+ )
+ def test_regularization_helper(self, get_gan_model_fn):
+ """Test regularization loss."""
# Evaluate losses without regularization.
no_reg_loss = train.gan_loss(get_gan_model_fn())
with self.test_session(use_gpu=True):
@@ -435,11 +535,11 @@ class GANLossTest(test.TestCase):
no_reg_loss_dis_np = no_reg_loss.discriminator_loss.eval()
with ops.name_scope(get_gan_model_fn().generator_scope.name):
- ops.add_to_collection(
- ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(3.0))
+ ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES,
+ constant_op.constant(3.0))
with ops.name_scope(get_gan_model_fn().discriminator_scope.name):
- ops.add_to_collection(
- ops.GraphKeys.REGULARIZATION_LOSSES, constant_op.constant(2.0))
+ ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES,
+ constant_op.constant(2.0))
# Check that losses now include the correct regularization values.
reg_loss = train.gan_loss(get_gan_model_fn())
@@ -447,63 +547,47 @@ class GANLossTest(test.TestCase):
reg_loss_gen_np = reg_loss.generator_loss.eval()
reg_loss_dis_np = reg_loss.discriminator_loss.eval()
- self.assertTrue(3.0, reg_loss_gen_np - no_reg_loss_gen_np)
- self.assertTrue(3.0, reg_loss_dis_np - no_reg_loss_dis_np)
-
- def test_regularization_gan(self):
- self._test_regularization_helper(get_gan_model)
-
- def test_regularization_callable_gan(self):
- self._test_regularization_helper(get_callable_gan_model)
-
- def test_regularization_infogan(self):
- self._test_regularization_helper(get_infogan_model)
-
- def test_regularization_callable_infogan(self):
- self._test_regularization_helper(get_callable_infogan_model)
+ self.assertEqual(3.0, reg_loss_gen_np - no_reg_loss_gen_np)
+ self.assertEqual(2.0, reg_loss_dis_np - no_reg_loss_dis_np)
- def test_regularization_acgan(self):
- self._test_regularization_helper(get_acgan_model)
-
- def test_regularization_callable_acgan(self):
- self._test_regularization_helper(get_callable_acgan_model)
-
- # Test that ACGan models work.
- def _test_acgan_helper(self, create_gan_model_fn):
+ @parameterized.named_parameters(
+ ('notcallable', create_acgan_model),
+ ('callable', create_callable_acgan_model),
+ )
+ def test_acgan(self, create_gan_model_fn):
+ """Test that ACGAN models work."""
model = create_gan_model_fn()
loss = train.gan_loss(model)
loss_ac_gen = train.gan_loss(model, aux_cond_generator_weight=1.0)
loss_ac_dis = train.gan_loss(model, aux_cond_discriminator_weight=1.0)
- self.assertTrue(isinstance(loss, namedtuples.GANLoss))
- self.assertTrue(isinstance(loss_ac_gen, namedtuples.GANLoss))
- self.assertTrue(isinstance(loss_ac_dis, namedtuples.GANLoss))
+ self.assertIsInstance(loss, namedtuples.GANLoss)
+ self.assertIsInstance(loss_ac_gen, namedtuples.GANLoss)
+ self.assertIsInstance(loss_ac_dis, namedtuples.GANLoss)
# Check values.
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
- loss_gen_np, loss_ac_gen_gen_np, loss_ac_dis_gen_np = sess.run(
- [loss.generator_loss,
- loss_ac_gen.generator_loss,
- loss_ac_dis.generator_loss])
- loss_dis_np, loss_ac_gen_dis_np, loss_ac_dis_dis_np = sess.run(
- [loss.discriminator_loss,
- loss_ac_gen.discriminator_loss,
- loss_ac_dis.discriminator_loss])
-
- self.assertTrue(loss_gen_np < loss_dis_np)
+ loss_gen_np, loss_ac_gen_gen_np, loss_ac_dis_gen_np = sess.run([
+ loss.generator_loss, loss_ac_gen.generator_loss,
+ loss_ac_dis.generator_loss
+ ])
+ loss_dis_np, loss_ac_gen_dis_np, loss_ac_dis_dis_np = sess.run([
+ loss.discriminator_loss, loss_ac_gen.discriminator_loss,
+ loss_ac_dis.discriminator_loss
+ ])
+
+ self.assertLess(loss_gen_np, loss_dis_np)
self.assertTrue(np.isscalar(loss_ac_gen_gen_np))
self.assertTrue(np.isscalar(loss_ac_dis_gen_np))
self.assertTrue(np.isscalar(loss_ac_gen_dis_np))
self.assertTrue(np.isscalar(loss_ac_dis_dis_np))
- def test_acgan(self):
- self._test_acgan_helper(create_acgan_model)
-
- def test_callable_acgan(self):
- self._test_acgan_helper(create_callable_acgan_model)
-
- # Test that CycleGan models work.
- def _test_cyclegan_helper(self, create_gan_model_fn):
+ @parameterized.named_parameters(
+ ('notcallable', create_cyclegan_model),
+ ('callable', create_callable_cyclegan_model),
+ )
+ def test_cyclegan(self, create_gan_model_fn):
+ """Test that CycleGan models work."""
model = create_gan_model_fn()
loss = train.cyclegan_loss(model)
self.assertIsInstance(loss, namedtuples.CycleGANLoss)
@@ -524,14 +608,65 @@ class GANLossTest(test.TestCase):
self.assertTrue(np.isscalar(loss_y2x_gen_np))
self.assertTrue(np.isscalar(loss_y2x_dis_np))
- def test_cyclegan(self):
- self._test_cyclegan_helper(create_cyclegan_model)
+ @parameterized.named_parameters(
+ ('gan', create_gan_model),
+ ('callable_gan', create_callable_gan_model),
+ ('infogan', create_infogan_model),
+ ('callable_infogan', create_callable_infogan_model),
+ ('acgan', create_acgan_model),
+ ('callable_acgan', create_callable_acgan_model),
+ )
+ def test_tensor_pool(self, create_gan_model_fn):
+ """Test tensor pool option."""
+ model = create_gan_model_fn()
+ tensor_pool_fn = lambda x: random_tensor_pool.tensor_pool(x, pool_size=5)
+ loss = train.gan_loss(model, tensor_pool_fn=tensor_pool_fn)
+ self.assertIsInstance(loss, namedtuples.GANLoss)
+
+ # Check values.
+ with self.test_session(use_gpu=True) as sess:
+ variables.global_variables_initializer().run()
+ for _ in range(10):
+ sess.run([loss.generator_loss, loss.discriminator_loss])
+
+ def test_discriminator_only_sees_pool(self):
+ """Checks that discriminator only sees pooled values."""
+ def checker_gen_fn(_):
+ return constant_op.constant(0.0)
+ model = train.gan_model(
+ checker_gen_fn,
+ discriminator_model,
+ real_data=array_ops.zeros([]),
+ generator_inputs=random_ops.random_normal([]))
+ def tensor_pool_fn(_):
+ return (random_ops.random_uniform([]), random_ops.random_uniform([]))
+ def checker_dis_fn(inputs, _):
+ """Discriminator that checks that it only sees pooled Tensors."""
+ self.assertFalse(constant_op.is_constant(inputs))
+ return inputs
+ model = model._replace(
+ discriminator_fn=checker_dis_fn)
+ train.gan_loss(model, tensor_pool_fn=tensor_pool_fn)
+
+ def test_doesnt_crash_when_in_nested_scope(self):
+ with variable_scope.variable_scope('outer_scope'):
+ gan_model = train.gan_model(
+ generator_model,
+ discriminator_model,
+ real_data=array_ops.zeros([1, 2]),
+ generator_inputs=random_ops.random_normal([1, 2]))
- def test_callable_cyclegan(self):
- self._test_cyclegan_helper(create_callable_cyclegan_model)
+ # This should work inside a scope.
+ train.gan_loss(gan_model, gradient_penalty_weight=1.0)
- def _check_tensor_pool_adjusted_model_outputs(self, tensor1, tensor2,
- pool_size):
+ # This should also work outside a scope.
+ train.gan_loss(gan_model, gradient_penalty_weight=1.0)
+
+
+class TensorPoolAdjustedModelTest(test.TestCase):
+
+ def _check_tensor_pool_adjusted_model_outputs(
+ self, tensor1, tensor2, pool_size):
history_values = []
with self.test_session(use_gpu=True) as sess:
variables.global_variables_initializer().run()
@@ -548,115 +683,66 @@ class GANLossTest(test.TestCase):
# pool).
self.assertTrue(any([(v == t2).all() for v in history_values]))
- # Test `_tensor_pool_adjusted_model` for gan model.
- def test_tensor_pool_adjusted_model_gan(self):
- model = create_gan_model()
-
- new_model = train._tensor_pool_adjusted_model(model, None)
+ def _make_new_model_and_check(self, model, pool_size):
+ pool_fn = lambda x: random_tensor_pool.tensor_pool(x, pool_size=pool_size)
+ new_model = train._tensor_pool_adjusted_model(model, pool_fn)
# 'Generator/dummy_g:0' and 'Discriminator/dummy_d:0'
self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.VARIABLES)))
- self.assertIs(new_model.discriminator_gen_outputs,
- model.discriminator_gen_outputs)
-
- pool_size = 5
- new_model = train._tensor_pool_adjusted_model(
- model, get_tensor_pool_fn(pool_size=pool_size))
self.assertIsNot(new_model.discriminator_gen_outputs,
model.discriminator_gen_outputs)
+
+ return new_model
+
+ def test_tensor_pool_adjusted_model_gan(self):
+ """Test `_tensor_pool_adjusted_model` for gan model."""
+ pool_size = 5
+ model = create_gan_model()
+ new_model = self._make_new_model_and_check(model, pool_size)
+
# Check values.
self._check_tensor_pool_adjusted_model_outputs(
model.discriminator_gen_outputs, new_model.discriminator_gen_outputs,
pool_size)
- # Test _tensor_pool_adjusted_model for infogan model.
def test_tensor_pool_adjusted_model_infogan(self):
+ """Test _tensor_pool_adjusted_model for infogan model."""
+ pool_size = 5
model = create_infogan_model()
+ new_model = self._make_new_model_and_check(model, pool_size)
- pool_size = 5
- new_model = train._tensor_pool_adjusted_model(
- model, get_tensor_pool_fn_for_infogan(pool_size=pool_size))
- # 'Generator/dummy_g:0' and 'Discriminator/dummy_d:0'
- self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.VARIABLES)))
- self.assertIsNot(new_model.discriminator_gen_outputs,
- model.discriminator_gen_outputs)
+ # Check values.
self.assertIsNot(new_model.predicted_distributions,
model.predicted_distributions)
- # Check values.
self._check_tensor_pool_adjusted_model_outputs(
model.discriminator_gen_outputs, new_model.discriminator_gen_outputs,
pool_size)
- # Test _tensor_pool_adjusted_model for acgan model.
def test_tensor_pool_adjusted_model_acgan(self):
+ """Test _tensor_pool_adjusted_model for acgan model."""
+ pool_size = 5
model = create_acgan_model()
+ new_model = self._make_new_model_and_check(model, pool_size)
- pool_size = 5
- new_model = train._tensor_pool_adjusted_model(
- model, get_tensor_pool_fn(pool_size=pool_size))
- # 'Generator/dummy_g:0' and 'Discriminator/dummy_d:0'
- self.assertEqual(2, len(ops.get_collection(ops.GraphKeys.VARIABLES)))
- self.assertIsNot(new_model.discriminator_gen_outputs,
- model.discriminator_gen_outputs)
+ # Check values.
self.assertIsNot(new_model.discriminator_gen_classification_logits,
model.discriminator_gen_classification_logits)
- # Check values.
self._check_tensor_pool_adjusted_model_outputs(
model.discriminator_gen_outputs, new_model.discriminator_gen_outputs,
pool_size)
- # Test tensor pool.
- def _test_tensor_pool_helper(self, create_gan_model_fn):
- model = create_gan_model_fn()
- if isinstance(model, namedtuples.InfoGANModel):
- tensor_pool_fn = get_tensor_pool_fn_for_infogan(pool_size=5)
- else:
- tensor_pool_fn = get_tensor_pool_fn(pool_size=5)
- loss = train.gan_loss(model, tensor_pool_fn=tensor_pool_fn)
- self.assertTrue(isinstance(loss, namedtuples.GANLoss))
-
- # Check values.
- with self.test_session(use_gpu=True) as sess:
- variables.global_variables_initializer().run()
- for _ in range(10):
- sess.run([loss.generator_loss, loss.discriminator_loss])
-
- def test_tensor_pool_gan(self):
- self._test_tensor_pool_helper(create_gan_model)
-
- def test_tensor_pool_callable_gan(self):
- self._test_tensor_pool_helper(create_callable_gan_model)
-
- def test_tensor_pool_infogan(self):
- self._test_tensor_pool_helper(create_infogan_model)
-
- def test_tensor_pool_callable_infogan(self):
- self._test_tensor_pool_helper(create_callable_infogan_model)
-
- def test_tensor_pool_acgan(self):
- self._test_tensor_pool_helper(create_acgan_model)
-
- def test_tensor_pool_callable_acgan(self):
- self._test_tensor_pool_helper(create_callable_acgan_model)
-
- def test_doesnt_crash_when_in_nested_scope(self):
- with variable_scope.variable_scope('outer_scope'):
- gan_model = train.gan_model(
- generator_model,
- discriminator_model,
- real_data=array_ops.zeros([1, 2]),
- generator_inputs=random_ops.random_normal([1, 2]))
-
- # This should work inside a scope.
- train.gan_loss(gan_model, gradient_penalty_weight=1.0)
-
- # This should also work outside a scope.
- train.gan_loss(gan_model, gradient_penalty_weight=1.0)
-
-class GANTrainOpsTest(test.TestCase):
+class GANTrainOpsTest(test.TestCase, parameterized.TestCase):
"""Tests for `gan_train_ops`."""
- def _test_output_type_helper(self, create_gan_model_fn):
+ @parameterized.named_parameters(
+ ('gan', create_gan_model),
+ ('callable_gan', create_callable_gan_model),
+ ('infogan', create_infogan_model),
+ ('callable_infogan', create_callable_infogan_model),
+ ('acgan', create_acgan_model),
+ ('callable_acgan', create_callable_acgan_model),
+ )
+ def test_output_type(self, create_gan_model_fn):
model = create_gan_model_fn()
loss = train.gan_loss(model)
@@ -670,28 +756,24 @@ class GANTrainOpsTest(test.TestCase):
summarize_gradients=True,
colocate_gradients_with_ops=True)
- self.assertTrue(isinstance(train_ops, namedtuples.GANTrainOps))
-
- def test_output_type_gan(self):
- self._test_output_type_helper(create_gan_model)
-
- def test_output_type_callable_gan(self):
- self._test_output_type_helper(create_callable_gan_model)
-
- def test_output_type_infogan(self):
- self._test_output_type_helper(create_infogan_model)
-
- def test_output_type_callable_infogan(self):
- self._test_output_type_helper(create_callable_infogan_model)
-
- def test_output_type_acgan(self):
- self._test_output_type_helper(create_acgan_model)
-
- def test_output_type_callable_acgan(self):
- self._test_output_type_helper(create_callable_acgan_model)
+ self.assertIsInstance(train_ops, namedtuples.GANTrainOps)
# TODO(joelshor): Add a test to check that custom update op is run.
- def _test_unused_update_ops(self, create_gan_model_fn, provide_update_ops):
+ @parameterized.named_parameters(
+ ('gan', create_gan_model, False),
+ ('gan_provideupdates', create_gan_model, True),
+ ('callable_gan', create_callable_gan_model, False),
+ ('callable_gan_provideupdates', create_callable_gan_model, True),
+ ('infogan', create_infogan_model, False),
+ ('infogan_provideupdates', create_infogan_model, True),
+ ('callable_infogan', create_callable_infogan_model, False),
+ ('callable_infogan_provideupdates', create_callable_infogan_model, True),
+ ('acgan', create_acgan_model, False),
+ ('acgan_provideupdates', create_acgan_model, True),
+ ('callable_acgan', create_callable_acgan_model, False),
+ ('callable_acgan_provideupdates', create_callable_acgan_model, True),
+ )
+ def test_unused_update_ops(self, create_gan_model_fn, provide_update_ops):
model = create_gan_model_fn()
loss = train.gan_loss(model)
@@ -707,8 +789,11 @@ class GANTrainOpsTest(test.TestCase):
# Add an update op outside the generator and discriminator scopes.
if provide_update_ops:
- kwargs = {'update_ops':
- [constant_op.constant(1.0), gen_update_op, dis_update_op]}
+ kwargs = {
+ 'update_ops': [
+ constant_op.constant(1.0), gen_update_op, dis_update_op
+ ]
+ }
else:
ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, constant_op.constant(1.0))
kwargs = {}
@@ -717,8 +802,8 @@ class GANTrainOpsTest(test.TestCase):
d_opt = gradient_descent.GradientDescentOptimizer(1.0)
with self.assertRaisesRegexp(ValueError, 'There are unused update ops:'):
- train.gan_train_ops(model, loss, g_opt, d_opt,
- check_for_unused_update_ops=True, **kwargs)
+ train.gan_train_ops(
+ model, loss, g_opt, d_opt, check_for_unused_update_ops=True, **kwargs)
train_ops = train.gan_train_ops(
model, loss, g_opt, d_opt, check_for_unused_update_ops=False, **kwargs)
@@ -735,44 +820,16 @@ class GANTrainOpsTest(test.TestCase):
self.assertEqual(1, gen_update_count.eval())
self.assertEqual(1, dis_update_count.eval())
- def test_unused_update_ops_gan(self):
- self._test_unused_update_ops(create_gan_model, False)
-
- def test_unused_update_ops_gan_provideupdates(self):
- self._test_unused_update_ops(create_gan_model, True)
-
- def test_unused_update_ops_callable_gan(self):
- self._test_unused_update_ops(create_callable_gan_model, False)
-
- def test_unused_update_ops_callable_gan_provideupdates(self):
- self._test_unused_update_ops(create_callable_gan_model, True)
-
- def test_unused_update_ops_infogan(self):
- self._test_unused_update_ops(create_infogan_model, False)
-
- def test_unused_update_ops_infogan_provideupdates(self):
- self._test_unused_update_ops(create_infogan_model, True)
-
- def test_unused_update_ops_callable_infogan(self):
- self._test_unused_update_ops(create_callable_infogan_model, False)
-
- def test_unused_update_ops_callable_infogan_provideupdates(self):
- self._test_unused_update_ops(create_callable_infogan_model, True)
-
- def test_unused_update_ops_acgan(self):
- self._test_unused_update_ops(create_acgan_model, False)
-
- def test_unused_update_ops_acgan_provideupdates(self):
- self._test_unused_update_ops(create_acgan_model, True)
-
- def test_unused_update_ops_callable_acgan(self):
- self._test_unused_update_ops(create_callable_acgan_model, False)
-
- def test_unused_update_ops_callable_acgan_provideupdates(self):
- self._test_unused_update_ops(create_callable_acgan_model, True)
-
- def _test_sync_replicas_helper(
- self, create_gan_model_fn, create_global_step=False):
+ @parameterized.named_parameters(
+ ('gan', create_gan_model, False),
+ ('callable_gan', create_callable_gan_model, False),
+ ('infogan', create_infogan_model, False),
+ ('callable_infogan', create_callable_infogan_model, False),
+ ('acgan', create_acgan_model, False),
+ ('callable_acgan', create_callable_acgan_model, False),
+ ('gan_canbeint32', create_gan_model, True),
+ )
+ def test_sync_replicas(self, create_gan_model_fn, create_global_step):
model = create_gan_model_fn()
loss = train.gan_loss(model)
num_trainable_vars = len(variables_lib.get_trainable_variables())
@@ -785,11 +842,8 @@ class GANTrainOpsTest(test.TestCase):
g_opt = get_sync_optimizer()
d_opt = get_sync_optimizer()
train_ops = train.gan_train_ops(
- model,
- loss,
- generator_optimizer=g_opt,
- discriminator_optimizer=d_opt)
- self.assertTrue(isinstance(train_ops, namedtuples.GANTrainOps))
+ model, loss, generator_optimizer=g_opt, discriminator_optimizer=d_opt)
+ self.assertIsInstance(train_ops, namedtuples.GANTrainOps)
# No new trainable variables should have been added.
self.assertEqual(num_trainable_vars,
len(variables_lib.get_trainable_variables()))
@@ -827,29 +881,8 @@ class GANTrainOpsTest(test.TestCase):
coord.request_stop()
coord.join(g_threads + d_threads)
- def test_sync_replicas_gan(self):
- self._test_sync_replicas_helper(create_gan_model)
-
- def test_sync_replicas_callable_gan(self):
- self._test_sync_replicas_helper(create_callable_gan_model)
-
- def test_sync_replicas_infogan(self):
- self._test_sync_replicas_helper(create_infogan_model)
-
- def test_sync_replicas_callable_infogan(self):
- self._test_sync_replicas_helper(create_callable_infogan_model)
-
- def test_sync_replicas_acgan(self):
- self._test_sync_replicas_helper(create_acgan_model)
- def test_sync_replicas_callable_acgan(self):
- self._test_sync_replicas_helper(create_callable_acgan_model)
-
- def test_global_step_can_be_int32(self):
- self._test_sync_replicas_helper(create_gan_model, create_global_step=True)
-
-
-class GANTrainTest(test.TestCase):
+class GANTrainTest(test.TestCase, parameterized.TestCase):
"""Tests for `gan_train`."""
def _gan_train_ops(self, generator_add, discriminator_add):
@@ -860,12 +893,20 @@ class GANTrainTest(test.TestCase):
# joint training.
train_ops = namedtuples.GANTrainOps(
generator_train_op=step.assign_add(generator_add, use_locking=True),
- discriminator_train_op=step.assign_add(discriminator_add,
- use_locking=True),
+ discriminator_train_op=step.assign_add(
+ discriminator_add, use_locking=True),
global_step_inc_op=step.assign_add(1))
return train_ops
- def _test_run_helper(self, create_gan_model_fn):
+ @parameterized.named_parameters(
+ ('gan', create_gan_model),
+ ('callable_gan', create_callable_gan_model),
+ ('infogan', create_infogan_model),
+ ('callable_infogan', create_callable_infogan_model),
+ ('acgan', create_acgan_model),
+ ('callable_acgan', create_callable_acgan_model),
+ )
+ def test_run_helper(self, create_gan_model_fn):
random_seed.set_random_seed(1234)
model = create_gan_model_fn()
loss = train.gan_loss(model)
@@ -881,30 +922,15 @@ class GANTrainTest(test.TestCase):
self.assertTrue(np.isscalar(final_step))
self.assertEqual(2, final_step)
- def test_run_gan(self):
- self._test_run_helper(create_gan_model)
-
- def test_run_callable_gan(self):
- self._test_run_helper(create_callable_gan_model)
-
- def test_run_infogan(self):
- self._test_run_helper(create_infogan_model)
-
- def test_run_callable_infogan(self):
- self._test_run_helper(create_callable_infogan_model)
-
- def test_run_acgan(self):
- self._test_run_helper(create_acgan_model)
-
- def test_run_callable_acgan(self):
- self._test_run_helper(create_callable_acgan_model)
-
- # Test multiple train steps.
- def _test_multiple_steps_helper(self, get_hooks_fn_fn):
+ @parameterized.named_parameters(
+ ('seq_train_steps', train.get_sequential_train_hooks),
+ ('efficient_seq_train_steps', train.get_joint_train_hooks),
+ )
+ def test_multiple_steps(self, get_hooks_fn_fn):
+ """Test multiple train steps."""
train_ops = self._gan_train_ops(generator_add=10, discriminator_add=100)
train_steps = namedtuples.GANTrainSteps(
- generator_train_steps=3,
- discriminator_train_steps=4)
+ generator_train_steps=3, discriminator_train_steps=4)
final_step = train.gan_train(
train_ops,
get_hooks_fn=get_hooks_fn_fn(train_steps),
@@ -914,12 +940,6 @@ class GANTrainTest(test.TestCase):
self.assertTrue(np.isscalar(final_step))
self.assertEqual(1 + 3 * 10 + 4 * 100, final_step)
- def test_multiple_steps_seq_train_steps(self):
- self._test_multiple_steps_helper(train.get_sequential_train_hooks)
-
- def test_multiple_steps_efficient_seq_train_steps(self):
- self._test_multiple_steps_helper(train.get_joint_train_hooks)
-
def test_supervisor_run_gan_model_train_ops_multiple_steps(self):
step = training_util.create_global_step()
train_ops = namedtuples.GANTrainOps(
@@ -927,8 +947,7 @@ class GANTrainTest(test.TestCase):
discriminator_train_op=constant_op.constant(2.0),
global_step_inc_op=step.assign_add(1))
train_steps = namedtuples.GANTrainSteps(
- generator_train_steps=3,
- discriminator_train_steps=4)
+ generator_train_steps=3, discriminator_train_steps=4)
final_loss = slim_learning.train(
train_op=train_ops,
@@ -940,10 +959,18 @@ class GANTrainTest(test.TestCase):
self.assertEqual(17.0, final_loss)
-class PatchGANTest(test.TestCase):
+class PatchGANTest(test.TestCase, parameterized.TestCase):
"""Tests that functions work on PatchGAN style output."""
- def _test_patchgan_helper(self, create_gan_model_fn):
+ @parameterized.named_parameters(
+ ('gan', create_gan_model),
+ ('callable_gan', create_callable_gan_model),
+ ('infogan', create_infogan_model),
+ ('callable_infogan', create_callable_infogan_model),
+ ('acgan', create_acgan_model),
+ ('callable_acgan', create_callable_acgan_model),
+ )
+ def test_patchgan(self, create_gan_model_fn):
"""Ensure that patch-based discriminators work end-to-end."""
random_seed.set_random_seed(1234)
model = create_gan_model_fn()
@@ -960,24 +987,6 @@ class PatchGANTest(test.TestCase):
self.assertTrue(np.isscalar(final_step))
self.assertEqual(2, final_step)
- def test_patchgan_gan(self):
- self._test_patchgan_helper(create_gan_model)
-
- def test_patchgan_callable_gan(self):
- self._test_patchgan_helper(create_callable_gan_model)
-
- def test_patchgan_infogan(self):
- self._test_patchgan_helper(create_infogan_model)
-
- def test_patchgan_callable_infogan(self):
- self._test_patchgan_helper(create_callable_infogan_model)
-
- def test_patchgan_acgan(self):
- self._test_patchgan_helper(create_acgan_model)
-
- def test_patchgan_callable_acgan(self):
- self._test_patchgan_helper(create_callable_acgan_model)
-
if __name__ == '__main__':
test.main()
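Most of the churn in this test file follows a single pattern: per-model test methods are collapsed into one method decorated with `parameterized.named_parameters`, which generates a separately named test case per tuple. A small self-contained sketch of that pattern, using toy factories instead of the GAN model helpers:

    import unittest
    from absl.testing import parameterized

    class ExampleTest(parameterized.TestCase):

      @parameterized.named_parameters(
          ('list', list, []),
          ('dict', dict, {}),
      )
      def test_factory_output_type(self, factory, expected):
        # Runs once per named tuple; the tuple's name is folded into the test id.
        self.assertEqual(factory(), expected)

    if __name__ == '__main__':
      unittest.main()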
diff --git a/tensorflow/contrib/gdr/gdr_memory_manager.cc b/tensorflow/contrib/gdr/gdr_memory_manager.cc
index 1435e19109..f3bbf6b4d7 100644
--- a/tensorflow/contrib/gdr/gdr_memory_manager.cc
+++ b/tensorflow/contrib/gdr/gdr_memory_manager.cc
@@ -33,10 +33,11 @@ limitations under the License.
#include "tensorflow/core/common_runtime/bfc_allocator.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
+#include "tensorflow/core/common_runtime/pool_allocator.h"
+#include "tensorflow/core/common_runtime/process_state.h"
#if GOOGLE_CUDA
#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/common_runtime/gpu/gpu_util.h"
-#include "tensorflow/core/common_runtime/process_state.h"
#endif // GOOGLE_CUDA
#include "tensorflow/core/framework/allocator_registry.h"
#include "tensorflow/core/lib/core/status.h"
@@ -182,28 +183,25 @@ class GdrMemoryManager : public RemoteMemoryManager {
TF_DISALLOW_COPY_AND_ASSIGN(GdrMemoryManager);
};
-// TODO(byronyi): remove this class duplicated from the one in
-// common/runtime/gpu/pool_allocator.h when it is available in common_runtime
-class BasicCPUAllocator : public SubAllocator {
- public:
- ~BasicCPUAllocator() override {}
-
- void* Alloc(size_t alignment, size_t num_bytes) override {
- return port::AlignedMalloc(num_bytes, alignment);
- }
- void Free(void* ptr, size_t) override { port::AlignedFree(ptr); }
-};
-
// TODO(byronyi): remove this class and its registration when the default
-// cpu_allocator() returns visitable allocator
+// cpu_allocator() returns visitable allocator, or cpu_allocator() is no
+// longer in use.
class BFCRdmaAllocator : public BFCAllocator {
public:
BFCRdmaAllocator()
- : BFCAllocator(new BasicCPUAllocator(), 1LL << 36, true, "cpu_rdma_bfc") {
+ : BFCAllocator(new BasicCPUAllocator(port::kNUMANoAffinity), 1LL << 36,
+ true, "cpu_rdma_bfc") {}
+};
+class BFCRdmaAllocatorFactory : public AllocatorFactory {
+ public:
+ Allocator* CreateAllocator() override { return new BFCRdmaAllocator; }
+
+ virtual SubAllocator* CreateSubAllocator(int numa_node) {
+ return new BasicCPUAllocator(numa_node);
}
};
-REGISTER_MEM_ALLOCATOR("BFCRdmaAllocator", 101, BFCRdmaAllocator);
+REGISTER_MEM_ALLOCATOR("BFCRdmaAllocator", 101, BFCRdmaAllocatorFactory);
GdrMemoryManager::GdrMemoryManager(const string& host, const string& port)
: host_(host),
@@ -276,8 +274,8 @@ Status GdrMemoryManager::Init() {
Allocator* allocators[] = {
#if GOOGLE_CUDA
GPUProcessState::singleton()->GetCUDAHostAllocator(0),
- ProcessState::singleton()->GetCPUAllocator(0),
#endif // GOOGLE_CUDA
+ ProcessState::singleton()->GetCPUAllocator(0),
cpu_allocator(),
};
diff --git a/tensorflow/contrib/graph_editor/reroute.py b/tensorflow/contrib/graph_editor/reroute.py
index 95c02a64d4..d42e0c01f4 100644
--- a/tensorflow/contrib/graph_editor/reroute.py
+++ b/tensorflow/contrib/graph_editor/reroute.py
@@ -208,9 +208,9 @@ def _reroute_ts(ts0, ts1, mode, can_modify=None, cannot_modify=None):
def swap_ts(ts0, ts1, can_modify=None, cannot_modify=None):
"""For each tensor's pair, swap the end of (t0,t1).
- B0 B1 B0 B1
- | | => X
- A0 A1 A0 A1
+ B0 B1 B0 B1
+ | | => X
+ A0 A1 A0 A1
Args:
ts0: an object convertible to a list of `tf.Tensor`.
@@ -233,9 +233,9 @@ def swap_ts(ts0, ts1, can_modify=None, cannot_modify=None):
def reroute_ts(ts0, ts1, can_modify=None, cannot_modify=None):
"""For each tensor's pair, replace the end of t1 by the end of t0.
- B0 B1 B0 B1
- | | => |/
- A0 A1 A0 A1
+ B0 B1 B0 B1
+ | | => |/
+ A0 A1 A0 A1
The end of the tensors in ts1 are left dangling.
diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py
index beeabd6b65..dd602cf3a9 100644
--- a/tensorflow/contrib/layers/python/layers/layers.py
+++ b/tensorflow/contrib/layers/python/layers/layers.py
@@ -1702,19 +1702,22 @@ def _inner_flatten(inputs, new_rank, output_collections=None, scope=None):
return utils.collect_named_outputs(output_collections, sc, flattened)
-def _model_variable_getter(getter,
- name,
- shape=None,
- dtype=None,
- initializer=None,
- regularizer=None,
- trainable=True,
- collections=None,
- caching_device=None,
- partitioner=None,
- rename=None,
- use_resource=None,
- **_):
+def _model_variable_getter(
+ getter,
+ name,
+ shape=None,
+ dtype=None,
+ initializer=None,
+ regularizer=None,
+ trainable=True,
+ collections=None,
+ caching_device=None,
+ partitioner=None,
+ rename=None,
+ use_resource=None,
+ synchronization=tf_variables.VariableSynchronization.AUTO,
+ aggregation=tf_variables.VariableAggregation.NONE,
+ **_):
"""Getter that uses model_variable for compatibility with core layers."""
short_name = name.split('/')[-1]
if rename and short_name in rename:
@@ -1732,7 +1735,9 @@ def _model_variable_getter(getter,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=getter,
- use_resource=use_resource)
+ use_resource=use_resource,
+ synchronization=synchronization,
+ aggregation=aggregation)
def _build_variable_getter(rename=None):
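The change above simply threads two new keyword arguments (`synchronization` and `aggregation`) through the custom getter to the underlying variable creator. A stripped-down sketch of the forwarding-getter pattern, with a stand-in `base_getter` instead of the real variable machinery (all names here are illustrative):

    def make_renaming_getter(base_getter, rename=None):
      """Wraps a getter, renaming variables and forwarding extra kwargs."""
      def getter(name, **kwargs):
        short_name = name.split('/')[-1]
        if rename and short_name in rename:
          name = name.replace(short_name, rename[short_name])
        # Newly added keyword arguments (e.g. synchronization, aggregation)
        # pass through to the underlying getter untouched.
        return base_getter(name, **kwargs)
      return getter

    created = {}
    base_getter = lambda name, **kwargs: created.setdefault(name, kwargs)
    getter = make_renaming_getter(base_getter, rename={'w': 'weights'})
    getter('dense/w', synchronization='AUTO', aggregation='NONE')
    assert 'dense/weights' in created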
diff --git a/tensorflow/contrib/learn/python/learn/estimators/head.py b/tensorflow/contrib/learn/python/learn/estimators/head.py
index 339c4e0e36..ded93d4a7f 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/head.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/head.py
@@ -563,10 +563,10 @@ def _mean_squared_loss(labels, logits, weights=None):
labels = ops.convert_to_tensor(labels)
# To prevent broadcasting inside "-".
if len(labels.get_shape()) == 1:
- labels = array_ops.expand_dims(labels, dim=(1,))
+ labels = array_ops.expand_dims(labels, axis=(1,))
# TODO(zakaria): make sure it does not recreate the broadcast bug.
if len(logits.get_shape()) == 1:
- logits = array_ops.expand_dims(logits, dim=(1,))
+ logits = array_ops.expand_dims(logits, axis=(1,))
logits.get_shape().assert_is_compatible_with(labels.get_shape())
loss = math_ops.square(logits - math_ops.to_float(labels), name=name)
return _compute_weighted_loss(loss, weights)
@@ -579,10 +579,10 @@ def _poisson_loss(labels, logits, weights=None):
labels = ops.convert_to_tensor(labels)
# To prevent broadcasting inside "-".
if len(labels.get_shape()) == 1:
- labels = array_ops.expand_dims(labels, dim=(1,))
+ labels = array_ops.expand_dims(labels, axis=(1,))
# TODO(zakaria): make sure it does not recreate the broadcast bug.
if len(logits.get_shape()) == 1:
- logits = array_ops.expand_dims(logits, dim=(1,))
+ logits = array_ops.expand_dims(logits, axis=(1,))
logits.get_shape().assert_is_compatible_with(labels.get_shape())
loss = nn.log_poisson_loss(labels, logits, compute_full_loss=True,
name=name)
@@ -797,7 +797,7 @@ def _log_loss_with_two_classes(labels, logits, weights=None):
# TODO(ptucker): This will break for dynamic shapes.
# sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.
if len(labels.get_shape()) == 1:
- labels = array_ops.expand_dims(labels, dim=(1,))
+ labels = array_ops.expand_dims(labels, axis=(1,))
loss = nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits,
name=name)
return _compute_weighted_loss(loss, weights)
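The `dim=` to `axis=` renames are mechanical; the expansion itself exists to turn rank-1 labels into a `[batch_size, 1]` column so the subtraction inside the loss stays element-wise instead of broadcasting. A NumPy sketch of the failure mode being avoided:

    import numpy as np

    labels = np.array([1.0, 2.0, 3.0])        # shape (3,)
    logits = np.array([[1.5], [2.5], [3.5]])  # shape (3, 1)

    # Without the expansion, "-" broadcasts (3, 1) against (3,) into (3, 3).
    bad = logits - labels
    assert bad.shape == (3, 3)

    # Expanding labels to a column keeps the difference element-wise.
    good = logits - np.expand_dims(labels, axis=1)
    assert good.shape == (3, 1)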
diff --git a/tensorflow/contrib/learn/python/learn/estimators/run_config.py b/tensorflow/contrib/learn/python/learn/estimators/run_config.py
index 14ee2ba609..7cb87619d9 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/run_config.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/run_config.py
@@ -240,6 +240,7 @@ class RunConfig(ClusterConfig, core_run_config.RunConfig):
keep_checkpoint_max=5,
keep_checkpoint_every_n_hours=10000,
log_step_count_steps=100,
+ protocol=None,
evaluation_master='',
model_dir=None,
session_config=None):
@@ -289,6 +290,8 @@ class RunConfig(ClusterConfig, core_run_config.RunConfig):
session_config: a ConfigProto used to set session parameters, or None.
Note - using this argument, it is easy to provide settings which break
otherwise perfectly good models. Use with care.
+      protocol: An optional argument which specifies the protocol used when
+        starting the server. None means default to grpc.
"""
# Neither parent class calls super().__init__(), so here we have to
# manually call their __init__() methods.
@@ -313,6 +316,7 @@ class RunConfig(ClusterConfig, core_run_config.RunConfig):
self._save_summary_steps = save_summary_steps
self._save_checkpoints_secs = save_checkpoints_secs
self._log_step_count_steps = log_step_count_steps
+ self._protocol = protocol
self._session_config = session_config
if save_checkpoints_secs == RunConfig._USE_DEFAULT:
if save_checkpoints_steps is None:
diff --git a/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py b/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
index 5e7b422e3c..e742447208 100644
--- a/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
+++ b/tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
@@ -625,11 +625,13 @@ def attention_decoder(decoder_inputs,
v = []
attention_vec_size = attn_size # Size of query vectors for attention.
for a in xrange(num_heads):
- k = variable_scope.get_variable("AttnW_%d" % a,
- [1, 1, attn_size, attention_vec_size])
+ k = variable_scope.get_variable(
+ "AttnW_%d" % a, [1, 1, attn_size, attention_vec_size],
+ dtype=dtype)
hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
v.append(
- variable_scope.get_variable("AttnV_%d" % a, [attention_vec_size]))
+ variable_scope.get_variable(
+ "AttnV_%d" % a, [attention_vec_size], dtype=dtype))
state = initial_state
@@ -647,11 +649,13 @@ def attention_decoder(decoder_inputs,
with variable_scope.variable_scope("Attention_%d" % a):
y = Linear(query, attention_vec_size, True)(query)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
+ y = math_ops.cast(y, dtype)
# Attention mask is a softmax of v^T * tanh(...).
s = math_ops.reduce_sum(v[a] * math_ops.tanh(hidden_features[a] + y),
[2, 3])
- a = nn_ops.softmax(s)
+ a = nn_ops.softmax(math_ops.cast(s, dtype=dtypes.float32))
# Now calculate the attention-weighted vector d.
+ a = math_ops.cast(a, dtype)
d = math_ops.reduce_sum(
array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden, [1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
@@ -681,6 +685,7 @@ def attention_decoder(decoder_inputs,
raise ValueError("Could not infer input size from input: %s" % inp.name)
inputs = [inp] + attns
+ inputs = [math_ops.cast(e, dtype) for e in inputs]
x = Linear(inputs, input_size, True)(inputs)
# Run the RNN.
cell_output, state = cell(x, state)
@@ -693,6 +698,7 @@ def attention_decoder(decoder_inputs,
attns = attention(state)
with variable_scope.variable_scope("AttnOutputProjection"):
+ cell_output = math_ops.cast(cell_output, dtype)
inputs = [cell_output] + attns
output = Linear(inputs, output_size, True)(inputs)
if loop_function is not None:
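The casts added above keep the attention computation in the caller-supplied `dtype` while the softmax itself runs in float32 for numerical stability. A NumPy sketch of that up-cast/down-cast pattern, assuming a float16 working precision:

    import numpy as np

    def stable_softmax(scores):
      # Up-cast to float32 for the exp/sum, then return to the working dtype.
      s = scores.astype(np.float32)
      e = np.exp(s - s.max(axis=-1, keepdims=True))
      probs = e / e.sum(axis=-1, keepdims=True)
      return probs.astype(scores.dtype)

    scores = np.random.randn(2, 5).astype(np.float16)
    probs = stable_softmax(scores)
    assert probs.dtype == np.float16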
diff --git a/tensorflow/contrib/lite/BUILD b/tensorflow/contrib/lite/BUILD
index b95d4d0fce..7d7dd6b708 100644
--- a/tensorflow/contrib/lite/BUILD
+++ b/tensorflow/contrib/lite/BUILD
@@ -47,6 +47,10 @@ cc_test(
name = "arena_planner_test",
size = "small",
srcs = ["arena_planner_test.cc"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable",
+ ],
deps = [
":arena_planner",
"//tensorflow/contrib/lite/testing:util",
@@ -200,6 +204,7 @@ cc_test(
name = "graph_info_test",
size = "small",
srcs = ["graph_info_test.cc"],
+ tags = ["no_oss"],
deps = [
":framework",
":string_util",
@@ -244,6 +249,7 @@ cc_test(
name = "op_resolver_test",
size = "small",
srcs = ["op_resolver_test.cc"],
+ tags = ["no_oss"],
deps = [
":framework",
"//tensorflow/contrib/lite/testing:util",
@@ -276,6 +282,7 @@ cc_test(
name = "util_test",
size = "small",
srcs = ["util_test.cc"],
+ tags = ["no_oss"],
deps = [
":context",
":util",
diff --git a/tensorflow/contrib/lite/Makefile b/tensorflow/contrib/lite/Makefile
index a616138d33..df5954744a 100644
--- a/tensorflow/contrib/lite/Makefile
+++ b/tensorflow/contrib/lite/Makefile
@@ -82,8 +82,9 @@ endif
# Settings for the host compiler.
CXX := $(CC_PREFIX) ${TARGET_TOOLCHAIN_PREFIX}g++
-CXXFLAGS += --std=c++11 -O3 -DNDEBUG
+CXXFLAGS += -O3 -DNDEBUG
CCFLAGS := ${CXXFLAGS}
+CXXFLAGS += --std=c++11
CC := $(CC_PREFIX) ${TARGET_TOOLCHAIN_PREFIX}gcc
AR := $(CC_PREFIX) ${TARGET_TOOLCHAIN_PREFIX}ar
CFLAGS :=
diff --git a/tensorflow/contrib/lite/arena_planner.cc b/tensorflow/contrib/lite/arena_planner.cc
index 4257e754ad..02442575b3 100644
--- a/tensorflow/contrib/lite/arena_planner.cc
+++ b/tensorflow/contrib/lite/arena_planner.cc
@@ -17,14 +17,6 @@ limitations under the License.
namespace tflite {
-namespace {
-
-// Memory allocation tuning
-constexpr const int kDefaultArenaAlignment = 64;
-constexpr const int kDefaultTensorAlignment = 4;
-
-} // namespace
-
struct AllocationInfo {
// The node index requesting this allocation.
int node;
@@ -36,12 +28,16 @@ struct AllocationInfo {
ArenaPlanner::ArenaPlanner(TfLiteContext* context,
std::unique_ptr<GraphInfo> graph_info,
- bool preserve_inputs)
+ bool preserve_inputs, bool preserve_intermediates,
+ int tensor_alignment)
: context_(context),
graph_info_(std::move(graph_info)),
arena_(kDefaultArenaAlignment),
persistent_arena_(kDefaultArenaAlignment),
- preserve_inputs_(preserve_inputs) {}
+ preserve_inputs_(preserve_inputs),
+ preserve_intermediates_(preserve_intermediates),
+ tensor_alignment_(tensor_alignment) {}
+
ArenaPlanner::~ArenaPlanner() {}
int64_t ArenaPlanner::BasePointer(TfLiteAllocationType type) {
@@ -164,13 +160,15 @@ TfLiteStatus ArenaPlanner::PlanAllocations() {
// Then update the ref-counts of the node's inputs, and if necessary queue
// them for deallocation.
- TfLiteIntArray* node_inputs = node.inputs;
- for (int j = 0; j < node_inputs->size; ++j) {
- int tensor_index = node_inputs->data[j];
- if (tensor_index != kOptionalTensor) {
- refcounts[tensor_index]--;
- if (refcounts[tensor_index] == 0) {
- TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
+ if (!preserve_intermediates_) {
+ TfLiteIntArray* node_inputs = node.inputs;
+ for (int j = 0; j < node_inputs->size; ++j) {
+ int tensor_index = node_inputs->data[j];
+ if (tensor_index != kOptionalTensor) {
+ refcounts[tensor_index]--;
+ if (refcounts[tensor_index] == 0) {
+ TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
+ }
}
}
}
@@ -261,14 +259,12 @@ TfLiteStatus ArenaPlanner::ResolveTensorAllocation(int tensor_index) {
TfLiteStatus ArenaPlanner::CalculateTensorAllocation(int tensor_index) {
TfLiteTensor& tensor = *graph_info_->tensor(tensor_index);
if (tensor.allocation_type == kTfLiteArenaRw) {
- TF_LITE_ENSURE_STATUS(arena_.Allocate(context_, kDefaultTensorAlignment,
- tensor.bytes,
- &allocs_[tensor_index]));
+ TF_LITE_ENSURE_STATUS(arena_.Allocate(
+ context_, tensor_alignment_, tensor.bytes, &allocs_[tensor_index]));
}
if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
- TF_LITE_ENSURE_STATUS(
- persistent_arena_.Allocate(context_, kDefaultTensorAlignment,
- tensor.bytes, &allocs_[tensor_index]));
+ TF_LITE_ENSURE_STATUS(persistent_arena_.Allocate(
+ context_, tensor_alignment_, tensor.bytes, &allocs_[tensor_index]));
}
return kTfLiteOk;
}
diff --git a/tensorflow/contrib/lite/arena_planner.h b/tensorflow/contrib/lite/arena_planner.h
index 1d84950e91..55003cf4e9 100644
--- a/tensorflow/contrib/lite/arena_planner.h
+++ b/tensorflow/contrib/lite/arena_planner.h
@@ -25,6 +25,10 @@ limitations under the License.
namespace tflite {
+// Memory allocation tuning
+constexpr const int kDefaultArenaAlignment = 64;
+constexpr const int kDefaultTensorAlignment = 64;
+
struct AllocationInfo;
// A memory planner that makes all the allocations using arenas.
@@ -47,7 +51,8 @@ class ArenaPlanner : public MemoryPlanner {
// graph will not share memory with any other tensor, effectively preserving
// them until the end of inference.
ArenaPlanner(TfLiteContext* context, std::unique_ptr<GraphInfo> graph_info,
- bool preserve_inputs);
+ bool preserve_inputs, bool preserve_intermediates,
+ int tensor_alignment = kDefaultTensorAlignment);
~ArenaPlanner() override;
ArenaPlanner(const ArenaPlanner&) = delete;
ArenaPlanner& operator=(const ArenaPlanner&) = delete;
@@ -104,7 +109,17 @@ class ArenaPlanner : public MemoryPlanner {
// declared as kTfLiteArenaRwPersistent.
SimpleMemoryArena persistent_arena_;
+  // Ensure that the memory self-allocated for inputs is never reused by the
+  // allocator. This allows, for example, multiple runs without getting
+  // unpredictable results.
bool preserve_inputs_;
+
+  // If true, no overlapping of memory areas is done, meaning intermediate
+  // results can be queried after running (modulo running delegates).
+ bool preserve_intermediates_;
+
+ // Number of bytes that tensor buffers should be aligned to.
+ int tensor_alignment_;
};
} // namespace tflite
diff --git a/tensorflow/contrib/lite/arena_planner_test.cc b/tensorflow/contrib/lite/arena_planner_test.cc
index f5bd1932f9..7d7c41289c 100644
--- a/tensorflow/contrib/lite/arena_planner_test.cc
+++ b/tensorflow/contrib/lite/arena_planner_test.cc
@@ -24,6 +24,8 @@ limitations under the License.
namespace tflite {
namespace {
+constexpr const int kTensorAlignment = 4;
+
// A simple op to be used in tests, as syntactic sugar.
class TestOp {
public:
@@ -156,7 +158,7 @@ class ArenaPlannerTest : public ::testing::Test {
context_.ReportError = ReportError;
planner_.reset(new ArenaPlanner(
&context_, std::unique_ptr<GraphInfo>(new TestGraphInfo(graph)),
- preserve_inputs));
+        preserve_inputs, /*preserve_intermediates=*/false, kTensorAlignment));
CHECK(planner_->ResetAllocations() == kTfLiteOk);
CHECK(planner_->PlanAllocations() == kTfLiteOk);
}
@@ -178,8 +180,8 @@ class ArenaPlannerTest : public ::testing::Test {
const TfLiteTensor& tensor = (*graph_->tensors())[tensor_index];
int64_t offset = GetOffset(tensor_index) + tensor.bytes;
// We must make sure the offset is aligned to kDefaultArenaAlignment.
- if (offset % 4 != 0) {
- offset += 4 - offset % 4;
+ if (offset % kTensorAlignment != 0) {
+ offset += kTensorAlignment - offset % kTensorAlignment;
}
return offset;
};
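The test helper rounds each offset up to the next multiple of `kTensorAlignment`. The same align-up arithmetic in isolation, as a short Python sketch (any positive alignment works):

    def align_up(offset, alignment):
      # Round offset up to the next multiple of alignment (no-op if aligned).
      remainder = offset % alignment
      return offset if remainder == 0 else offset + alignment - remainder

    assert align_up(12, 4) == 12
    assert align_up(13, 4) == 16
    assert align_up(13, 64) == 64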
diff --git a/tensorflow/contrib/lite/build_def.bzl b/tensorflow/contrib/lite/build_def.bzl
index b735d08b4b..7c13f9011e 100644
--- a/tensorflow/contrib/lite/build_def.bzl
+++ b/tensorflow/contrib/lite/build_def.bzl
@@ -1,193 +1,211 @@
"""Generate Flatbuffer binary from json."""
+
load(
"//tensorflow:tensorflow.bzl",
"tf_cc_test",
+ "tf_cc_shared_object",
)
def tflite_copts():
- """Defines compile time flags."""
- copts = [
- "-DFARMHASH_NO_CXX_STRING",
- ] + select({
- str(Label("//tensorflow:android_arm64")): [
- "-std=c++11",
- "-O3",
- ],
- str(Label("//tensorflow:android_arm")): [
- "-mfpu=neon",
- "-mfloat-abi=softfp",
- "-std=c++11",
- "-O3",
- ],
- str(Label("//tensorflow:android_x86")): [
- "-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK",
- ],
- str(Label("//tensorflow:ios_x86_64")): [
- "-msse4.1",
- ],
- "//conditions:default": [],
- }) + select({
- str(Label("//tensorflow:with_default_optimizations")): [],
- "//conditions:default": ["-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK"],
- })
+ """Defines compile time flags."""
+ copts = [
+ "-DFARMHASH_NO_CXX_STRING",
+ ] + select({
+ str(Label("//tensorflow:android_arm64")): [
+ "-std=c++11",
+ "-O3",
+ ],
+ str(Label("//tensorflow:android_arm")): [
+ "-mfpu=neon",
+ "-mfloat-abi=softfp",
+ "-std=c++11",
+ "-O3",
+ ],
+ str(Label("//tensorflow:android_x86")): [
+ "-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK",
+ ],
+ str(Label("//tensorflow:ios_x86_64")): [
+ "-msse4.1",
+ ],
+ "//conditions:default": [],
+ }) + select({
+ str(Label("//tensorflow:with_default_optimizations")): [],
+ "//conditions:default": ["-DGEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK"],
+ })
- return copts
+ return copts
LINKER_SCRIPT = "//tensorflow/contrib/lite/java/src/main/native:version_script.lds"
def tflite_linkopts_unstripped():
- """Defines linker flags to reduce size of TFLite binary.
+ """Defines linker flags to reduce size of TFLite binary.
- These are useful when trying to investigate the relative size of the
- symbols in TFLite.
+ These are useful when trying to investigate the relative size of the
+ symbols in TFLite.
- Returns:
- a select object with proper linkopts
- """
- return select({
- "//tensorflow:android": [
- "-Wl,--no-export-dynamic", # Only inc syms referenced by dynamic obj.
- "-Wl,--exclude-libs,ALL", # Exclude syms in all libs from auto export.
- "-Wl,--gc-sections", # Eliminate unused code and data.
- "-Wl,--as-needed", # Don't link unused libs.
- ],
- "//tensorflow/contrib/lite:mips": [],
- "//tensorflow/contrib/lite:mips64": [],
- "//conditions:default": [
- "-Wl,--icf=all", # Identical code folding.
- ],
- })
+ Returns:
+ a select object with proper linkopts
+ """
+ return select({
+ "//tensorflow:android": [
+ "-Wl,--no-export-dynamic", # Only inc syms referenced by dynamic obj.
+ "-Wl,--exclude-libs,ALL", # Exclude syms in all libs from auto export.
+ "-Wl,--gc-sections", # Eliminate unused code and data.
+ "-Wl,--as-needed", # Don't link unused libs.
+ ],
+ "//tensorflow/contrib/lite:mips": [],
+ "//tensorflow/contrib/lite:mips64": [],
+ "//conditions:default": [
+ "-Wl,--icf=all", # Identical code folding.
+ ],
+ })
def tflite_jni_linkopts_unstripped():
- """Defines linker flags to reduce size of TFLite binary with JNI.
+ """Defines linker flags to reduce size of TFLite binary with JNI.
- These are useful when trying to investigate the relative size of the
- symbols in TFLite.
+ These are useful when trying to investigate the relative size of the
+ symbols in TFLite.
- Returns:
- a select object with proper linkopts
- """
- return select({
- "//tensorflow:android": [
- "-Wl,--gc-sections", # Eliminate unused code and data.
- "-Wl,--as-needed", # Don't link unused libs.
- ],
- "//tensorflow/contrib/lite:mips": [],
- "//tensorflow/contrib/lite:mips64": [],
- "//conditions:default": [
- "-Wl,--icf=all", # Identical code folding.
- ],
- })
+ Returns:
+ a select object with proper linkopts
+ """
+ return select({
+ "//tensorflow:android": [
+ "-Wl,--gc-sections", # Eliminate unused code and data.
+ "-Wl,--as-needed", # Don't link unused libs.
+ ],
+ "//tensorflow/contrib/lite:mips": [],
+ "//tensorflow/contrib/lite:mips64": [],
+ "//conditions:default": [
+ "-Wl,--icf=all", # Identical code folding.
+ ],
+ })
def tflite_linkopts():
- """Defines linker flags to reduce size of TFLite binary."""
- return tflite_linkopts_unstripped() + select({
- "//tensorflow:android": [
- "-s", # Omit symbol table.
- ],
- "//conditions:default": [],
- })
+ """Defines linker flags to reduce size of TFLite binary."""
+ return tflite_linkopts_unstripped() + select({
+ "//tensorflow:android": [
+ "-s", # Omit symbol table.
+ ],
+ "//conditions:default": [],
+ })
def tflite_jni_linkopts():
- """Defines linker flags to reduce size of TFLite binary with JNI."""
- return tflite_jni_linkopts_unstripped() + select({
- "//tensorflow:android": [
- "-s", # Omit symbol table.
- "-latomic", # Required for some uses of ISO C++11 <atomic> in x86.
- ],
- "//conditions:default": [],
- })
+ """Defines linker flags to reduce size of TFLite binary with JNI."""
+ return tflite_jni_linkopts_unstripped() + select({
+ "//tensorflow:android": [
+ "-s", # Omit symbol table.
+ "-latomic", # Required for some uses of ISO C++11 <atomic> in x86.
+ ],
+ "//conditions:default": [],
+ })
+
+def tflite_jni_binary(
+ name,
+ copts = tflite_copts(),
+ linkopts = tflite_jni_linkopts(),
+ linkscript = LINKER_SCRIPT,
+ linkshared = 1,
+ linkstatic = 1,
+ deps = []):
+ """Builds a jni binary for TFLite."""
+ linkopts = linkopts + [
+ "-Wl,--version-script", # Export only jni functions & classes.
+ "$(location {})".format(linkscript),
+ ]
+ native.cc_binary(
+ name = name,
+ copts = copts,
+ linkshared = linkshared,
+ linkstatic = linkstatic,
+ deps = deps + [linkscript],
+ linkopts = linkopts,
+ )
-def tflite_jni_binary(name,
- copts=tflite_copts(),
- linkopts=tflite_jni_linkopts(),
- linkscript=LINKER_SCRIPT,
- linkshared=1,
- linkstatic=1,
- deps=[]):
- """Builds a jni binary for TFLite."""
- linkopts = linkopts + [
- "-Wl,--version-script", # Export only jni functions & classes.
- "$(location {})".format(linkscript),
- ]
- native.cc_binary(
+def tflite_cc_shared_object(name,
+ copts=tflite_copts(),
+ linkopts=[],
+ linkstatic=1,
+ deps=[]):
+ """Builds a shared object for TFLite."""
+ tf_cc_shared_object(
name=name,
copts=copts,
- linkshared=linkshared,
linkstatic=linkstatic,
- deps= deps + [linkscript],
- linkopts=linkopts)
+ linkopts=linkopts + tflite_jni_linkopts(),
+ framework_so=[],
+ deps=deps)
def tf_to_tflite(name, src, options, out):
- """Convert a frozen tensorflow graphdef to TF Lite's flatbuffer.
+ """Convert a frozen tensorflow graphdef to TF Lite's flatbuffer.
- Args:
- name: Name of rule.
- src: name of the input graphdef file.
- options: options passed to TOCO.
- out: name of the output flatbuffer file.
- """
+ Args:
+ name: Name of rule.
+ src: name of the input graphdef file.
+ options: options passed to TOCO.
+ out: name of the output flatbuffer file.
+ """
- toco_cmdline = " ".join([
- "//tensorflow/contrib/lite/toco:toco",
- "--input_format=TENSORFLOW_GRAPHDEF",
- "--output_format=TFLITE",
- ("--input_file=$(location %s)" % src),
- ("--output_file=$(location %s)" % out),
- ] + options )
- native.genrule(
- name = name,
- srcs=[src],
- outs=[out],
- cmd = toco_cmdline,
- tools= ["//tensorflow/contrib/lite/toco:toco"],
- )
+ toco_cmdline = " ".join([
+ "//tensorflow/contrib/lite/toco:toco",
+ "--input_format=TENSORFLOW_GRAPHDEF",
+ "--output_format=TFLITE",
+ ("--input_file=$(location %s)" % src),
+ ("--output_file=$(location %s)" % out),
+ ] + options)
+ native.genrule(
+ name = name,
+ srcs = [src],
+ outs = [out],
+ cmd = toco_cmdline,
+ tools = ["//tensorflow/contrib/lite/toco:toco"],
+ )
def tflite_to_json(name, src, out):
- """Convert a TF Lite flatbuffer to JSON.
+ """Convert a TF Lite flatbuffer to JSON.
- Args:
- name: Name of rule.
- src: name of the input flatbuffer file.
- out: name of the output JSON file.
- """
+ Args:
+ name: Name of rule.
+ src: name of the input flatbuffer file.
+ out: name of the output JSON file.
+ """
- flatc = "@flatbuffers//:flatc"
- schema = "//tensorflow/contrib/lite/schema:schema.fbs"
- native.genrule(
- name = name,
- srcs = [schema, src],
- outs = [out],
- cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.bin &&" +
- "$(location %s) --raw-binary --strict-json -t" +
- " -o /tmp $(location %s) -- $${TMP}.bin &&" +
- "cp $${TMP}.json $(location %s)")
- % (src, flatc, schema, out),
- tools = [flatc],
- )
+ flatc = "@flatbuffers//:flatc"
+ schema = "//tensorflow/contrib/lite/schema:schema.fbs"
+ native.genrule(
+ name = name,
+ srcs = [schema, src],
+ outs = [out],
+ cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.bin &&" +
+ "$(location %s) --raw-binary --strict-json -t" +
+ " -o /tmp $(location %s) -- $${TMP}.bin &&" +
+ "cp $${TMP}.json $(location %s)") %
+ (src, flatc, schema, out),
+ tools = [flatc],
+ )
def json_to_tflite(name, src, out):
- """Convert a JSON file to TF Lite's flatbuffer.
+ """Convert a JSON file to TF Lite's flatbuffer.
- Args:
- name: Name of rule.
- src: name of the input JSON file.
- out: name of the output flatbuffer file.
- """
+ Args:
+ name: Name of rule.
+ src: name of the input JSON file.
+ out: name of the output flatbuffer file.
+ """
- flatc = "@flatbuffers//:flatc"
- schema = "//tensorflow/contrib/lite/schema:schema_fbs"
- native.genrule(
- name = name,
- srcs = [schema, src],
- outs = [out],
- cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.json &&" +
- "$(location %s) --raw-binary --unknown-json --allow-non-utf8 -b" +
- " -o /tmp $(location %s) $${TMP}.json &&" +
- "cp $${TMP}.bin $(location %s)")
- % (src, flatc, schema, out),
- tools = [flatc],
- )
+ flatc = "@flatbuffers//:flatc"
+ schema = "//tensorflow/contrib/lite/schema:schema_fbs"
+ native.genrule(
+ name = name,
+ srcs = [schema, src],
+ outs = [out],
+ cmd = ("TMP=`mktemp`; cp $(location %s) $${TMP}.json &&" +
+ "$(location %s) --raw-binary --unknown-json --allow-non-utf8 -b" +
+ " -o /tmp $(location %s) $${TMP}.json &&" +
+ "cp $${TMP}.bin $(location %s)") %
+ (src, flatc, schema, out),
+ tools = [flatc],
+ )
# This is the master list of generated examples that will be made into tests. A
# function called make_XXX_tests() must also appear in generate_examples.py.
@@ -230,10 +248,13 @@ def generated_test_models():
"mul",
"neg",
"not_equal",
+ "pack",
"pad",
"padv2",
"prelu",
"pow",
+ "reduce_max",
+ "reduce_prod",
"relu",
"relu1",
"relu6",
@@ -262,58 +283,58 @@ def generated_test_models():
]
def gen_zip_test(name, test_name, **kwargs):
- """Generate a zipped-example test and its dependent zip files.
+ """Generate a zipped-example test and its dependent zip files.
- Args:
- name: Resulting cc_test target name
- test_name: Test targets this model. Comes from the list above.
- **kwargs: tf_cc_test kwargs.
- """
- gen_zipped_test_file(
- name = "zip_%s" % test_name,
- file = "%s.zip" % test_name,
- )
- tf_cc_test(name, **kwargs)
+ Args:
+ name: Resulting cc_test target name
+ test_name: Test targets this model. Comes from the list above.
+ **kwargs: tf_cc_test kwargs.
+ """
+ gen_zipped_test_file(
+ name = "zip_%s" % test_name,
+ file = "%s.zip" % test_name,
+ )
+ tf_cc_test(name, **kwargs)
def gen_zipped_test_file(name, file):
- """Generate a zip file of tests by using :generate_examples.
+ """Generate a zip file of tests by using :generate_examples.
- Args:
- name: Name of output. We will produce "`file`.files" as a target.
- file: The name of one of the generated_examples targets, e.g. "transpose"
- """
- toco = "//tensorflow/contrib/lite/toco:toco"
- native.genrule(
- name = file + ".files",
- cmd = ("$(locations :generate_examples) --toco $(locations %s) " % toco
- + " --zip_to_output " + file + " $(@D)"),
- outs = [file],
- tools = [
- ":generate_examples",
- toco,
- ],
- )
+ Args:
+ name: Name of output. We will produce "`file`.files" as a target.
+ file: The name of one of the generated_examples targets, e.g. "transpose"
+ """
+ toco = "//tensorflow/contrib/lite/toco:toco"
+ native.genrule(
+ name = file + ".files",
+ cmd = ("$(locations :generate_examples) --toco $(locations %s) " % toco +
+ " --zip_to_output " + file + " $(@D)"),
+ outs = [file],
+ tools = [
+ ":generate_examples",
+ toco,
+ ],
+ )
- native.filegroup(
- name = name,
- srcs = [file],
- )
+ native.filegroup(
+ name = name,
+ srcs = [file],
+ )
def gen_selected_ops(name, model):
- """Generate the library that includes only used ops.
+ """Generate the library that includes only used ops.
- Args:
- name: Name of the generated library.
- model: TFLite model to interpret.
- """
- out = name + "_registration.cc"
- tool = "//tensorflow/contrib/lite/tools:generate_op_registrations"
- tflite_path = "//tensorflow/contrib/lite"
- native.genrule(
- name = name,
- srcs = [model],
- outs = [out],
- cmd = ("$(location %s) --input_model=$(location %s) --output_registration=$(location %s) --tflite_path=%s")
- % (tool, model, out, tflite_path[2:]),
- tools = [tool],
- )
+ Args:
+ name: Name of the generated library.
+ model: TFLite model to interpret.
+ """
+ out = name + "_registration.cc"
+ tool = "//tensorflow/contrib/lite/tools:generate_op_registrations"
+ tflite_path = "//tensorflow/contrib/lite"
+ native.genrule(
+ name = name,
+ srcs = [model],
+ outs = [out],
+ cmd = ("$(location %s) --input_model=$(location %s) --output_registration=$(location %s) --tflite_path=%s") %
+ (tool, model, out, tflite_path[2:]),
+ tools = [tool],
+ )
diff --git a/tensorflow/contrib/lite/build_ios_universal_lib.sh b/tensorflow/contrib/lite/build_ios_universal_lib.sh
index e9531aef19..31df43a175 100755
--- a/tensorflow/contrib/lite/build_ios_universal_lib.sh
+++ b/tensorflow/contrib/lite/build_ios_universal_lib.sh
@@ -21,7 +21,7 @@ cd "$SCRIPT_DIR/../../.."
# Build library for supported architectures and packs them in a fat binary.
make_library() {
- for arch in x86_64 i386 armv7 armv7s arm64
+ for arch in x86_64 armv7 armv7s arm64
do
make -f tensorflow/contrib/lite/Makefile TARGET=IOS IOS_ARCH=${arch} \
-j 8 \
@@ -29,7 +29,6 @@ make_library() {
done
lipo \
tensorflow/contrib/lite/gen/lib/ios_x86_64/${1} \
- tensorflow/contrib/lite/gen/lib/ios_i386/${1} \
tensorflow/contrib/lite/gen/lib/ios_armv7/${1} \
tensorflow/contrib/lite/gen/lib/ios_armv7s/${1} \
tensorflow/contrib/lite/gen/lib/ios_arm64/${1} \
diff --git a/tensorflow/contrib/lite/builtin_op_data.h b/tensorflow/contrib/lite/builtin_op_data.h
index a58dde9a7b..fd16aa1063 100644
--- a/tensorflow/contrib/lite/builtin_op_data.h
+++ b/tensorflow/contrib/lite/builtin_op_data.h
@@ -268,11 +268,20 @@ typedef struct {
} TfLiteShapeParams;
typedef struct {
+ // Parameters supported by version 1:
float min;
float max;
int num_bits;
+
+ // Parameters supported by version 2:
+ bool narrow_range;
} TfLiteFakeQuantParams;
+typedef struct {
+ int values_count;
+ int axis;
+} TfLitePackParams;
+
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
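For reference, a sketch of how the structs added above might be filled in. The field values are arbitrary illustrations chosen for this note, not defaults taken from the patch:

```c++
#include "tensorflow/contrib/lite/builtin_op_data.h"

// Version 2 of FakeQuant adds `narrow_range` on top of the version-1 fields.
TfLiteFakeQuantParams fake_quant_params = {/*min=*/-6.0f, /*max=*/6.0f,
                                           /*num_bits=*/8,
                                           /*narrow_range=*/false};

// Pack combines `values_count` input tensors along `axis`.
TfLitePackParams pack_params = {/*values_count=*/3, /*axis=*/0};
```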
diff --git a/tensorflow/contrib/lite/builtin_ops.h b/tensorflow/contrib/lite/builtin_ops.h
index 6bde5d2e6d..1ae73b9738 100644
--- a/tensorflow/contrib/lite/builtin_ops.h
+++ b/tensorflow/contrib/lite/builtin_ops.h
@@ -106,6 +106,10 @@ typedef enum {
kTfLiteBuiltinPow = 78,
kTfLiteBuiltinArgMin = 79,
kTfLiteBuiltinFakeQuant = 80,
+ kTfLiteBuiltinReduceProd = 81,
+ kTfLiteBuiltinReduceMax = 82,
+ kTfLiteBuiltinPack = 83,
+ kTfLiteBuiltinLogicalOr = 84,
} TfLiteBuiltinOperator;
#ifdef __cplusplus
diff --git a/tensorflow/contrib/lite/context.h b/tensorflow/contrib/lite/context.h
index 1ff8843fa7..cbfce12d7e 100644
--- a/tensorflow/contrib/lite/context.h
+++ b/tensorflow/contrib/lite/context.h
@@ -464,6 +464,12 @@ typedef struct _TfLiteDelegate {
} TfLiteDelegate;
// WARNING: This is an experimental interface that is subject to change.
+//
+// Currently, TfLiteDelegateParams has to be allocated in a way that makes it
+// trivially destructible. It will be stored in the `builtin_data` field of
+// the delegate node's `TfLiteNode`.
+//
+// See also the `CreateDelegateParams` function in `interpreter.cc` for details.
typedef struct {
TfLiteDelegate* delegate;
TfLiteIntArray* nodes_to_replace;
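One way to meet the trivially-destructible requirement documented above is to carve the params struct and its node-index array out of a single heap block. This is only an illustrative sketch under that assumption; the authoritative logic lives in `CreateDelegateParams` in `interpreter.cc`, which is not shown in this diff, and `AllocateDelegateParams` is a hypothetical helper:

```c++
#include <cstdlib>

#include "tensorflow/contrib/lite/context.h"

// Hypothetical sketch: allocate TfLiteDelegateParams plus the node-index array
// in one malloc'd block, so a plain free() releases everything and no
// destructor ever needs to run.
TfLiteDelegateParams* AllocateDelegateParams(TfLiteDelegate* delegate,
                                             int num_nodes) {
  size_t bytes = sizeof(TfLiteDelegateParams) +
                 TfLiteIntArrayGetSizeInBytes(num_nodes);
  char* block = static_cast<char*>(std::malloc(bytes));
  auto* params = reinterpret_cast<TfLiteDelegateParams*>(block);
  params->delegate = delegate;
  params->nodes_to_replace =
      reinterpret_cast<TfLiteIntArray*>(block + sizeof(TfLiteDelegateParams));
  params->nodes_to_replace->size = num_nodes;
  return params;  // Released later with free(params).
}
```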
diff --git a/tensorflow/contrib/lite/delegates/eager/BUILD b/tensorflow/contrib/lite/delegates/eager/BUILD
new file mode 100644
index 0000000000..03a4b7bf1d
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/BUILD
@@ -0,0 +1,97 @@
+#
+# This is a TF Lite delegate that is powered by TensorFlow's Eager.
+#
+package(default_visibility = [
+ "//visibility:public",
+])
+
+licenses(["notice"]) # Apache 2.0
+
+cc_library(
+ name = "buffer_map",
+ srcs = ["buffer_map.cc"],
+ hdrs = ["buffer_map.h"],
+ deps = [
+ ":util",
+ "//tensorflow/c:c_api_internal",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite:kernel_api",
+ "//tensorflow/core:framework",
+ "//tensorflow/core:protos_all_cc",
+ ],
+)
+
+cc_test(
+ name = "buffer_map_test",
+ size = "small",
+ srcs = ["buffer_map_test.cc"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable",
+ ],
+ deps = [
+ ":buffer_map",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite:util",
+ "//tensorflow/contrib/lite/testing:util",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "delegate_data",
+ srcs = ["delegate_data.cc"],
+ hdrs = ["delegate_data.h"],
+ deps = [
+ ":buffer_map",
+ "//tensorflow/core:core_cpu",
+ "//tensorflow/core:lib",
+ "//tensorflow/core/common_runtime/eager:context",
+ ],
+)
+
+cc_test(
+ name = "delegate_data_test",
+ size = "small",
+ srcs = ["delegate_data_test.cc"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable",
+ ],
+ deps = [
+ ":delegate_data",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite:util",
+ "//tensorflow/contrib/lite/testing:util",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "util",
+ srcs = ["util.cc"],
+ hdrs = ["util.h"],
+ deps = [
+ "//tensorflow/c:c_api_internal",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite:kernel_api",
+ "//tensorflow/core:framework",
+ "//tensorflow/core:lib",
+ ],
+)
+
+cc_test(
+ name = "util_test",
+ size = "small",
+ srcs = ["util_test.cc"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable",
+ ],
+ deps = [
+ ":util",
+ "//tensorflow/contrib/lite/testing:util",
+ "//tensorflow/core:lib",
+ "@com_google_googletest//:gtest",
+ ],
+)
diff --git a/tensorflow/contrib/lite/delegates/eager/buffer_map.cc b/tensorflow/contrib/lite/delegates/eager/buffer_map.cc
new file mode 100644
index 0000000000..1d6453f498
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/buffer_map.cc
@@ -0,0 +1,107 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/delegates/eager/buffer_map.h"
+
+#include "tensorflow/c/c_api_internal.h"
+#include "tensorflow/contrib/lite/delegates/eager/util.h"
+#include "tensorflow/core/framework/allocation_description.pb.h"
+#include "tensorflow/core/framework/log_memory.h"
+
+namespace tflite {
+namespace eager {
+namespace {
+// A tensor buffer that is allocated, deallocated and populated by TF Lite.
+class TfLiteTensorBuffer : public tensorflow::TensorBuffer {
+ public:
+ explicit TfLiteTensorBuffer(const TfLiteTensor* tensor) {
+ len_ = tensor->bytes;
+ // TODO(ahentz): if we can guarantee that TF Lite allocated tensors with
+ // the same alignment as TensorFlow (EIGEN_MAX_ALIGN_BYTES), then we can
+ // potentially eliminate the copy below.
+ data_ =
+ tensorflow::cpu_allocator()->AllocateRaw(EIGEN_MAX_ALIGN_BYTES, len_);
+ if (data_ != nullptr) {
+ if (tensorflow::LogMemory::IsEnabled()) {
+ tensorflow::LogMemory::RecordRawAllocation(
+ "TfLiteTensorBuffer_New",
+ tensorflow::LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, len_,
+ data_, tensorflow::cpu_allocator());
+ }
+ std::memcpy(data_, tensor->data.raw, tensor->bytes);
+ }
+ }
+
+ ~TfLiteTensorBuffer() override {
+ if (tensorflow::LogMemory::IsEnabled() && data_ != nullptr) {
+ tensorflow::LogMemory::RecordRawDeallocation(
+ "TfLiteTensorBuffer_Delete",
+ tensorflow::LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, data_,
+ tensorflow::cpu_allocator(), false);
+ }
+ tensorflow::cpu_allocator()->DeallocateRaw(data_);
+ }
+
+ void* data() const override { return data_; }
+ size_t size() const override { return len_; }
+
+ TensorBuffer* root_buffer() override { return this; }
+ void FillAllocationDescription(
+ tensorflow::AllocationDescription* proto) const override {
+ tensorflow::int64 rb = size();
+ proto->set_requested_bytes(rb);
+ proto->set_allocator_name(tensorflow::cpu_allocator()->Name());
+ }
+
+ // Prevents input forwarding from mutating this buffer.
+ bool OwnsMemory() const override { return false; }
+
+ private:
+ void* data_;
+ size_t len_;
+};
+} // namespace
+
+BufferMap::BufferMap() {}
+
+BufferMap::~BufferMap() {}
+
+bool BufferMap::HasTensor(int tensor_index) const {
+ return id_to_tensor_.count(tensor_index) != 0;
+}
+
+tensorflow::Tensor BufferMap::GetTensor(int tensor_index) const {
+ return id_to_tensor_.at(tensor_index);
+}
+
+void BufferMap::SetFromTfLite(int tensor_index, const TfLiteTensor* tensor) {
+ tensorflow::TensorShape shape;
+ int num_dims = tensor->dims->size;
+ for (int i = 0; i < num_dims; ++i) {
+ shape.AddDim(tensor->dims->data[i]);
+ }
+ auto* buf = new TfLiteTensorBuffer(tensor);
+ tensorflow::Tensor t = tensorflow::TensorCApi::MakeTensor(
+ GetTensorFlowDataType(tensor->type), shape, buf);
+ buf->Unref();
+
+ SetFromTensorFlow(tensor_index, std::move(t));
+}
+
+void BufferMap::SetFromTensorFlow(int tensor_index, tensorflow::Tensor tensor) {
+ id_to_tensor_[tensor_index] = std::move(tensor);
+}
+
+} // namespace eager
+} // namespace tflite
diff --git a/tensorflow/contrib/lite/delegates/eager/buffer_map.h b/tensorflow/contrib/lite/delegates/eager/buffer_map.h
new file mode 100644
index 0000000000..a28329ae7d
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/buffer_map.h
@@ -0,0 +1,61 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_BUFFER_MAP_H_
+#define TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_BUFFER_MAP_H_
+
+#include <map>
+
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/core/framework/tensor.h"
+
+namespace tflite {
+namespace eager {
+
+// Maps a TF Lite tensor index into a TensorFlow tensor.
+//
+// The TF Lite interpreter assigns integer indices to each of its tensors, but
+// the Eager delegate deals in terms of TensorFlow tensors. This class maps
+// from indices to tensors and allows new tensors to be created and
+// associated with a given index.
+class BufferMap {
+ public:
+ BufferMap();
+ ~BufferMap();
+
+ // Returns true if the given 'tensor_index' has a corresponding
+ // tensorflow::Tensor.
+ bool HasTensor(int tensor_index) const;
+
+ // Returns the tensorflow::Tensor associated with the given 'tensor_index'.
+ // Precondition: HasTensor() is true.
+ tensorflow::Tensor GetTensor(int tensor_index) const;
+
+ // Associates the given tensorflow::Tensor with the given 'tensor_index'.
+ // Note that tensorflow::Tensor objects share data buffers, so this method
+ // only performs a shallow copy.
+ void SetFromTensorFlow(int tensor_index, tensorflow::Tensor tensor);
+
+ // Same as above but creates a new tensorflow::Tensor with a copy of the
+ // given TfLiteTensor's data.
+ void SetFromTfLite(int tensor_index, const TfLiteTensor* tensor);
+
+ private:
+ std::map<int, tensorflow::Tensor> id_to_tensor_;
+};
+
+} // namespace eager
+} // namespace tflite
+
+#endif // TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_BUFFER_MAP_H_
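A minimal sketch of the mapping described in the header above; the `LookUpAsTfTensor` helper is hypothetical, and the TfLiteTensor is assumed to be valid and already allocated:

```c++
#include "tensorflow/contrib/lite/delegates/eager/buffer_map.h"

tensorflow::Tensor LookUpAsTfTensor(const TfLiteTensor* tflite_tensor) {
  tflite::eager::BufferMap buffer_map;
  // SetFromTfLite() copies the TfLiteTensor's data into a new TF tensor;
  // SetFromTensorFlow() would instead share the existing tensor's buffer.
  buffer_map.SetFromTfLite(/*tensor_index=*/0, tflite_tensor);
  return buffer_map.GetTensor(/*tensor_index=*/0);
}
```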
diff --git a/tensorflow/contrib/lite/delegates/eager/buffer_map_test.cc b/tensorflow/contrib/lite/delegates/eager/buffer_map_test.cc
new file mode 100644
index 0000000000..dcb3f6c941
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/buffer_map_test.cc
@@ -0,0 +1,174 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/delegates/eager/buffer_map.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/interpreter.h"
+#include "tensorflow/contrib/lite/testing/util.h"
+#include "tensorflow/contrib/lite/util.h"
+
+namespace tflite {
+namespace eager {
+namespace {
+
+using ::testing::ElementsAre;
+
+// A bit of RAII to simplify handling of TfLiteTensors in the tests.
+using UniqueTfLiteTensor =
+ std::unique_ptr<TfLiteTensor, std::function<void(TfLiteTensor*)>>;
+
+template <typename T>
+UniqueTfLiteTensor MakeLiteTensor(const std::vector<int>& shape,
+ const std::vector<T>& data) {
+ auto tensor = UniqueTfLiteTensor(new TfLiteTensor, [](TfLiteTensor* t) {
+ TfLiteTensorDataFree(t);
+ TfLiteIntArrayFree(t->dims);
+ delete t;
+ });
+ tensor->allocation_type = kTfLiteDynamic;
+ tensor->type = typeToTfLiteType<T>();
+ tensor->dims = ConvertVectorToTfLiteIntArray(shape);
+ tensor->data.raw = nullptr;
+ TfLiteTensorRealloc(data.size() * sizeof(T), tensor.get());
+ memcpy(tensor->data.raw, data.data(), data.size() * sizeof(T));
+ return tensor;
+}
+
+template <typename T>
+tensorflow::Tensor MakeTensor(const std::vector<int>& shape,
+ const std::vector<T>& data) {
+ BufferMap buffer_map; // BufferMap is the easiest way to build the tensor.
+ UniqueTfLiteTensor t1 = MakeLiteTensor<T>(shape, data);
+ buffer_map.SetFromTfLite(0, t1.get());
+ return buffer_map.GetTensor(0);
+}
+
+std::vector<int64> GetTensorShape(const tensorflow::Tensor& t) {
+ std::vector<int64> shape(t.dims());
+ for (int i = 0; i < t.dims(); ++i) {
+ shape[i] = t.dim_size(i);
+ }
+ return shape;
+}
+
+template <typename T>
+std::vector<T> GetTensorData(const tensorflow::Tensor& t) {
+ const T* data = t.flat<T>().data();
+ return std::vector<T>(data, data + t.NumElements());
+}
+
+TEST(BufferMapTest, EmptyBuffer) {
+ BufferMap buffer_map;
+ EXPECT_FALSE(buffer_map.HasTensor(0));
+}
+
+TEST(BufferMapTest, SetFromTfLite) {
+ BufferMap buffer_map;
+
+ UniqueTfLiteTensor t =
+ MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
+ buffer_map.SetFromTfLite(0, t.get());
+ ASSERT_TRUE(buffer_map.HasTensor(0));
+
+ EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
+ ElementsAre(0, 0, 0, 0.123f, 0, 0));
+
+ // Also check details of the tensor.
+ tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
+ ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_FLOAT);
+ ASSERT_EQ(out_tensor.NumElements(), 6);
+ ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
+}
+
+TEST(BufferMapTest, SetFromTfLiteTwice) {
+ UniqueTfLiteTensor t1 =
+ MakeLiteTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
+ UniqueTfLiteTensor t2 =
+ MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
+
+ BufferMap buffer_map;
+ buffer_map.SetFromTfLite(0, t1.get());
+ buffer_map.SetFromTfLite(0, t2.get());
+
+ EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
+ ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
+}
+
+TEST(BufferMapTest, SetFromTensorFlow) {
+ tensorflow::Tensor t1 =
+ MakeTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
+
+ BufferMap buffer_map;
+ buffer_map.SetFromTensorFlow(0, t1);
+
+ EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
+ ElementsAre(0, 0, 0, 0.123f, 0, 0));
+
+ // Also check details of the tensor.
+ tensorflow::Tensor out_tensor = buffer_map.GetTensor(0);
+ ASSERT_EQ(out_tensor.dtype(), tensorflow::DT_FLOAT);
+ ASSERT_EQ(out_tensor.NumElements(), 6);
+ ASSERT_THAT(GetTensorShape(out_tensor), ElementsAre(1, 2, 1, 3));
+}
+
+TEST(BufferMapTest, SetFromTensorFlowTwice) {
+ tensorflow::Tensor t1 =
+ MakeTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
+ tensorflow::Tensor t2 = MakeTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
+ BufferMap buffer_map;
+ buffer_map.SetFromTensorFlow(0, t1);
+ buffer_map.SetFromTensorFlow(0, t2);
+
+ EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
+ ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
+}
+
+TEST(BufferMapTest, TfLiteOverwritesTensorFlow) {
+ tensorflow::Tensor t1 =
+ MakeTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
+ UniqueTfLiteTensor t2 =
+ MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
+
+ BufferMap buffer_map;
+ buffer_map.SetFromTensorFlow(0, t1);
+ buffer_map.SetFromTfLite(0, t2.get());
+
+ EXPECT_THAT(GetTensorData<int>(buffer_map.GetTensor(0)),
+ ElementsAre(0, 0, 0, 3, 0, 0, 1, 2));
+}
+
+TEST(BufferMapTest, TensorFlowOverwritesTfLite) {
+ tensorflow::Tensor t1 =
+ MakeTensor<float>({1, 2, 1, 3}, {0, 0, 0, 0.123f, 0, 0});
+ UniqueTfLiteTensor t2 =
+ MakeLiteTensor<int>({1, 2, 4}, {0, 0, 0, 3, 0, 0, 1, 2});
+ BufferMap buffer_map;
+ buffer_map.SetFromTfLite(0, t2.get());
+ buffer_map.SetFromTensorFlow(0, t1);
+
+ EXPECT_THAT(GetTensorData<float>(buffer_map.GetTensor(0)),
+ ElementsAre(0, 0, 0, 0.123f, 0, 0));
+}
+
+} // namespace
+} // namespace eager
+} // namespace tflite
+
+int main(int argc, char** argv) {
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/delegates/eager/delegate_data.cc b/tensorflow/contrib/lite/delegates/eager/delegate_data.cc
new file mode 100644
index 0000000000..29687694bd
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/delegate_data.cc
@@ -0,0 +1,46 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/delegates/eager/delegate_data.h"
+
+#include "tensorflow/core/common_runtime/device_factory.h"
+#include "tensorflow/core/lib/core/status.h"
+
+namespace tflite {
+namespace eager {
+tensorflow::Status DelegateData::Create(std::unique_ptr<DelegateData>* data) {
+ std::vector<tensorflow::Device*> devices;
+
+ TF_RETURN_IF_ERROR(tensorflow::DeviceFactory::AddDevices(
+ tensorflow::SessionOptions(), "/device:cpu:*", &devices));
+
+ std::unique_ptr<tensorflow::DeviceMgr> device_mgr(
+ new tensorflow::DeviceMgr(devices));
+ // Note that Rendezvous is ref-counted so it will be automatically deleted.
+ tensorflow::Rendezvous* rendezvous =
+ new tensorflow::IntraProcessRendezvous(device_mgr.get());
+ data->reset(new DelegateData(new tensorflow::EagerContext(
+ tensorflow::SessionOptions(),
+ tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
+ /*async=*/false, std::move(device_mgr), rendezvous)));
+ return tensorflow::Status();
+}
+
+DelegateData::DelegateData(tensorflow::EagerContext* eager_context)
+ : eager_context_(eager_context) {}
+
+DelegateData::~DelegateData() {}
+
+} // namespace eager
+} // namespace tflite
diff --git a/tensorflow/contrib/lite/delegates/eager/delegate_data.h b/tensorflow/contrib/lite/delegates/eager/delegate_data.h
new file mode 100644
index 0000000000..8a0e8ba8bf
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/delegate_data.h
@@ -0,0 +1,48 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_DELEGATE_DATA_H_
+#define TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_DELEGATE_DATA_H_
+
+#include "tensorflow/contrib/lite/delegates/eager/buffer_map.h"
+#include "tensorflow/core/common_runtime/eager/context.h"
+
+namespace tflite {
+namespace eager {
+
+// Data kept by the Eager delegate for the lifetime of an Interpreter.
+class DelegateData {
+ public:
+ // Create a new DelegateData, initialized with a newly-created EagerContext.
+ static tensorflow::Status Create(std::unique_ptr<DelegateData>* data);
+
+ ~DelegateData();
+
+ // The EagerContext that is required for execution of Eager Ops.
+ tensorflow::EagerContext* GetEagerContext() { return eager_context_.get(); }
+
+ // Map from TF Lite tensor index to TensorFlow tensor.
+ BufferMap* GetBufferMap() { return &buffer_map_; }
+
+ private:
+ explicit DelegateData(tensorflow::EagerContext* eager_context);
+
+ std::unique_ptr<tensorflow::EagerContext> eager_context_;
+ BufferMap buffer_map_;
+};
+
+} // namespace eager
+} // namespace tflite
+
+#endif // TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_DELEGATE_DATA_H_
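A short sketch of the intended call pattern for DelegateData; the helper name is an assumption for illustration and error handling is condensed:

```c++
#include <memory>

#include "tensorflow/contrib/lite/delegates/eager/delegate_data.h"

tensorflow::Status InitEagerDelegateData(
    std::unique_ptr<tflite::eager::DelegateData>* data) {
  tensorflow::Status status = tflite::eager::DelegateData::Create(data);
  if (!status.ok()) return status;
  // The EagerContext and BufferMap live for as long as the DelegateData does.
  tensorflow::EagerContext* eager_context = (*data)->GetEagerContext();
  tflite::eager::BufferMap* buffers = (*data)->GetBufferMap();
  (void)eager_context;
  (void)buffers;
  return tensorflow::Status();
}
```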
diff --git a/tensorflow/contrib/lite/delegates/eager/delegate_data_test.cc b/tensorflow/contrib/lite/delegates/eager/delegate_data_test.cc
new file mode 100644
index 0000000000..30251b8f82
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/delegate_data_test.cc
@@ -0,0 +1,44 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/delegates/eager/delegate_data.h"
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/testing/util.h"
+
+namespace tflite {
+namespace eager {
+namespace {
+
+TEST(DelegateDataTest, Basic) {
+ std::unique_ptr<DelegateData> data;
+ // We only check for success because it is hard to make initialization fail.
+ // It only happens if we manage to not link the CPU device factory into the
+ // binary.
+ EXPECT_TRUE(DelegateData::Create(&data).ok());
+
+ EXPECT_NE(data->GetEagerContext(), nullptr);
+ EXPECT_NE(data->GetBufferMap(), nullptr);
+}
+
+} // namespace
+} // namespace eager
+} // namespace tflite
+
+int main(int argc, char** argv) {
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/delegates/eager/util.cc b/tensorflow/contrib/lite/delegates/eager/util.cc
new file mode 100644
index 0000000000..4426c653e6
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/util.cc
@@ -0,0 +1,72 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/delegates/eager/util.h"
+
+namespace tflite {
+namespace eager {
+
+TfLiteStatus ConvertStatus(TfLiteContext* context,
+ const tensorflow::Status& status) {
+ if (!status.ok()) {
+ context->ReportError(context, "%s", status.error_message().c_str());
+ return kTfLiteError;
+ }
+ return kTfLiteOk;
+}
+
+TfLiteStatus CopyShape(TfLiteContext* context, const tensorflow::Tensor& src,
+ TfLiteTensor* tensor) {
+ int num_dims = src.dims();
+ TfLiteIntArray* shape = TfLiteIntArrayCreate(num_dims);
+ for (int j = 0; j < num_dims; ++j) {
+ // We need to cast from TensorFlow's int64 to TF Lite's int32. Let's
+ // make sure there's no overflow.
+ if (src.dim_size(j) >= std::numeric_limits<int>::max()) {
+ context->ReportError(context,
+ "Dimension value in TensorFlow shape is larger than "
+ "supported by TF Lite");
+ TfLiteIntArrayFree(shape);
+ return kTfLiteError;
+ }
+ shape->data[j] = static_cast<int>(src.dim_size(j));
+ }
+ return context->ResizeTensor(context, tensor, shape);
+}
+
+TF_DataType GetTensorFlowDataType(TfLiteType type) {
+ switch (type) {
+ case kTfLiteNoType:
+ return TF_FLOAT;
+ case kTfLiteFloat32:
+ return TF_FLOAT;
+ case kTfLiteInt16:
+ return TF_INT16;
+ case kTfLiteInt32:
+ return TF_INT32;
+ case kTfLiteUInt8:
+ return TF_UINT8;
+ case kTfLiteInt64:
+ return TF_INT64;
+ case kTfLiteComplex64:
+ return TF_COMPLEX64;
+ case kTfLiteString:
+ return TF_STRING;
+ case kTfLiteBool:
+ return TF_BOOL;
+ }
+}
+
+} // namespace eager
+} // namespace tflite
diff --git a/tensorflow/contrib/lite/delegates/eager/util.h b/tensorflow/contrib/lite/delegates/eager/util.h
new file mode 100644
index 0000000000..a9407be071
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/util.h
@@ -0,0 +1,42 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_UTIL_H_
+#define TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_UTIL_H_
+
+#include "tensorflow/c/c_api_internal.h"
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/lib/core/status.h"
+
+namespace tflite {
+namespace eager {
+
+// Converts a tensorflow::Status into a TfLiteStatus. If the original status
+// represented an error, reports it using the given 'context'.
+TfLiteStatus ConvertStatus(TfLiteContext* context,
+ const tensorflow::Status& status);
+
+// Copies the shape of the given 'src' tensor into a TF Lite 'tensor'. Logs an
+// error and returns kTfLiteError if the shape can't be converted.
+TfLiteStatus CopyShape(TfLiteContext* context, const tensorflow::Tensor& src,
+ TfLiteTensor* tensor);
+
+// Returns the TF C API Data type that corresponds to the given TfLiteType.
+TF_DataType GetTensorFlowDataType(TfLiteType type);
+
+} // namespace eager
+} // namespace tflite
+
+#endif // TENSORFLOW_CONTRIB_LITE_DELEGATES_EAGER_UTIL_H_
diff --git a/tensorflow/contrib/lite/delegates/eager/util_test.cc b/tensorflow/contrib/lite/delegates/eager/util_test.cc
new file mode 100644
index 0000000000..c4fbf54127
--- /dev/null
+++ b/tensorflow/contrib/lite/delegates/eager/util_test.cc
@@ -0,0 +1,113 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/delegates/eager/util.h"
+
+#include <cstdarg>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/testing/util.h"
+
+namespace tflite {
+namespace eager {
+namespace {
+
+using tensorflow::DT_FLOAT;
+using tensorflow::Tensor;
+using ::testing::ElementsAre;
+
+struct TestContext : public TfLiteContext {
+ string error;
+ std::vector<int> new_size;
+};
+
+void ReportError(TfLiteContext* context, const char* format, ...) {
+ TestContext* c = static_cast<TestContext*>(context);
+ const size_t kBufferSize = 1024;
+ char temp_buffer[kBufferSize];
+
+ va_list args;
+ va_start(args, format);
+ vsnprintf(temp_buffer, kBufferSize, format, args);
+ va_end(args);
+
+ c->error = temp_buffer;
+}
+
+TfLiteStatus ResizeTensor(TfLiteContext* context, TfLiteTensor* tensor,
+ TfLiteIntArray* new_size) {
+ TestContext* c = static_cast<TestContext*>(context);
+ c->new_size.clear();
+ for (int i = 0; i < new_size->size; ++i) {
+ c->new_size.push_back(new_size->data[i]);
+ }
+ TfLiteIntArrayFree(new_size);
+ return kTfLiteOk;
+}
+
+TEST(UtilTest, ConvertStatus) {
+ TestContext context;
+ context.ReportError = ReportError;
+
+ EXPECT_EQ(ConvertStatus(&context, tensorflow::errors::Internal("Some Error")),
+ kTfLiteError);
+ EXPECT_EQ(context.error, "Some Error");
+
+ context.error.clear();
+ EXPECT_EQ(ConvertStatus(&context, tensorflow::Status()), kTfLiteOk);
+ EXPECT_TRUE(context.error.empty());
+}
+
+TEST(UtilTest, CopyShape) {
+ TestContext context;
+ context.ReportError = ReportError;
+ context.ResizeTensor = ResizeTensor;
+
+ TfLiteTensor dst;
+
+ EXPECT_EQ(CopyShape(&context, Tensor(), &dst), kTfLiteOk);
+ EXPECT_THAT(context.new_size, ElementsAre(0));
+
+ EXPECT_EQ(CopyShape(&context, Tensor(DT_FLOAT, {1, 2}), &dst), kTfLiteOk);
+ EXPECT_THAT(context.new_size, ElementsAre(1, 2));
+
+ EXPECT_EQ(CopyShape(&context, Tensor(DT_FLOAT, {1LL << 44, 2}), &dst),
+ kTfLiteError);
+ EXPECT_EQ(context.error,
+ "Dimension value in TensorFlow shape is larger than supported by "
+ "TF Lite");
+}
+
+TEST(UtilTest, TypeConversions) {
+ EXPECT_EQ(TF_FLOAT, GetTensorFlowDataType(kTfLiteNoType));
+ EXPECT_EQ(TF_FLOAT, GetTensorFlowDataType(kTfLiteFloat32));
+ EXPECT_EQ(TF_INT16, GetTensorFlowDataType(kTfLiteInt16));
+ EXPECT_EQ(TF_INT32, GetTensorFlowDataType(kTfLiteInt32));
+ EXPECT_EQ(TF_UINT8, GetTensorFlowDataType(kTfLiteUInt8));
+ EXPECT_EQ(TF_INT64, GetTensorFlowDataType(kTfLiteInt64));
+ EXPECT_EQ(TF_COMPLEX64, GetTensorFlowDataType(kTfLiteComplex64));
+ EXPECT_EQ(TF_STRING, GetTensorFlowDataType(kTfLiteString));
+ EXPECT_EQ(TF_BOOL, GetTensorFlowDataType(kTfLiteBool));
+}
+
+} // namespace
+} // namespace eager
+} // namespace tflite
+
+int main(int argc, char** argv) {
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/delegates/nnapi/BUILD b/tensorflow/contrib/lite/delegates/nnapi/BUILD
index 35a8f6ca41..091f8fbce7 100644
--- a/tensorflow/contrib/lite/delegates/nnapi/BUILD
+++ b/tensorflow/contrib/lite/delegates/nnapi/BUILD
@@ -22,6 +22,7 @@ tf_cc_test(
name = "nnapi_delegate_test",
size = "small",
srcs = ["nnapi_delegate_test.cc"],
+ tags = ["no_oss"],
deps = [
":nnapi_delegate",
"//tensorflow/contrib/lite:framework",
diff --git a/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate.cc b/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate.cc
index f0d16575ec..0c7f6d3125 100644
--- a/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate.cc
+++ b/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate.cc
@@ -452,6 +452,18 @@ class NNAPIDelegateKernel {
} else {
return nullptr;
}
+ case kTfLiteBuiltinL2Normalization: {
+ auto builtin =
+ reinterpret_cast<TfLiteL2NormParams*>(node->builtin_data);
+ if (builtin->activation != kTfLiteActNone) {
+ // NNAPI does not support activations
+ return nullptr;
+ }
+ return [](TfLiteContext* context, NNAPIOpBuilder* builder,
+ TfLiteNode* node) -> ANeuralNetworksOperationType {
+ return ANEURALNETWORKS_L2_NORMALIZATION;
+ };
+ }
case kTfLiteBuiltinTranspose:
// Transpose requires NNAPI1.1. Also note that the permutation input
// tensor value dictates the output dimensions.
diff --git a/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate_test.cc b/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate_test.cc
index ab2181e8ff..baf8046f9b 100644
--- a/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate_test.cc
+++ b/tensorflow/contrib/lite/delegates/nnapi/nnapi_delegate_test.cc
@@ -641,6 +641,41 @@ TEST(NNAPIDelegate, SqueezeWithAxisTest) {
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}));
}
+class L2NormOpModel : public SingleOpModelWithNNAPI {
+ public:
+ L2NormOpModel(const TensorData& input, const TensorData& output,
+ ActivationFunctionType activation_type) {
+ input_ = AddInput(input);
+ output_ = AddOutput(output);
+ SetBuiltinOp(BuiltinOperator_L2_NORMALIZATION, BuiltinOptions_L2NormOptions,
+ CreateL2NormOptions(builder_, activation_type).Union());
+ BuildInterpreter({GetShape(input_)});
+ }
+
+ void SetInput(std::initializer_list<float> data) {
+ PopulateTensor<float>(input_, data);
+ }
+ std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
+ std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
+
+ private:
+ int input_;
+ int new_shape_;
+ int output_;
+};
+
+TEST(NNAPIDelegate, L2NormSimpleTest) {
+ std::initializer_list<float> data = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
+ L2NormOpModel m({TensorType_FLOAT32, {1, 1, 1, 6}},
+ {TensorType_FLOAT32, {1, 1, 1, 6}},
+ ActivationFunctionType_NONE);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 6}));
+ EXPECT_THAT(m.GetOutput(),
+ ElementsAreArray({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05}));
+}
+
class TransposeSimpleModel : public SingleOpModelWithNNAPI {
public:
TransposeSimpleModel(std::initializer_list<int> input_shape,
diff --git a/tensorflow/contrib/lite/download_dependencies.sh b/tensorflow/contrib/lite/download_dependencies.sh
index 840015a7fa..8c7df474d5 100755
--- a/tensorflow/contrib/lite/download_dependencies.sh
+++ b/tensorflow/contrib/lite/download_dependencies.sh
@@ -35,7 +35,7 @@ GOOGLETEST_URL="https://github.com/google/googletest/archive/release-1.8.0.tar.g
ABSL_URL="$(grep -o 'https://github.com/abseil/abseil-cpp/.*tar.gz' "${BZL_FILE_PATH}" | head -n1)"
NEON_2_SSE_URL="https://github.com/intel/ARM_NEON_2_x86_SSE/archive/master.zip"
FARMHASH_URL="https://mirror.bazel.build/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz"
-FLATBUFFERS_URL="https://github.com/google/flatbuffers/archive/master.zip"
+FLATBUFFERS_URL="https://github.com/google/flatbuffers/archive/v1.8.0.zip"
FFT2D_URL="https://mirror.bazel.build/www.kurims.kyoto-u.ac.jp/~ooura/fft.tgz"
# TODO(petewarden): Some new code in Eigen triggers a clang bug with iOS arm64,
diff --git a/tensorflow/contrib/lite/examples/android/app/README.md b/tensorflow/contrib/lite/examples/android/app/README.md
index 3065a5f6ee..cbdeeac879 100644
--- a/tensorflow/contrib/lite/examples/android/app/README.md
+++ b/tensorflow/contrib/lite/examples/android/app/README.md
@@ -2,12 +2,12 @@
## Building from Source with Bazel
-1. Follow the [Bazel steps for the TF Demo App](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android#bazel).
+1. Install [Bazel](https://docs.bazel.build/versions/master/install.html), the Android NDK and SDK. The recommended versions are specified on this [webpage](https://www.tensorflow.org/mobile/tflite/demo_android#build_tensorflow_lite_and_the_demo_app_from_source).
-2. Build the app with Bazel. The demo needs C++11:
+2. Build this demo app with Bazel. The demo needs C++11. We configure the --fat_apk_cpu flag to package support for 4 hardware variants. You may replace it with --config=android_arm64 on a 64-bit device or --config=android_arm on a 32-bit device:
```shell
- bazel build -c opt --cxxopt='--std=c++11' \
+ bazel build -c opt --cxxopt='--std=c++11' --fat_apk_cpu=x86,x86_64,arm64-v8a,armeabi-v7a \
//tensorflow/contrib/lite/examples/android:tflite_demo
```
diff --git a/tensorflow/contrib/lite/examples/android/app/build.gradle b/tensorflow/contrib/lite/examples/android/app/build.gradle
index 1ffb9dd377..eb7fd705e1 100644
--- a/tensorflow/contrib/lite/examples/android/app/build.gradle
+++ b/tensorflow/contrib/lite/examples/android/app/build.gradle
@@ -51,7 +51,7 @@ apply from: "download-models.gradle"
dependencies {
compile fileTree(dir: 'libs', include: ['*.jar'])
- androidTestCompile('com.androidx.test.espresso:espresso-core:2.2.2', {
+ androidTestCompile('androidx.test.espresso:espresso-core:3.1.0-alpha3', {
exclude group: 'com.android.support', module: 'support-annotations'
})
compile 'org.tensorflow:tensorflow-lite:0.0.0-nightly'
diff --git a/tensorflow/contrib/lite/examples/label_image/BUILD b/tensorflow/contrib/lite/examples/label_image/BUILD
index c61445114e..fc55a78019 100644
--- a/tensorflow/contrib/lite/examples/label_image/BUILD
+++ b/tensorflow/contrib/lite/examples/label_image/BUILD
@@ -63,6 +63,7 @@ cc_test(
data = [
"testdata/grace_hopper.bmp",
],
+ tags = ["no_oss"],
deps = [
":bitmap_helpers",
"@com_google_googletest//:gtest",
diff --git a/tensorflow/contrib/lite/experimental/c/BUILD b/tensorflow/contrib/lite/experimental/c/BUILD
new file mode 100644
index 0000000000..b09bb9ea10
--- /dev/null
+++ b/tensorflow/contrib/lite/experimental/c/BUILD
@@ -0,0 +1,63 @@
+package(default_visibility = ["//visibility:private"])
+
+licenses(["notice"]) # Apache 2.0
+
+load(
+ "//tensorflow/contrib/lite:build_def.bzl",
+ "tflite_cc_shared_object",
+ "tflite_jni_binary",
+)
+
+tflite_cc_shared_object(
+ name = "libtensorflowlite_c.so",
+ linkopts = select({
+ "//tensorflow:darwin": [
+ "-Wl,-exported_symbols_list", # This line must be directly followed by the exported_symbols.lds file
+ "$(location //tensorflow/contrib/lite/experimental/c:exported_symbols.lds)",
+ "-Wl,-install_name,@rpath/libtensorflowlite_c.so",
+ ],
+ "//tensorflow:windows": [],
+ "//conditions:default": [
+ "-z defs",
+ "-Wl,--version-script", # This line must be directly followed by the version_script.lds file
+ "$(location //tensorflow/contrib/lite/experimental/c:version_script.lds)",
+ ],
+ }),
+ deps = [
+ ":c_api",
+ ":exported_symbols.lds",
+ ":version_script.lds",
+ ],
+)
+
+tflite_jni_binary(
+ name = "libtensorflowlite_c_jni.so",
+ linkscript = ":version_script.lds",
+ deps = [":c_api"],
+)
+
+cc_library(
+ name = "c_api",
+ srcs = ["c_api.cc"],
+ hdrs = ["c_api.h"],
+ deps = [
+ "//tensorflow/contrib/lite:context",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite:schema_fbs_version",
+ "//tensorflow/contrib/lite/kernels:builtin_ops",
+ ],
+)
+
+cc_test(
+ name = "c_api_test",
+ size = "small",
+ srcs = ["c_api_test.cc"],
+ data = ["//tensorflow/contrib/lite:testdata/add.bin"],
+ deps = [
+ ":c_api",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite:kernel_api",
+ "//tensorflow/contrib/lite/testing:util",
+ "@com_google_googletest//:gtest",
+ ],
+)
diff --git a/tensorflow/contrib/lite/experimental/c/c_api.cc b/tensorflow/contrib/lite/experimental/c/c_api.cc
new file mode 100644
index 0000000000..add4c6813d
--- /dev/null
+++ b/tensorflow/contrib/lite/experimental/c/c_api.cc
@@ -0,0 +1,118 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/contrib/lite/experimental/c/c_api.h"
+
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/contrib/lite/interpreter.h"
+#include "tensorflow/contrib/lite/kernels/register.h"
+#include "tensorflow/contrib/lite/model.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif // __cplusplus
+
+struct _TFL_Interpreter {
+ std::unique_ptr<tflite::Interpreter> impl;
+};
+
+TFL_Interpreter* TFL_NewInterpreter(const void* model_data,
+ int32_t model_size) {
+ auto model = tflite::FlatBufferModel::BuildFromBuffer(
+ static_cast<const char*>(model_data), static_cast<size_t>(model_size));
+ if (!model) {
+ return nullptr;
+ }
+
+ tflite::ops::builtin::BuiltinOpResolver resolver;
+ tflite::InterpreterBuilder builder(*model, resolver);
+ std::unique_ptr<tflite::Interpreter> interpreter_impl;
+ if (builder(&interpreter_impl) != kTfLiteOk) {
+ return nullptr;
+ }
+
+ return new TFL_Interpreter{std::move(interpreter_impl)};
+}
+
+void TFL_DeleteInterpreter(TFL_Interpreter* interpreter) { delete interpreter; }
+
+int32_t TFL_InterpreterGetInputTensorCount(const TFL_Interpreter* interpreter) {
+ return static_cast<int>(interpreter->impl->inputs().size());
+}
+
+TFL_Tensor* TFL_InterpreterGetInputTensor(const TFL_Interpreter* interpreter,
+ int32_t input_index) {
+ return interpreter->impl->tensor(interpreter->impl->inputs()[input_index]);
+}
+
+TFL_Status TFL_InterpreterResizeInputTensor(TFL_Interpreter* interpreter,
+ int32_t input_index,
+ const int* input_dims,
+ int32_t input_dims_size) {
+ std::vector<int> dims{input_dims, input_dims + input_dims_size};
+ return interpreter->impl->ResizeInputTensor(
+ interpreter->impl->inputs()[input_index], dims);
+}
+
+TFL_Status TFL_InterpreterAllocateTensors(TFL_Interpreter* interpreter) {
+ return interpreter->impl->AllocateTensors();
+}
+
+TFL_Status TFL_InterpreterInvoke(TFL_Interpreter* interpreter) {
+ return interpreter->impl->Invoke();
+}
+
+int32_t TFL_InterpreterGetOutputTensorCount(
+ const TFL_Interpreter* interpreter) {
+ return static_cast<int>(interpreter->impl->outputs().size());
+}
+
+const TFL_Tensor* TFL_InterpreterGetOutputTensor(
+ const TFL_Interpreter* interpreter, int32_t output_index) {
+ return interpreter->impl->tensor(interpreter->impl->outputs()[output_index]);
+}
+
+TFL_Type TFL_TensorType(const TFL_Tensor* tensor) { return tensor->type; }
+
+int32_t TFL_TensorNumDims(const TFL_Tensor* tensor) {
+ return tensor->dims->size;
+}
+
+int32_t TFL_TensorDim(const TFL_Tensor* tensor, int32_t dim_index) {
+ return tensor->dims->data[dim_index];
+}
+
+size_t TFL_TensorByteSize(const TFL_Tensor* tensor) { return tensor->bytes; }
+
+TFL_Status TFL_TensorCopyFromBuffer(TFL_Tensor* tensor, const void* input_data,
+ int32_t input_data_size) {
+ if (tensor->bytes != static_cast<size_t>(input_data_size)) {
+ return kTfLiteError;
+ }
+ memcpy(tensor->data.raw, input_data, input_data_size);
+ return kTfLiteOk;
+}
+
+TFL_Status TFL_TensorCopyToBuffer(const TFL_Tensor* tensor, void* output_data,
+ int32_t output_data_size) {
+ if (tensor->bytes != static_cast<size_t>(output_data_size)) {
+ return kTfLiteError;
+ }
+ memcpy(output_data, tensor->data.raw, output_data_size);
+ return kTfLiteOk;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
diff --git a/tensorflow/contrib/lite/experimental/c/c_api.h b/tensorflow/contrib/lite/experimental/c/c_api.h
new file mode 100644
index 0000000000..070f1add13
--- /dev/null
+++ b/tensorflow/contrib/lite/experimental/c/c_api.h
@@ -0,0 +1,149 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_CONTRIB_LITE_EXPERIMENTAL_C_C_API_H_
+#define TENSORFLOW_CONTRIB_LITE_EXPERIMENTAL_C_C_API_H_
+
+#include <stdint.h>
+
+// Eventually the various C APIs defined in context.h will be migrated into
+// the appropriate /c/c_api*.h header. For now, we pull in existing definitions
+// for convenience.
+#include "tensorflow/contrib/lite/context.h"
+
+// --------------------------------------------------------------------------
+// Experimental C API for TensorFlow Lite.
+//
+// The API leans towards simplicity and uniformity instead of convenience, as
+// most usage will be by language-specific wrappers.
+//
+// Conventions:
+// * We use the prefix TFL_ for everything in the API.
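+//
+// A minimal usage sketch (error handling omitted). It assumes `model_data` and
+// `model_size` describe a valid TensorFlow Lite flatbuffer and that the input
+// and output buffers match the corresponding tensor byte sizes; the flow
+// mirrors the smoke test in c_api_test.cc:
+//
+//   TFL_Interpreter* interpreter = TFL_NewInterpreter(model_data, model_size);
+//   TFL_InterpreterAllocateTensors(interpreter);
+//   TFL_Tensor* input = TFL_InterpreterGetInputTensor(interpreter, 0);
+//   TFL_TensorCopyFromBuffer(input, input_buffer, TFL_TensorByteSize(input));
+//   TFL_InterpreterInvoke(interpreter);
+//   const TFL_Tensor* output = TFL_InterpreterGetOutputTensor(interpreter, 0);
+//   TFL_TensorCopyToBuffer(output, output_buffer, TFL_TensorByteSize(output));
+//   TFL_DeleteInterpreter(interpreter);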
+
+#ifdef SWIG
+#define TFL_CAPI_EXPORT
+#else
+#if defined(_WIN32)
+#ifdef TF_COMPILE_LIBRARY
+#define TFL_CAPI_EXPORT __declspec(dllexport)
+#else
+#define TFL_CAPI_EXPORT __declspec(dllimport)
+#endif // TF_COMPILE_LIBRARY
+#else
+#define TFL_CAPI_EXPORT __attribute__((visibility("default")))
+#endif // _WIN32
+#endif // SWIG
+
+#ifdef __cplusplus
+extern "C" {
+#endif // __cplusplus
+
+typedef TfLiteTensor TFL_Tensor;
+typedef TfLiteStatus TFL_Status;
+typedef TfLiteType TFL_Type;
+
+// --------------------------------------------------------------------------
+// TFL_Interpreter provides inference from a provided model.
+typedef struct _TFL_Interpreter TFL_Interpreter;
+
+// Returns an interpreter for the provided model, or null on failure.
+//
+// NOTE: The client *must* explicitly allocate tensors before attempting to
+// access input tensor data or invoke the interpreter.
+TFL_CAPI_EXPORT extern TFL_Interpreter* TFL_NewInterpreter(
+ const void* model_data, int32_t model_size);
+
+// Destroys the interpreter.
+TFL_CAPI_EXPORT extern void TFL_DeleteInterpreter(TFL_Interpreter* interpreter);
+
+// Returns the number of input tensors associated with the model.
+TFL_CAPI_EXPORT extern int32_t TFL_InterpreterGetInputTensorCount(
+ const TFL_Interpreter* interpreter);
+
+// Returns the tensor associated with the input index.
+// REQUIRES: 0 <= input_index < TFL_InterpreterGetInputTensorCount(interpreter)
+TFL_CAPI_EXPORT extern TFL_Tensor* TFL_InterpreterGetInputTensor(
+ const TFL_Interpreter* interpreter, int32_t input_index);
+
+// Attempts to resize the specified input tensor.
+// NOTE: After a resize, the client *must* explicitly allocate tensors before
+// attempting to access the resized tensor data or invoke the interpreter.
+// REQUIRES: 0 <= input_index < TFL_InterpreterGetInputTensorCount(interpreter)
+TFL_CAPI_EXPORT extern TFL_Status TFL_InterpreterResizeInputTensor(
+ TFL_Interpreter* interpreter, int32_t input_index, const int* input_dims,
+ int32_t input_dims_size);
+
+// Updates allocations for all tensors, resizing dependent tensors using the
+// specified input tensor dimensionality.
+//
+// This is a relatively expensive operation, and need only be called after
+// creating the graph and/or resizing any inputs.
+TFL_CAPI_EXPORT extern TFL_Status TFL_InterpreterAllocateTensors(
+ TFL_Interpreter* interpreter);
+
+// Runs inference for the loaded graph.
+//
+// NOTE: It is possible that the interpreter is not in a ready state to
+// evaluate (e.g., if a ResizeInputTensor() has been performed without a call to
+// AllocateTensors()).
+TFL_CAPI_EXPORT extern TFL_Status TFL_InterpreterInvoke(
+ TFL_Interpreter* interpreter);
+
+// Returns the number of output tensors associated with the model.
+TFL_CAPI_EXPORT extern int32_t TFL_InterpreterGetOutputTensorCount(
+ const TFL_Interpreter* interpreter);
+
+// Returns the tensor associated with the output index.
+// REQUIRES: 0 <= output_index < TFL_InterpreterGetOutputTensorCount(interpreter)
+TFL_CAPI_EXPORT extern const TFL_Tensor* TFL_InterpreterGetOutputTensor(
+ const TFL_Interpreter* interpreter, int32_t output_index);
+
+// --------------------------------------------------------------------------
+// TFL_Tensor wraps data associated with a graph tensor.
+//
+// Note that, while the TFL_Tensor struct is not currently opaque, and its
+// fields can be accessed directly, these methods are still convenient for
+// language bindings. In the future the tensor struct will likely be made opaque
+// in the public API.
+
+// Returns the type of a tensor element.
+TFL_CAPI_EXPORT extern TFL_Type TFL_TensorType(const TFL_Tensor* tensor);
+
+// Returns the number of dimensions that the tensor has.
+TFL_CAPI_EXPORT extern int32_t TFL_TensorNumDims(const TFL_Tensor* tensor);
+
+// Returns the length of the tensor in the "dim_index" dimension.
+// REQUIRES: 0 <= dim_index < TFL_TensorNumDims(tensor)
+TFL_CAPI_EXPORT extern int32_t TFL_TensorDim(const TFL_Tensor* tensor,
+ int32_t dim_index);
+
+// Returns the size of the underlying data in bytes.
+TFL_CAPI_EXPORT extern size_t TFL_TensorByteSize(const TFL_Tensor* tensor);
+
+// Copies from the provided input buffer into the tensor's buffer.
+// REQUIRES: input_data_size == TFL_TensorByteSize(tensor)
+TFL_CAPI_EXPORT extern TFL_Status TFL_TensorCopyFromBuffer(
+ TFL_Tensor* tensor, const void* input_data, int32_t input_data_size);
+
+// Copies to the provided output buffer from the tensor's buffer.
+// REQUIRES: output_data_size == TFL_TensorByteSize(tensor)
+TFL_CAPI_EXPORT extern TFL_Status TFL_TensorCopyToBuffer(
+ const TFL_Tensor* output_tensor, void* output_data,
+ int32_t output_data_size);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+#endif // TENSORFLOW_CONTRIB_LITE_EXPERIMENTAL_C_C_API_H_
diff --git a/tensorflow/contrib/lite/experimental/c/c_api_test.cc b/tensorflow/contrib/lite/experimental/c/c_api_test.cc
new file mode 100644
index 0000000000..bc925e00a6
--- /dev/null
+++ b/tensorflow/contrib/lite/experimental/c/c_api_test.cc
@@ -0,0 +1,84 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <array>
+
+#include "tensorflow/contrib/lite/experimental/c/c_api.h"
+
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/allocation.h"
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/contrib/lite/testing/util.h"
+
+namespace {
+
+TEST(CApiSimple, Smoke) {
+ tflite::FileCopyAllocation model_file(
+ "tensorflow/contrib/lite/testdata/add.bin",
+ tflite::DefaultErrorReporter());
+
+ TFL_Interpreter* interpreter =
+ TFL_NewInterpreter(model_file.base(), model_file.bytes());
+ ASSERT_NE(interpreter, nullptr);
+ ASSERT_EQ(TFL_InterpreterAllocateTensors(interpreter), kTfLiteOk);
+
+ ASSERT_EQ(TFL_InterpreterGetInputTensorCount(interpreter), 1);
+ ASSERT_EQ(TFL_InterpreterGetOutputTensorCount(interpreter), 1);
+
+ std::array<int, 1> input_dims = {2};
+ ASSERT_EQ(TFL_InterpreterResizeInputTensor(interpreter, 0, input_dims.data(),
+ input_dims.size()),
+ kTfLiteOk);
+ ASSERT_EQ(TFL_InterpreterAllocateTensors(interpreter), kTfLiteOk);
+
+ TFL_Tensor* input_tensor = TFL_InterpreterGetInputTensor(interpreter, 0);
+ ASSERT_NE(input_tensor, nullptr);
+ EXPECT_EQ(TFL_TensorType(input_tensor), kTfLiteFloat32);
+ EXPECT_EQ(TFL_TensorNumDims(input_tensor), 1);
+ EXPECT_EQ(TFL_TensorDim(input_tensor, 0), 2);
+ EXPECT_EQ(TFL_TensorByteSize(input_tensor), sizeof(float) * 2);
+
+ std::array<float, 2> input = {1.f, 3.f};
+ ASSERT_EQ(TFL_TensorCopyFromBuffer(input_tensor, input.data(),
+ input.size() * sizeof(float)),
+ kTfLiteOk);
+
+ ASSERT_EQ(TFL_InterpreterInvoke(interpreter), kTfLiteOk);
+
+ const TFL_Tensor* output_tensor =
+ TFL_InterpreterGetOutputTensor(interpreter, 0);
+ ASSERT_NE(output_tensor, nullptr);
+ EXPECT_EQ(TFL_TensorType(output_tensor), kTfLiteFloat32);
+ EXPECT_EQ(TFL_TensorNumDims(output_tensor), 1);
+ EXPECT_EQ(TFL_TensorDim(output_tensor, 0), 2);
+ EXPECT_EQ(TFL_TensorByteSize(output_tensor), sizeof(float) * 2);
+
+ std::array<float, 2> output;
+ ASSERT_EQ(TFL_TensorCopyToBuffer(output_tensor, output.data(),
+ output.size() * sizeof(float)),
+ kTfLiteOk);
+ EXPECT_EQ(output[0], 3.f);
+ EXPECT_EQ(output[1], 9.f);
+
+ TFL_DeleteInterpreter(interpreter);
+}
+
+} // namespace
+
+int main(int argc, char** argv) {
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/experimental/c/exported_symbols.lds b/tensorflow/contrib/lite/experimental/c/exported_symbols.lds
new file mode 100644
index 0000000000..a3ddc6bc8d
--- /dev/null
+++ b/tensorflow/contrib/lite/experimental/c/exported_symbols.lds
@@ -0,0 +1 @@
+_TFL_*
diff --git a/tensorflow/contrib/lite/experimental/c/version_script.lds b/tensorflow/contrib/lite/experimental/c/version_script.lds
new file mode 100644
index 0000000000..c0c8a2bca1
--- /dev/null
+++ b/tensorflow/contrib/lite/experimental/c/version_script.lds
@@ -0,0 +1,9 @@
+VERS_1.0 {
+ # Export symbols in c_api.h.
+ global:
+ *TFL_*;
+
+ # Hide everything else.
+ local:
+ *;
+};
diff --git a/tensorflow/contrib/lite/g3doc/README.md b/tensorflow/contrib/lite/g3doc/README.md
new file mode 100644
index 0000000000..e3db478481
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/README.md
@@ -0,0 +1,4 @@
+This is a *work-in-progress* TF Lite subsite for:
+https://www.tensorflow.org/mobile
+
+DO NOT PUBLISH
diff --git a/tensorflow/contrib/lite/g3doc/_book.yaml b/tensorflow/contrib/lite/g3doc/_book.yaml
new file mode 100644
index 0000000000..98abd5743b
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/_book.yaml
@@ -0,0 +1,58 @@
+upper_tabs:
+# Tabs left of dropdown menu
+- include: /_upper_tabs_left.yaml
+# Dropdown menu
+- name: Ecosystem
+ path: /ecosystem
+ is_default: True
+ menu:
+ - include: /ecosystem/_menu_toc.yaml
+ lower_tabs:
+ # Subsite tabs
+ other:
+ - name: Guide
+ contents:
+ - title: Overview
+ path: /mobile/overview
+ - title: Developer Guide
+ path: /mobile/devguide
+ - title: Android Demo App
+ path: /mobile/demo_android
+ - title: iOS Demo App
+ path: /mobile/demo_ios
+ - title: Performance
+ path: /mobile/performance
+ - break: True
+ - title: TensorFlow Lite APIs
+ path: /mobile/apis
+ - title: Custom operators
+ path: /mobile/custom_operators
+ - title: TensorFlow Lite Ops Versioning
+ path: /mobile/ops_versioning
+ - title: TensorFlow Lite Compatibility Guide
+ path: /mobile/tf_ops_compatibility
+ - title: List of Hosted Models
+ path: /mobile/models
+ - title: TensorFlow Lite for iOS
+ path: /mobile/ios
+ - title: TensorFlow Lite for Raspberry Pi
+ path: /mobile/rpi
+
+ - heading: TF Mobile
+ status: deprecated
+ - title: Overview
+ path: /mobile/tfmobile/
+ - title: Building TensorFlow on Android
+ path: /mobile/tfmobile/android_build
    - title: Building TensorFlow on iOS
+ path: /mobile/tfmobile/ios_build
+ - title: Integrating TensorFlow libraries
+ path: /mobile/tfmobile/linking_libs
+ - title: Preparing models for mobile deployment
+ path: /mobile/tfmobile/prepare_models
+ - title: Optimizing for mobile
+ path: /mobile/tfmobile/optimizing
+
+ - name: API
+ contents:
+ - include: /mobile/api_docs/python/_toc.yaml
diff --git a/tensorflow/contrib/lite/g3doc/_index.yaml b/tensorflow/contrib/lite/g3doc/_index.yaml
new file mode 100644
index 0000000000..9119e49117
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/_index.yaml
@@ -0,0 +1,67 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+description: <!--no description-->
+landing_page:
+ rows:
+ - heading: TensorFlow Lite is a lightweight solution for mobile and embedded devices.
+ items:
+ - description: >
+ TensorFlow Lite is TensorFlow’s lightweight solution for mobile and
+ embedded devices. It enables on-device machine learning inference with
+ low latency and a small binary size. TensorFlow Lite also supports
+ hardware acceleration with the
+ <a href='https://developer.android.com/ndk/guides/neuralnetworks/index.html'>Android Neural Networks API</a>.
+ list:
+ - heading: Key point 1
+ description: >
+ [high-level overview]
+ icon:
+ icon_name: chevron_right
+ foreground: theme
+ background: grey
+ - heading: Key point 2
+ description: >
+ [high-level overview]
+ icon:
+ icon_name: chevron_right
+ foreground: theme
+ background: grey
+ - heading: Key point 3
+ description: >
+ [high-level overview]
+ icon:
+ icon_name: chevron_right
+ foreground: theme
+ background: grey
+ - code_block: |
+ <pre class = "prettyprint">
+ $ toco --input_file=$(pwd)/mobilenet_v1_1.0_224/frozen_graph.pb \
+ --input_format=TENSORFLOW_GRAPHDEF \
+ --output_format=TFLITE \
+ --output_file=/tmp/mobilenet_v1_1.0_224.tflite \
+ --inference_type=FLOAT \
+ --input_type=FLOAT \
+ --input_arrays=input \
+ --output_arrays=MobilenetV1/Predictions/Reshape_1 \
+ --input_shapes=1,224,224,3
+ </pre>
+
+ - classname: devsite-landing-row-cards
+ items:
+ - heading: Using TensorFlow Lite on Android
+ image_path: /ecosystem/images/tf-logo-card-16x9.png
+ path: https://medium.com/tensorflow/using-tensorflow-lite-on-android-9bbc9cb7d69d
+ buttons:
+ - label: Read on TensorFlow blog
+ path: https://medium.com/tensorflow/using-tensorflow-lite-on-android-9bbc9cb7d69d
+ - heading: TensorFlow Lite at the Dev Summit
+ youtube_id: FAMfy7izB6A
+ buttons:
+ - label: Watch the video
+ path: https://www.youtube.com/watch?v=FAMfy7izB6A
+ - heading: TensorFlow Lite on GitHub
+ image_path: /ecosystem/images/github-card-16x9.png
+ path: https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite
+ buttons:
+ - label: View on GitHub
+ path: https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite
diff --git a/tensorflow/contrib/lite/g3doc/_project.yaml b/tensorflow/contrib/lite/g3doc/_project.yaml
new file mode 100644
index 0000000000..b39666516b
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/_project.yaml
@@ -0,0 +1,10 @@
+name: TensorFlow Lite
+breadcrumb_name: Mobile
+home_url: /mobile/
+parent_project_metadata_path: /_project.yaml
+description: >
+ TensorFlow Lite is a lightweight solution for mobile and embedded devices.
+use_site_branding: True
+hide_from_products_list: True
+content_license: cc3-apache2
+buganizer_id: 316308
diff --git a/tensorflow/contrib/lite/g3doc/api_docs/python/_toc.yaml b/tensorflow/contrib/lite/g3doc/api_docs/python/_toc.yaml
new file mode 100644
index 0000000000..1e1c44c692
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/api_docs/python/_toc.yaml
@@ -0,0 +1,6 @@
+# Automatically generated file; please do not edit
+toc:
+ - title: TensorFlow Lite
+ section:
+ - title: Overview
+ path: /mobile/api_docs/python/
diff --git a/tensorflow/contrib/lite/g3doc/api_docs/python/index.md b/tensorflow/contrib/lite/g3doc/api_docs/python/index.md
new file mode 100644
index 0000000000..70031a3c3d
--- /dev/null
+++ b/tensorflow/contrib/lite/g3doc/api_docs/python/index.md
@@ -0,0 +1,10 @@
+Project: /mobile/_project.yaml
+Book: /mobile/_book.yaml
+page_type: reference
+<style> table img { max-width: 100%; } </style>
+<script src="/_static/js/managed/mathjax/MathJax.js?config=TeX-AMS-MML_SVG"></script>
+
+<!-- DO NOT EDIT! Automatically generated file. -->
+# All symbols in TensorFlow Lite
+
+TEMP PAGE
diff --git a/tensorflow/contrib/lite/g3doc/apis.md b/tensorflow/contrib/lite/g3doc/apis.md
index a591a353dd..776803da8c 100644
--- a/tensorflow/contrib/lite/g3doc/apis.md
+++ b/tensorflow/contrib/lite/g3doc/apis.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# TensorFlow Lite APIs
TensorFlow Lite provides programming APIs in C++ and Java, and in both cases
@@ -53,6 +56,7 @@ typedef enum {
```
Failures can be easily verified with:
+
```c++
if (status != kTfLiteOk) {
// ... error handling here ...
diff --git a/tensorflow/contrib/lite/g3doc/benchmarks.md b/tensorflow/contrib/lite/g3doc/benchmarks.md
deleted file mode 100644
index 96536cba27..0000000000
--- a/tensorflow/contrib/lite/g3doc/benchmarks.md
+++ /dev/null
@@ -1,178 +0,0 @@
-# Performance Benchmark numbers
-
-This document contains the performance benchmark numbers for running a few well
-known models on some Android and iOS devices.
-
-The benchmark numbers were generated by running the [TFLite benchmark
-binary](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark)
-on Android and running the [iOS benchmark
-app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark/ios)
-on iOS.
-
-# Android benchmarks
-
-When running Android benchmarks, the CPU affinity is set to use big cores on the
-device to reduce variance (see
-[details](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark#reducing-variance-between-runs-on-android)).
-
-Models are assumed to have been downloaded from the link, unzipped and pushed to
-`/data/local/tmp/tflite_models` folder. The benchmark binary is built according
-to instructions listed
-[here](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark#on-android)
-and is assumed to have been pushed to `/data/local/tmp`.
-
-The following command was used to run the benchmark:
-
-```
-adb shell taskset ${CPU_MASK} /data/local/tmp/benchmark_model \
- --num_threads=1 \
- --graph=/data/local/tmp/tflite_models/${GRAPH} \
- --warmup_runs=1 \
- --num_runs=50 \
- --use_nnapi=false
-```
-
-where `${GRAPH}` is the name of model and `${CPU_MASK}` is the CPU affinity
-chosen according to the following table:
-
-Device | CPU_MASK |
--------| ----------
-Pixel 2 | f0 |
-Pixel xl | 0c |
-
-
-<table>
- <thead>
- <tr>
- <th>Model Name</th>
- <th>Device </th>
- <th>Mean inference time (std dev)</th>
- </tr>
- </thead>
- <tr>
- <td rowspan = 2>
- <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz">Mobilenet_1.0_224(float)</a>
- </td>
- <td>Pixel 2 </td>
- <td>166.5 ms (2.6 ms)</td>
- </tr>
- <tr>
- <td>Pixel xl </td>
- <td>122.9 ms (1.8 ms) </td>
- </tr>
- <tr>
- <td rowspan = 2>
- <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz">Mobilenet_1.0_224 (quant)</a>
- </td>
- <td>Pixel 2 </td>
- <td>69.5 ms (0.9 ms)</td>
- </tr>
- <tr>
- <td>Pixel xl </td>
- <td>78.9 ms (2.2 ms) </td>
- </tr>
- <tr>
- <td rowspan = 2>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz">NASNet mobile</a>
- </td>
- <td>Pixel 2 </td>
- <td>273.8 ms (3.5 ms)</td>
- </tr>
- <tr>
- <td>Pixel xl </td>
- <td>210.8 ms (4.2 ms)</td>
- </tr>
- <tr>
- <td rowspan = 2>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz">SqueezeNet</a>
- </td>
- <td>Pixel 2 </td>
- <td>234.0 ms (2.1 ms)</td>
- </tr>
- <tr>
- <td>Pixel xl </td>
- <td>158.0 ms (2.1 ms)</td>
- </tr>
- <tr>
- <td rowspan = 2>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz">Inception_ResNet_V2</a>
- </td>
- <td>Pixel 2 </td>
- <td>2846.0 ms (15.0 ms)</td>
- </tr>
- <tr>
- <td>Pixel xl </td>
- <td>1973.0 ms (15.0 ms) </td>
- </tr>
- <tr>
- <td rowspan = 2>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz">Inception_V4</a>
- </td>
- <td>Pixel 2 </td>
- <td>3180.0 ms (11.7 ms)</td>
- </tr>
- <tr>
- <td>Pixel xl </td>
- <td>2262.0 ms (21.0 ms) </td>
- </tr>
-
- </table>
-
-# iOS benchmarks
-
-For running iOS benchmarks, the [benchmark
-app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/tools/benchmark/ios)
-was modified to include the appropriate model and `benchmark_params.json` was
-modified to set `num_threads` to 1.
-
-<table>
- <thead>
- <tr>
- <th>Model Name</th>
- <th>Device </th>
- <th>Mean inference time (std dev)</th>
- </tr>
- </thead>
- <tr>
- <td>
- <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz">Mobilenet_1.0_224(float)</a>
- </td>
- <td>iPhone 8 </td>
- <td>32.2 ms (0.8 ms)</td>
- </tr>
- <tr>
- <td>
- <a href="http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz)">Mobilenet_1.0_224 (quant)</a>
- </td>
- <td>iPhone 8 </td>
- <td>24.4 ms (0.8 ms)</td>
- </tr>
- <tr>
- <td>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/nasnet_mobile_2018_04_27.tgz">NASNet mobile</a>
- </td>
- <td>iPhone 8 </td>
- <td>60.3 ms (0.6 ms)</td>
- </tr>
- <tr>
- <td>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz">SqueezeNet</a>
- </td>
- <td>iPhone 8 </td>
- <td>44.3 (0.7 ms)</td>
- </tr>
- <tr>
- <td>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_resnet_v2_2018_04_27.tgz">Inception_ResNet_V2</a>
- </td>
- <td>iPhone 8</td>
- <td>562.4 ms (18.2 ms)</td>
- </tr>
- <tr>
- <td>
- <a href="https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz">Inception_V4</a>
- </td>
- <td>iPhone 8 </td>
- <td>661.0 ms (29.2 ms)</td>
- </tr>
- </table>
diff --git a/tensorflow/contrib/lite/g3doc/custom_operators.md b/tensorflow/contrib/lite/g3doc/custom_operators.md
index 972e57f73e..2296f5a064 100644
--- a/tensorflow/contrib/lite/g3doc/custom_operators.md
+++ b/tensorflow/contrib/lite/g3doc/custom_operators.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# How to use custom operators
TensorFlow Lite currently supports a subset of TensorFlow operators. However, it
@@ -89,3 +92,47 @@ builtins.AddCustom("Sin", Register_SIN());
Note that a similar process as above can be followed for supporting a set of
operations instead of a single operator.
+
+## Best Practices for writing custom operators
+
+1. Optimize memory allocations and de-allocations cautiously. It is more
+   efficient to allocate memory in Prepare() instead of Invoke(), and to
+   allocate memory before a loop instead of in every iteration. Use temporary
+   tensor data rather than calling malloc yourself (see item 2). Use
+   pointers/references instead of copying wherever possible.
+
+2. If a data structure will persist for the entire operation, we advise
+   pre-allocating the memory using temporary tensors. You may need to use an
+   OpData struct to reference the tensor indices in other functions. See the
+   example in the
+   [kernel for convolution](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/lite/kernels/conv.cc).
+   A sample code snippet is below:
+
+ ```
+ auto* op_data = reinterpret_cast<OpData*>(node->user_data);
+ TfLiteIntArrayFree(node->temporaries);
+ node->temporaries = TfLiteIntArrayCreate(1);
+ node->temporaries->data[0] = op_data->temp_tensor_index;
+ TfLiteTensor* temp_tensor = &context->tensors[op_data->temp_tensor_index];
+ temp_tensor->type = kTfLiteFloat32;
+ temp_tensor->allocation_type = kTfLiteArenaRw;
+ ```
+
+3. If it doesn't cost too much wasted memory, prefer a static fixed-size array
+   (or a std::vector pre-allocated in Resize()) over a std::vector that is
+   dynamically allocated on every iteration of execution.
+
+4. Avoid instantiating standard library container templates that don't already
+   exist, because they affect binary size. For example, if you need a std::map
+   in your operation that doesn't exist in other kernels, a std::vector with a
+   direct index mapping could work while keeping the binary size small. See
+   what other kernels use to gain insight (or ask).
+
+5. Check the pointer returned by malloc. If it is nullptr, no operations should
+   be performed using that pointer. If you call malloc() in a function and take
+   an error exit, deallocate the memory before you exit.
+
+6. Use TF_LITE_ENSURE(context, condition) to check for a specific condition.
+   Your code must not leave memory hanging when a TF_LITE_ENSURE check fails;
+   that is, perform these checks before allocating any resources that could
+   leak. A minimal sketch of this pattern follows this list.
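+
+Below is a minimal sketch of the check-before-allocate pattern from items 5 and
+6, written as a hypothetical Prepare() for the Sin operator used earlier in this
+guide. The helpers (NumInputs, GetInput, etc.) are assumed to come from
+`kernels/kernel_util.h`, and the exact shape/type checks are illustrative rather
+than required:
+
+```
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/contrib/lite/kernels/kernel_util.h"
+
+namespace tflite {
+
+TfLiteStatus SinPrepare(TfLiteContext* context, TfLiteNode* node) {
+  // Validate counts and types before allocating anything, so a failed
+  // TF_LITE_ENSURE cannot leak resources.
+  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
+  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+  TfLiteTensor* input = GetInput(context, node, 0);
+  TfLiteTensor* output = GetOutput(context, node, 0);
+  TF_LITE_ENSURE(context, input->type == kTfLiteFloat32);
+
+  // Only resize (allocate) the output once every check has passed.
+  return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims));
+}
+
+}  // namespace tflite
+```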
diff --git a/tensorflow/docs_src/mobile/tflite/demo_android.md b/tensorflow/contrib/lite/g3doc/demo_android.md
index fdf0bcf3c1..d79a2696b4 100644
--- a/tensorflow/docs_src/mobile/tflite/demo_android.md
+++ b/tensorflow/contrib/lite/g3doc/demo_android.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Android Demo App
An example Android application using TensorFLow Lite is available
diff --git a/tensorflow/docs_src/mobile/tflite/demo_ios.md b/tensorflow/contrib/lite/g3doc/demo_ios.md
index 3be21da89f..a554898899 100644
--- a/tensorflow/docs_src/mobile/tflite/demo_ios.md
+++ b/tensorflow/contrib/lite/g3doc/demo_ios.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# iOS Demo App
The TensorFlow Lite demo is a camera app that continuously classifies whatever
diff --git a/tensorflow/docs_src/mobile/tflite/devguide.md b/tensorflow/contrib/lite/g3doc/devguide.md
index b168d6c183..dc9cc98c08 100644
--- a/tensorflow/docs_src/mobile/tflite/devguide.md
+++ b/tensorflow/contrib/lite/g3doc/devguide.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Developer Guide
Using a TensorFlow Lite model in your mobile app requires multiple
@@ -56,7 +59,7 @@ both floating point and quantized inference.
A developer may choose to train a custom model using Tensorflow (see the
[TensorFlow tutorials](../../tutorials/) for examples of building and training
models). If you have already written a model, the first step is to export this
-to a @{tf.GraphDef} file. This is required because some formats do not store the
+to a `tf.GraphDef` file. This is required because some formats do not store the
model structure outside the code, and we must communicate with other parts of the
framework. See
[Exporting the Inference Graph](https://github.com/tensorflow/models/blob/master/research/slim/README.md)
@@ -71,12 +74,12 @@ grow in future Tensorflow Lite releases.
## 2. Convert the model format
The model generated (or downloaded) in the previous step is a *standard*
-Tensorflow model and you should now have a .pb or .pbtxt @{tf.GraphDef} file.
+Tensorflow model and you should now have a .pb or .pbtxt `tf.GraphDef` file.
Models generated with transfer learning (re-training) or custom models must be
converted—but, we must first freeze the graph to convert the model to the
Tensorflow Lite format. This process uses several model formats:
-* @{tf.GraphDef} (.pb) —A protobuf that represents the TensorFlow training or
+* `tf.GraphDef` (.pb) —A protobuf that represents the TensorFlow training or
computation graph. It contains operators, tensors, and variables definitions.
* *CheckPoint* (.ckpt) —Serialized variables from a TensorFlow graph. Since this
does not contain a graph structure, it cannot be interpreted by itself.
@@ -143,11 +146,11 @@ containing the model architecture. The [frozen_graph.pb](https://storage.googlea
file used here is available for download. `output_file` is where the TensorFlow
Lite model will get generated. The `input_type` and `inference_type`
arguments should be set to `FLOAT`, unless converting a
-@{$performance/quantization$quantized model}. Setting the `input_array`,
-`output_array`, and `input_shape` arguments are not as straightforward. The
-easiest way to find these values is to explore the graph using Tensorboard. Reuse
-the arguments for specifying the output nodes for inference in the
-`freeze_graph` step.
+<a href="https://www.tensorflow.org/performance/quantization">quantized model</a>.
+Setting the `input_array`, `output_array`, and `input_shape` arguments is not as
+straightforward. The easiest way to find these values is to explore the graph
+using Tensorboard. Reuse the arguments for specifying the output nodes for
+inference in the `freeze_graph` step.
It is also possible to use the Tensorflow Optimizing Converter with protobufs
from either Python or from the command line (see the
@@ -204,16 +207,16 @@ The open source Android demo app uses the JNI interface and is available
[on GitHub](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/java/demo/app).
You can also download a
[prebuilt APK](http://download.tensorflow.org/deps/tflite/TfLiteCameraDemo.apk).
-See the @{$tflite/demo_android} guide for details.
+See the <a href="../demo_android.md">Android demo</a> guide for details.
-The @{$mobile/android_build} guide has instructions for installing TensorFlow on
-Android and setting up `bazel` and Android Studio.
+The <a href="./android_build.md">Android mobile</a> guide has instructions for
+installing TensorFlow on Android and setting up `bazel` and Android Studio.
### iOS
To integrate a TensorFlow model in an iOS app, see the
[TensorFlow Lite for iOS](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite/g3doc/ios.md)
-guide and @{$tflite/demo_ios} guide.
+guide and <a href="../demo_ios.md">iOS demo</a> guide.
#### Core ML support
diff --git a/tensorflow/contrib/lite/g3doc/ios.md b/tensorflow/contrib/lite/g3doc/ios.md
index e0358a444d..d78d373ccf 100644
--- a/tensorflow/contrib/lite/g3doc/ios.md
+++ b/tensorflow/contrib/lite/g3doc/ios.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# TensorFlow Lite for iOS
## Building
diff --git a/tensorflow/contrib/lite/g3doc/models.md b/tensorflow/contrib/lite/g3doc/models.md
index c1c8ef049f..3292aece0e 100644
--- a/tensorflow/contrib/lite/g3doc/models.md
+++ b/tensorflow/contrib/lite/g3doc/models.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# List of Hosted Models
## Image classification (Float Models)
@@ -39,22 +42,22 @@ single thread large core.
Model Name | Paper_Model_Files | Model_Size | Top-1 Accuracy | Top-5 Accuracy | TF Lite Performance
------------------------ | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | ---------: | -------------: | -------------: | ------------------:
-Mobilenet_0.25_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128_quant.tgz) | 0.5 Mb | 39.9% | 65.8% | 3.7 ms
-Mobilenet_0.25_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_160_quant.tgz) | 0.5 Mb | 43.5% | 69.1% | 5.5 ms
-Mobilenet_0.25_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_192_quant.tgz) | 0.5 Mb | 45.8% | 71.9% | 7.9 ms
-Mobilenet_0.25_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_224_quant.tgz) | 0.5 Mb | 48.2% | 73.8% | 10.4 ms
-Mobilenet_0.50_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_128_quant.tgz) | 1.4 Mb | 54.9% | 78.9% | 8.8 ms
-Mobilenet_0.50_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_160_quant.tgz) | 1.4 Mb | 57.7% | 81.3% | 13.0 ms
-Mobilenet_0.50_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_192_quant.tgz) | 1.4 Mb | 60.4% | 83.2% | 18.3 ms
-Mobilenet_0.50_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_224_quant.tgz) | 1.4 Mb | 62.2% | 84.5% | 24.7 ms
-Mobilenet_0.75_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_128_quant.tgz) | 2.6 Mb | 59.8% | 82.8% | 16.2 ms
-Mobilenet_0.75_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_160_quant.tgz) | 2.6 Mb | 63.9% | 85.5% | 24.3 ms
-Mobilenet_0.75_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_192_quant.tgz) | 2.6 Mb | 66.2% | 87.1% | 33.8 ms
-Mobilenet_0.75_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.75_224_quant.tgz) | 2.6 Mb | 67.9% | 88.1% | 45.4 ms
-Mobilenet_1.0_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_128_quant.tgz) | 4.3 Mb | 64.0% | 85.5% | 24.9 ms
-Mobilenet_1.0_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_160_quant.tgz) | 4.3 Mb | 67.3% | 87.7% | 37.4 ms
-Mobilenet_1.0_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_192_quant.tgz) | 4.3 Mb | 69.0% | 88.9% | 51.9 ms
-Mobilenet_1.0_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224_quant.tgz) | 4.3 Mb | 69.7% | 89.5% | 70.2 ms
+Mobilenet_0.25_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.25_128_quant.tgz) | 0.5 Mb | 39.7% | 65.8% | 3.7 ms
+Mobilenet_0.25_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.25_160_quant.tgz) | 0.5 Mb | 41.9% | 69.1% | 5.5 ms
+Mobilenet_0.25_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.25_192_quant.tgz) | 0.5 Mb | 45.3% | 71.9% | 7.9 ms
+Mobilenet_0.25_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.25_224_quant.tgz) | 0.5 Mb | 46.4% | 73.8% | 10.4 ms
+Mobilenet_0.50_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.5_128_quant.tgz) | 1.4 Mb | 54.1% | 78.9% | 8.8 ms
+Mobilenet_0.50_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.5_160_quant.tgz) | 1.4 Mb | 57.6% | 81.3% | 13.0 ms
+Mobilenet_0.50_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.5_192_quant.tgz) | 1.4 Mb | 59.1% | 83.2% | 18.3 ms
+Mobilenet_0.50_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.5_224_quant.tgz) | 1.4 Mb | 61.0% | 84.5% | 24.7 ms
+Mobilenet_0.75_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.75_128_quant.tgz) | 2.6 Mb | 52.5% | 82.8% | 16.2 ms
+Mobilenet_0.75_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.75_160_quant.tgz) | 2.6 Mb | 63.6% | 85.5% | 24.3 ms
+Mobilenet_0.75_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.75_192_quant.tgz) | 2.6 Mb | 61.1% | 87.1% | 33.8 ms
+Mobilenet_0.75_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_0.75_224_quant.tgz) | 2.6 Mb | 66.7% | 88.1% | 45.4 ms
+Mobilenet_1.0_128_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_1.0_128_quant.tgz) | 4.3 Mb | 62.7% | 85.5% | 24.9 ms
+Mobilenet_1.0_160_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_1.0_160_quant.tgz) | 4.3 Mb | 66.6% | 87.7% | 37.4 ms
+Mobilenet_1.0_192_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_1.0_192_quant.tgz) | 4.3 Mb | 69.2% | 88.9% | 51.9 ms
+Mobilenet_1.0_224_quant | [paper](https://arxiv.org/pdf/1712.05877.pdf), [tflite&pb](http://download.tensorflow.org/models/mobilenet_v1_2018_07_12/mobilenet_v1_1.0_224_quant.tgz) | 4.3 Mb | 69.3% | 89.5% | 70.2 ms
## Other models
diff --git a/tensorflow/contrib/lite/g3doc/ops_versioning.md b/tensorflow/contrib/lite/g3doc/ops_versioning.md
index bd2f797e6c..b06f4fd3b8 100644
--- a/tensorflow/contrib/lite/g3doc/ops_versioning.md
+++ b/tensorflow/contrib/lite/g3doc/ops_versioning.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# TensorFlow Lite Ops Versioning
This document describes TensorFlow Lite's op versioning schema. Op
diff --git a/tensorflow/docs_src/mobile/tflite/index.md b/tensorflow/contrib/lite/g3doc/overview.md
index 3d1733024e..be60d7941a 100644
--- a/tensorflow/docs_src/mobile/tflite/index.md
+++ b/tensorflow/contrib/lite/g3doc/overview.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Introduction to TensorFlow Lite
TensorFlow Lite is TensorFlow’s lightweight solution for mobile and embedded
@@ -70,10 +73,9 @@ There are several factors which are fueling interest in this domain:
We believe the next wave of machine learning applications will have significant
processing on mobile and embedded devices.
-## TensorFlow Lite developer preview highlights
+## TensorFlow Lite highlights
-TensorFlow Lite is available as a developer preview and includes the
-following:
+TensorFlow Lite provides:
- A set of core operators, both quantized and float, many of which have been
tuned for mobile platforms. These can be used to create and run custom
@@ -129,9 +131,6 @@ following:
- Java and C++ API support
-Note: This is a developer release, and it’s likely that there will be changes in
-the API in upcoming versions. We do not guarantee backward or forward
-compatibility with this release.
## Getting Started
@@ -201,9 +200,5 @@ possible performance for a particular model on a particular device.
## Next Steps
-For the developer preview, most of our documentation is on GitHub. Please take a
-look at the [TensorFlow Lite
-repository](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite)
-on GitHub for more information and for code samples, demo applications, and
-more.
-
+The TensorFlow Lite [GitHub repository](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/lite)
+contains additional docs, code samples, and demo applications.
diff --git a/tensorflow/docs_src/mobile/tflite/performance.md b/tensorflow/contrib/lite/g3doc/performance.md
index 79bacaaa1b..613e9f97c3 100644
--- a/tensorflow/docs_src/mobile/tflite/performance.md
+++ b/tensorflow/contrib/lite/g3doc/performance.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Performance
This document lists TensorFlow Lite performance benchmarks when running well
diff --git a/tensorflow/contrib/lite/g3doc/rpi.md b/tensorflow/contrib/lite/g3doc/rpi.md
index ab50789307..cdc9172d87 100644
--- a/tensorflow/contrib/lite/g3doc/rpi.md
+++ b/tensorflow/contrib/lite/g3doc/rpi.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# TensorFlow Lite for Raspberry Pi
## Cross compiling
diff --git a/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md b/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
index 49d00a66ba..0e8f4339fc 100644
--- a/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
+++ b/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# TensorFlow Lite & TensorFlow Compatibility Guide
TensorFlow Lite supports a number of TensorFlow operations used in common
@@ -815,6 +818,18 @@ Outputs {
}
```
+**PACK**
+
+```
+Inputs {
+ 0: a list of tensors.
+ 1: an integer.
+}
+Outputs {
+ 0: A tensor of stacked tensors.
+}
+```
+
And these are TensorFlow Lite operations that are present but not ready for
custom models yet:
diff --git a/tensorflow/docs_src/mobile/android_build.md b/tensorflow/contrib/lite/g3doc/tfmobile/android_build.md
index f4b07db459..76e16fc9db 100644
--- a/tensorflow/docs_src/mobile/android_build.md
+++ b/tensorflow/contrib/lite/g3doc/tfmobile/android_build.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Building TensorFlow on Android
To get you started working with TensorFlow on Android, we'll walk through two
@@ -91,7 +94,8 @@ using [ADB](https://developer.android.com/studio/command-line/adb.html). This
requires some knowledge of build systems and Android developer tools, but we'll
guide you through the basics here.
-- First, follow our instructions for @{$install/install_sources$installing from sources}.
+- First, follow our instructions for
+ <a href="http://www.tensorflow.org/install/install_sources">installing from sources</a>.
This will also guide you through installing Bazel and cloning the
TensorFlow code.
diff --git a/tensorflow/docs_src/mobile/mobile_intro.md b/tensorflow/contrib/lite/g3doc/tfmobile/index.md
index baad443308..bd047bfcec 100644
--- a/tensorflow/docs_src/mobile/mobile_intro.md
+++ b/tensorflow/contrib/lite/g3doc/tfmobile/index.md
@@ -1,4 +1,45 @@
-# Introduction to TensorFlow Mobile
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
+# Overview
+
+TensorFlow was designed to be a good deep learning solution for mobile
+platforms. Currently we have two solutions for deploying machine learning
+applications on mobile and embedded devices: TensorFlow for Mobile and
+<a href="../index.md">TensorFlow Lite</a>.
+
+## TensorFlow Lite versus TensorFlow Mobile
+
+Here are a few of the differences between the two:
+
+- TensorFlow Lite is an evolution of TensorFlow Mobile. In most cases, apps
+ developed with TensorFlow Lite will have a smaller binary size, fewer
+ dependencies, and better performance.
+
+- TensorFlow Lite is in developer preview, so not all use cases are covered yet.
+ We expect you to use TensorFlow Mobile to cover production cases.
+
+- TensorFlow Lite supports only a limited set of operators, so not all models
+ will work on it by default. TensorFlow for Mobile has a fuller set of
+ supported functionality.
+
+TensorFlow Lite provides better performance and a smaller binary size on mobile
+platforms, as well as the ability to leverage hardware acceleration when the
+platform supports it. In addition, it has far fewer dependencies, so it can be
+built and deployed in simpler, more constrained device scenarios. TensorFlow
+Lite also allows targeting accelerators through the [Neural Networks
+API](https://developer.android.com/ndk/guides/neuralnetworks/index.html).
+
+TensorFlow Lite currently covers a limited set of operators. While TensorFlow
+for Mobile also supports only a constrained set of ops by default, it can in
+principle be customized to build a kernel for any operator used in TensorFlow.
+Use cases that are not currently supported by TensorFlow Lite should therefore
+continue to use TensorFlow for Mobile. As TensorFlow Lite evolves, it will gain
+additional operators, and the decision will be easier to make.
+
+
+## Introduction to TensorFlow Mobile
TensorFlow was designed from the ground up to be a good deep learning solution
for mobile platforms like Android and iOS. This mobile guide should help you
@@ -167,7 +208,7 @@ interesting products possible.
TensorFlow runs on Ubuntu Linux, Windows 10, and OS X. For a list of all
supported operating systems and instructions to install TensorFlow, see
-@{$install$Installing Tensorflow}.
+<a href="https://www.tensorflow.org/install">Installing Tensorflow</a>.
Note that some of the sample code we provide for mobile TensorFlow requires you
to compile TensorFlow from source, so you’ll need more than just `pip install`
@@ -241,8 +282,3 @@ results you’ll see. It’s common for an algorithm to get great training accur
numbers but then fail to be useful within a real application because there’s a
mismatch between the dataset and real usage. Prototype end-to-end usage as soon
as possible to create a consistent user experience.
-
-## Next Steps
-
-We suggest you get started by building one of our demos for
-@{$mobile/android_build$Android} or @{$mobile/ios_build$iOS}.
diff --git a/tensorflow/docs_src/mobile/ios_build.md b/tensorflow/contrib/lite/g3doc/tfmobile/ios_build.md
index 4c84a1214a..6223707892 100644
--- a/tensorflow/docs_src/mobile/ios_build.md
+++ b/tensorflow/contrib/lite/g3doc/tfmobile/ios_build.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Building TensorFlow on iOS
## Using CocoaPods
diff --git a/tensorflow/docs_src/mobile/linking_libs.md b/tensorflow/contrib/lite/g3doc/tfmobile/linking_libs.md
index efef5dd0da..4c2071ed05 100644
--- a/tensorflow/docs_src/mobile/linking_libs.md
+++ b/tensorflow/contrib/lite/g3doc/tfmobile/linking_libs.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Integrating TensorFlow libraries
Once you have made some progress on a model that addresses the problem you’re
@@ -14,11 +17,11 @@ TensorFlow mobile demo apps.
After you've managed to build the examples, you'll probably want to call
TensorFlow from one of your existing applications. The very easiest way to do
-this is to use the Pod installation steps described
-@{$mobile/ios_build#using_cocoapods$here}, but if you want to build TensorFlow
-from source (for example to customize which operators are included) you'll need
-to break out TensorFlow as a framework, include the right header files, and link
-against the built libraries and dependencies.
+this is to use the Pod installation steps described in
+<a href="./ios_build.md">Building TensorFlow on iOS</a>, but if you want to build
+TensorFlow from source (for example to customize which operators are included)
+you'll need to break out TensorFlow as a framework, include the right header
+files, and link against the built libraries and dependencies.
### Android
@@ -82,10 +85,12 @@ recompile of the core.
To achieve this capability, TensorFlow uses a registration pattern in a lot of
places. In the code, it looks like this:
- class MulKernel : OpKernel {
- Status Compute(OpKernelContext* context) { … }
- };
- REGISTER_KERNEL(MulKernel, “Mul”);
+```
+class MulKernel : OpKernel {
+ Status Compute(OpKernelContext* context) { … }
+};
+REGISTER_KERNEL(MulKernel, “Mul”);
+```
This would be in a standalone `.cc` file linked into your application, either
as part of the main set of kernels or as a separate custom library. The magic
@@ -101,15 +106,17 @@ doesn’t offer a good mechanism for doing this sort of registration, so we have
to resort to some tricky code. Under the hood, the macro is implemented so that
it produces something like this:
- class RegisterMul {
- public:
- RegisterMul() {
- global_kernel_registry()->Register(“Mul”, [](){
- return new MulKernel()
- });
- }
- };
- RegisterMul g_register_mul;
+```
+class RegisterMul {
+ public:
+ RegisterMul() {
+ global_kernel_registry()->Register(“Mul”, [](){
+ return new MulKernel()
+ });
+ }
+};
+RegisterMul g_register_mul;
+```
This sets up a class `RegisterMul` with a constructor that tells the global
kernel registry what function to call when somebody asks it how to create a
@@ -176,8 +183,10 @@ have an experimental script at [rename_protobuf.sh](https://github.com/tensorflo
You need to run this as part of the makefile build, after you’ve downloaded all
the dependencies:
- tensorflow/contrib/makefile/download_dependencies.sh
- tensorflow/contrib/makefile/rename_protobuf.sh
+```
+tensorflow/contrib/makefile/download_dependencies.sh
+tensorflow/contrib/makefile/rename_protobuf.sh
+```
## Calling the TensorFlow API
@@ -193,18 +202,20 @@ use case, while on iOS and Raspberry Pi you call directly into the C++ API.
Here’s what a typical Inference Library sequence looks like on Android:
- // Load the model from disk.
- TensorFlowInferenceInterface inferenceInterface =
- new TensorFlowInferenceInterface(assetManager, modelFilename);
+```
+// Load the model from disk.
+TensorFlowInferenceInterface inferenceInterface =
+    new TensorFlowInferenceInterface(assetManager, modelFilename);
- // Copy the input data into TensorFlow.
- inferenceInterface.feed(inputName, floatValues, 1, inputSize, inputSize, 3);
+// Copy the input data into TensorFlow.
+inferenceInterface.feed(inputName, floatValues, 1, inputSize, inputSize, 3);
- // Run the inference call.
- inferenceInterface.run(outputNames, logStats);
+// Run the inference call.
+inferenceInterface.run(outputNames, logStats);
- // Copy the output Tensor back into the output array.
- inferenceInterface.fetch(outputName, outputs);
+// Copy the output Tensor back into the output array.
+inferenceInterface.fetch(outputName, outputs);
+```
You can find the source of this code in the [Android examples](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/android/src/org/tensorflow/demo/TensorFlowImageClassifier.java#L107).
@@ -212,27 +223,29 @@ You can find the source of this code in the [Android examples](https://github.co
Here’s the equivalent code for iOS and Raspberry Pi:
- // Load the model.
- PortableReadFileToProto(file_path, &tensorflow_graph);
-
- // Create a session from the model.
- tensorflow::Status s = session->Create(tensorflow_graph);
- if (!s.ok()) {
- LOG(FATAL) << "Could not create TensorFlow Graph: " << s;
- }
-
- // Run the model.
- std::string input_layer = "input";
- std::string output_layer = "output";
- std::vector<tensorflow::Tensor> outputs;
- tensorflow::Status run_status = session->Run({{input_layer, image_tensor}},
+```
+// Load the model.
+PortableReadFileToProto(file_path, &tensorflow_graph);
+
+// Create a session from the model.
+tensorflow::Status s = session->Create(tensorflow_graph);
+if (!s.ok()) {
+ LOG(FATAL) << "Could not create TensorFlow Graph: " << s;
+}
+
+// Run the model.
+std::string input_layer = "input";
+std::string output_layer = "output";
+std::vector<tensorflow::Tensor> outputs;
+tensorflow::Status run_status = session->Run({\{input_layer, image_tensor}},
{output_layer}, {}, &outputs);
- if (!run_status.ok()) {
- LOG(FATAL) << "Running model failed: " << run_status;
- }
+if (!run_status.ok()) {
+ LOG(FATAL) << "Running model failed: " << run_status;
+}
- // Access the output data.
- tensorflow::Tensor* output = &outputs[0];
+// Access the output data.
+tensorflow::Tensor* output = &outputs[0];
+```
This is all based on the
[iOS sample code](https://www.tensorflow.org/code/tensorflow/examples/ios/simple/RunModelViewController.mm),
diff --git a/tensorflow/docs_src/mobile/optimizing.md b/tensorflow/contrib/lite/g3doc/tfmobile/optimizing.md
index 778e4d3a62..a0192c3541 100644
--- a/tensorflow/docs_src/mobile/optimizing.md
+++ b/tensorflow/contrib/lite/g3doc/tfmobile/optimizing.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Optimizing for mobile
There are some special issues that you have to deal with when you’re trying to
@@ -77,7 +80,7 @@ out of a mobile device's memory faster.
To understand how large your network will be on disk, start by looking at the
size on disk of your `GraphDef` file after you’ve run `freeze_graph` and
-`strip_unused_nodes` on it (see @{$mobile/prepare_models$Preparing models} for
+`strip_unused_nodes` on it (see <a href="./prepare_models.md">Preparing models</a> for
more details on these tools), since then it should only contain
inference-related nodes. To double-check that your results are as expected, run
the `summarize_graph` tool to see how many parameters are in constants:
@@ -103,7 +106,8 @@ you multiply the number of const parameters by four, you should get something
that’s close to the size of the file on disk. You can often get away with only
eight-bits per parameter with very little loss of accuracy in the final result,
so if your file size is too large you can try using
-@{$performance/quantization$quantize_weights} to transform the parameters down.
+<a href="https://www.tensorflow.org/performance/quantization">quantize_weights</a>
+to transform the parameters down.
bazel build tensorflow/tools/graph_transforms:transform_graph && \
bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
@@ -292,7 +296,8 @@ run it on a 64-bit ARM device:
You can interpret the results in exactly the same way as the desktop version
above. If you have any trouble figuring out what the right input and output
-names and types are, take a look at the @{$mobile/prepare_models$Preparing models}
+names and types are, take a look at the
+<a href="./prepare_models">Preparing models</a>
page for details about detecting these for your model, and look at the
`summarize_graph` tool which may give you
helpful information.
diff --git a/tensorflow/docs_src/mobile/prepare_models.md b/tensorflow/contrib/lite/g3doc/tfmobile/prepare_models.md
index 2b84dbb973..6b4e4a92bd 100644
--- a/tensorflow/docs_src/mobile/prepare_models.md
+++ b/tensorflow/contrib/lite/g3doc/tfmobile/prepare_models.md
@@ -1,3 +1,6 @@
+book_path: /mobile/_book.yaml
+project_path: /mobile/_project.yaml
+
# Preparing models for mobile deployment
The requirements for storing model information during training are very
@@ -255,8 +258,8 @@ The criteria for including ops and types fall into several categories:
These ops are trimmed by default to optimize for inference on mobile, but it is
possible to alter some build files to change the default. After alternating the
build files, you will need to recompile TensorFlow. See below for more details
-on how to do this, and also see @{$mobile/optimizing#binary_size$Optimizing} for
-more on reducing your binary size.
+on how to do this, and also see <a href="./optimizing.md">optimizing binary size</a>
+for more on reducing your binary size.
### Locate the implementation
diff --git a/tensorflow/contrib/lite/interpreter.cc b/tensorflow/contrib/lite/interpreter.cc
index 0641a08636..26fecceab0 100644
--- a/tensorflow/contrib/lite/interpreter.cc
+++ b/tensorflow/contrib/lite/interpreter.cc
@@ -40,6 +40,16 @@ class NNAPIDelegate {};
namespace {
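+// Reports a detailed error for the given node, including its index and builtin
+// operator name, and returns kTfLiteError so callers can propagate the failure.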
+TfLiteStatus ReportOpError(TfLiteContext* context, const TfLiteNode& node,
+ const TfLiteRegistration& registration,
+ int node_index, const char* message) {
+ context->ReportError(context, "Node number %d (%s) %s.\n", node_index,
+ EnumNameBuiltinOperator(static_cast<BuiltinOperator>(
+ registration.builtin_code)),
+ message);
+ return kTfLiteError;
+}
+
// Stub method which returns kTfLiteError when the function is forbidden.
// We're registrating this function to several different function to save
// compiled binary size. Please note the restrictions:
@@ -572,9 +582,8 @@ TfLiteStatus Interpreter::PrepareOpsStartingAt(
nodes_and_registration_[node_index].second;
EnsureTensorsVectorCapacity();
if (OpPrepare(registration, &node) == kTfLiteError) {
- context_.ReportError(&context_, "Node %d failed to prepare.\n",
- node_index);
- return kTfLiteError;
+ return ReportOpError(&context_, node, registration, node_index,
+ "failed to prepare");
}
*last_execution_plan_index_prepared = execution_plan_index;
@@ -593,7 +602,7 @@ TfLiteStatus Interpreter::PrepareOpsAndTensors() {
if (!memory_planner_) {
memory_planner_.reset(new ArenaPlanner(
&context_, std::unique_ptr<GraphInfo>(new InterpreterInfo(this)),
- /*preserve_inputs=*/true));
+ /*preserve_inputs=*/true, /*preserve_intermediates*/ false));
memory_planner_->PlanAllocations();
}
@@ -674,9 +683,8 @@ TfLiteStatus Interpreter::Invoke() {
EnsureTensorsVectorCapacity();
tensor_resized_since_op_invoke_ = false;
if (OpInvoke(registration, &node) == kTfLiteError) {
- context_.ReportError(&context_, "Node %d failed to invoke.\n",
- node_index);
- status = kTfLiteError;
+ status = ReportOpError(&context_, node, registration, node_index,
+ "failed to invoke");
}
// Force execution prep for downstream ops if the latest op triggered the
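For illustration, the message produced by the new `ReportOpError` helper above has the shape shown below; this snippet only mimics the formatting and is not part of the TFLite API:

```cpp
#include <cstdio>
#include <string>

// Mirrors the format string used in ReportOpError: node index, builtin op
// name, then the failure message.
std::string FormatOpError(int node_index, const char* op_name,
                          const char* message) {
  char buf[256];
  std::snprintf(buf, sizeof(buf), "Node number %d (%s) %s.\n", node_index,
                op_name, message);
  return buf;
}

// Example: FormatOpError(3, "CONV_2D", "failed to prepare") yields
// "Node number 3 (CONV_2D) failed to prepare.\n".
```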
diff --git a/tensorflow/contrib/lite/interpreter.h b/tensorflow/contrib/lite/interpreter.h
index 1a1c3e272b..bc608e2fce 100644
--- a/tensorflow/contrib/lite/interpreter.h
+++ b/tensorflow/contrib/lite/interpreter.h
@@ -629,7 +629,7 @@ class Interpreter {
bool tensor_resized_since_op_invoke_ = false;
// Profiler for this interpreter instance.
- profiling::Profiler* profiler_;
+ profiling::Profiler* profiler_ = nullptr;
// List of active external contexts.
TfLiteExternalContext* external_contexts_[kTfLiteMaxExternalContexts];
diff --git a/tensorflow/contrib/lite/java/AndroidManifest.xml b/tensorflow/contrib/lite/java/AndroidManifest.xml
index f705feacbe..b91c6d149a 100644
--- a/tensorflow/contrib/lite/java/AndroidManifest.xml
+++ b/tensorflow/contrib/lite/java/AndroidManifest.xml
@@ -1,7 +1,12 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
- package="org.tensorflow.lite">
- <application>
- </application>
+ package="org.tensorflow.lite">
+
+ <uses-sdk
+ android:minSdkVersion="4"
+ android:targetSdkVersion="19" />
+
+ <application />
+
</manifest>
diff --git a/tensorflow/contrib/lite/java/BUILD b/tensorflow/contrib/lite/java/BUILD
index 593af81a18..098ba7e773 100644
--- a/tensorflow/contrib/lite/java/BUILD
+++ b/tensorflow/contrib/lite/java/BUILD
@@ -69,6 +69,7 @@ java_test(
size = "small",
srcs = ["src/test/java/org/tensorflow/lite/TensorFlowLiteTest.java"],
javacopts = JAVACOPTS,
+ tags = ["no_oss"],
test_class = "org.tensorflow.lite.TensorFlowLiteTest",
deps = [
":libtensorflowlite_jni.so",
@@ -83,6 +84,7 @@ java_test(
size = "small",
srcs = ["src/test/java/org/tensorflow/lite/DataTypeTest.java"],
javacopts = JAVACOPTS,
+ tags = ["no_oss"],
test_class = "org.tensorflow.lite.DataTypeTest",
deps = [
":libtensorflowlite_jni.so",
@@ -105,6 +107,7 @@ java_test(
"src/testdata/with_custom_op.lite",
],
javacopts = JAVACOPTS,
+ tags = ["no_oss"],
test_class = "org.tensorflow.lite.NativeInterpreterWrapperTest",
deps = [
":libtensorflowlite_jni.so",
@@ -124,6 +127,7 @@ java_test(
"src/testdata/mobilenet.tflite.bin",
],
javacopts = JAVACOPTS,
+ tags = ["no_oss"],
test_class = "org.tensorflow.lite.InterpreterTest",
visibility = ["//visibility:private"],
deps = [
@@ -142,6 +146,7 @@ java_test(
"src/testdata/add.bin",
],
javacopts = JAVACOPTS,
+ tags = ["no_oss"],
test_class = "org.tensorflow.lite.TensorTest",
deps = [
":tensorflowlitelib",
diff --git a/tensorflow/contrib/lite/java/demo/app/build.gradle b/tensorflow/contrib/lite/java/demo/app/build.gradle
index 49868c5a75..92f04c651c 100644
--- a/tensorflow/contrib/lite/java/demo/app/build.gradle
+++ b/tensorflow/contrib/lite/java/demo/app/build.gradle
@@ -44,7 +44,7 @@ repositories {
dependencies {
compile fileTree(dir: 'libs', include: ['*.jar'])
- androidTestCompile('com.androidx.test.espresso:espresso-core:2.2.2', {
+ androidTestCompile('androidx.test.espresso:espresso-core:3.1.0-alpha3', {
exclude group: 'com.android.support', module: 'support-annotations'
})
compile 'com.android.support:appcompat-v7:25.2.0'
diff --git a/tensorflow/contrib/lite/java/ovic/BUILD b/tensorflow/contrib/lite/java/ovic/BUILD
index f232b00045..06f46fb923 100644
--- a/tensorflow/contrib/lite/java/ovic/BUILD
+++ b/tensorflow/contrib/lite/java/ovic/BUILD
@@ -18,6 +18,7 @@ java_test(
"//tensorflow/contrib/lite/java/ovic/src/testdata:ovic_testdata",
],
javacopts = JAVACOPTS,
+ tags = ["no_oss"],
test_class = "org.tensorflow.ovic.OvicClassifierTest",
visibility = ["//visibility:public"],
deps = [
diff --git a/tensorflow/contrib/lite/java/ovic/demo/app/build.gradle b/tensorflow/contrib/lite/java/ovic/demo/app/build.gradle
index 3f32d62e5c..2a08608bbb 100644
--- a/tensorflow/contrib/lite/java/ovic/demo/app/build.gradle
+++ b/tensorflow/contrib/lite/java/ovic/demo/app/build.gradle
@@ -43,7 +43,7 @@ repositories {
dependencies {
compile fileTree(dir: 'libs', include: ['*.jar'])
- androidTestCompile('com.androidx.test.espresso:espresso-core:2.2.2', {
+ androidTestCompile('androidx.test.espresso:espresso-core:3.1.0-alpha3', {
exclude group: 'com.android.support', module: 'support-annotations'
})
compile 'com.android.support:appcompat-v7:25.2.0'
diff --git a/tensorflow/contrib/lite/kernels/BUILD b/tensorflow/contrib/lite/kernels/BUILD
index edce73989c..c224132cae 100644
--- a/tensorflow/contrib/lite/kernels/BUILD
+++ b/tensorflow/contrib/lite/kernels/BUILD
@@ -12,7 +12,10 @@ tf_cc_test(
name = "optional_tensor_test",
size = "small",
srcs = ["optional_tensor_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -55,6 +58,7 @@ cc_library(
}),
deps = [
":op_macros",
+ "//tensorflow/contrib/lite:arena_planner",
"//tensorflow/contrib/lite:context",
"//tensorflow/contrib/lite/kernels/internal:optimized",
],
@@ -112,7 +116,10 @@ tf_cc_test(
name = "kernel_util_test",
size = "small",
srcs = ["kernel_util_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":kernel_util",
"//tensorflow/contrib/lite/testing:util",
@@ -124,6 +131,7 @@ tf_cc_test(
name = "test_util_test",
size = "small",
srcs = ["test_util_test.cc"],
+ tags = ["no_oss"],
deps = [
":test_util",
"//tensorflow/contrib/lite/testing:util",
@@ -168,6 +176,7 @@ cc_library(
"mfcc.cc",
"mul.cc",
"neg.cc",
+ "pack.cc",
"pad.cc",
"pooling.cc",
"pow.cc",
@@ -232,7 +241,10 @@ tf_cc_test(
name = "audio_spectrogram_test",
size = "small",
srcs = ["audio_spectrogram_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -246,7 +258,10 @@ tf_cc_test(
name = "mfcc_test",
size = "small",
srcs = ["mfcc_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -260,7 +275,10 @@ tf_cc_test(
name = "detection_postprocess_test",
size = "small",
srcs = ["detection_postprocess_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -301,6 +319,7 @@ tf_cc_test(
size = "small",
srcs = ["arg_min_max_test.cc"],
tags = [
+ "no_oss",
"tflite_not_portable_ios",
],
deps = [
@@ -315,7 +334,10 @@ tf_cc_test(
name = "div_test",
size = "small",
srcs = ["div_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -328,7 +350,10 @@ tf_cc_test(
name = "sub_test",
size = "small",
srcs = ["sub_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -341,7 +366,10 @@ tf_cc_test(
name = "transpose_test",
size = "small",
srcs = ["transpose_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -356,7 +384,10 @@ tf_cc_test(
name = "space_to_batch_nd_test",
size = "small",
srcs = ["space_to_batch_nd_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -369,7 +400,10 @@ tf_cc_test(
name = "batch_to_space_nd_test",
size = "small",
srcs = ["batch_to_space_nd_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -382,7 +416,10 @@ tf_cc_test(
name = "cast_test",
size = "small",
srcs = ["cast_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -435,7 +472,10 @@ tf_cc_test(
name = "dequantize_test",
size = "small",
srcs = ["dequantize_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -462,7 +502,10 @@ tf_cc_test(
name = "bidirectional_sequence_lstm_test",
size = "small",
srcs = ["bidirectional_sequence_lstm_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -475,7 +518,10 @@ tf_cc_test(
name = "floor_test",
size = "small",
srcs = ["floor_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -488,7 +534,10 @@ tf_cc_test(
name = "elementwise_test",
size = "small",
srcs = ["elementwise_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -501,7 +550,10 @@ tf_cc_test(
name = "unidirectional_sequence_lstm_test",
size = "small",
srcs = ["unidirectional_sequence_lstm_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -515,6 +567,7 @@ tf_cc_test(
size = "small",
srcs = ["bidirectional_sequence_rnn_test.cc"],
tags = [
+ "no_oss",
"tflite_not_portable",
],
deps = [
@@ -529,7 +582,10 @@ tf_cc_test(
name = "unidirectional_sequence_rnn_test",
size = "small",
srcs = ["unidirectional_sequence_rnn_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -555,7 +611,10 @@ tf_cc_test(
name = "exp_test",
size = "small",
srcs = ["exp_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -568,7 +627,10 @@ tf_cc_test(
name = "fake_quant_test",
size = "small",
srcs = ["fake_quant_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -581,7 +643,10 @@ tf_cc_test(
name = "maximum_minimum_test",
size = "small",
srcs = ["maximum_minimum_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -594,7 +659,10 @@ tf_cc_test(
name = "reduce_test",
size = "small",
srcs = ["reduce_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -620,7 +688,10 @@ tf_cc_test(
name = "pad_test",
size = "small",
srcs = ["pad_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -646,7 +717,10 @@ tf_cc_test(
name = "gather_test",
size = "small",
srcs = ["gather_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:builtin_op_data",
@@ -660,7 +734,10 @@ tf_cc_test(
name = "topk_v2_test",
size = "small",
srcs = ["topk_v2_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:builtin_op_data",
@@ -781,7 +858,10 @@ tf_cc_test(
name = "log_softmax_test",
size = "small",
srcs = ["log_softmax_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -862,7 +942,10 @@ tf_cc_test(
name = "split_test",
size = "small",
srcs = ["split_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -875,7 +958,10 @@ tf_cc_test(
name = "squeeze_test",
size = "small",
srcs = ["squeeze_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -888,7 +974,10 @@ tf_cc_test(
name = "strided_slice_test",
size = "small",
srcs = ["strided_slice_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -901,7 +990,10 @@ tf_cc_test(
name = "tile_test",
size = "small",
srcs = ["tile_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:builtin_op_data",
@@ -918,6 +1010,7 @@ tf_cc_test(
"comparisons_test.cc",
],
tags = [
+ "no_oss",
"tflite_not_portable_ios",
],
deps = [
@@ -932,7 +1025,10 @@ tf_cc_test(
name = "neg_test",
size = "small",
srcs = ["neg_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
@@ -948,6 +1044,7 @@ tf_cc_test(
"select_test.cc",
],
tags = [
+ "no_oss",
"tflite_not_portable_ios",
],
deps = [
@@ -965,6 +1062,7 @@ tf_cc_test(
"slice_test.cc",
],
tags = [
+ "no_oss",
"tflite_not_portable_ios",
],
deps = [
@@ -979,12 +1077,14 @@ tf_cc_test(
name = "transpose_conv_test",
size = "small",
srcs = ["transpose_conv_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:framework",
"//tensorflow/contrib/lite/kernels:test_util",
- "@com_google_absl//absl/memory",
"@com_google_googletest//:gtest",
],
)
@@ -993,7 +1093,10 @@ tf_cc_test(
name = "expand_dims_test",
size = "small",
srcs = ["expand_dims_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:builtin_op_data",
@@ -1007,7 +1110,10 @@ tf_cc_test(
name = "sparse_to_dense_test",
size = "small",
srcs = ["sparse_to_dense_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:builtin_op_data",
@@ -1021,7 +1127,10 @@ tf_cc_test(
name = "shape_test",
size = "small",
srcs = ["shape_test.cc"],
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":builtin_ops",
"//tensorflow/contrib/lite:builtin_op_data",
@@ -1035,6 +1144,23 @@ tf_cc_test(
name = "pow_test",
size = "small",
srcs = ["pow_test.cc"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
+ deps = [
+ ":builtin_ops",
+ "//tensorflow/contrib/lite:builtin_op_data",
+ "//tensorflow/contrib/lite:framework",
+ "//tensorflow/contrib/lite/kernels:test_util",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+tf_cc_test(
+ name = "pack_test",
+ size = "small",
+ srcs = ["pack_test.cc"],
tags = ["tflite_not_portable_ios"],
deps = [
":builtin_ops",
diff --git a/tensorflow/contrib/lite/kernels/activations.cc b/tensorflow/contrib/lite/kernels/activations.cc
index 99f81c4a8a..6e13b8c667 100644
--- a/tensorflow/contrib/lite/kernels/activations.cc
+++ b/tensorflow/contrib/lite/kernels/activations.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
@@ -186,8 +185,8 @@ TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE_EQ(context, input->type, output->type);
- TF_LITE_ENSURE(context,
- NumDimensions(input) == 2 || NumDimensions(input) == 4);
+ const int num_dims = NumDimensions(input);
+ TF_LITE_ENSURE(context, num_dims == 1 || num_dims == 2 || num_dims == 4);
if (input->type == kTfLiteUInt8) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
@@ -365,13 +364,9 @@ TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
-// Takes a 2D tensor and perform softmax along the second dimension.
-void Softmax2DFloat(const TfLiteTensor* input, TfLiteTensor* output,
- TfLiteSoftmaxParams* params) {
- const int batch_size = input->dims->data[0];
- const int input_size = input->dims->data[1];
- float* in = input->data.f;
- float* out = output->data.f;
+// Performs softmax along the input of size (input_size * batch_size).
+void Softmax(const float* in, const int input_size, const int batch_size,
+ const float beta, float* out) {
TF_LITE_ASSERT(input_size > 0);
// For each batch
@@ -385,7 +380,7 @@ void Softmax2DFloat(const TfLiteTensor* input, TfLiteTensor* output,
// Compute the normalized sum of exps.
float exp_sum = 0.0;
for (int i = 0; i < input_size; i++) {
- out[i] = std::exp((in[i] - max_coeff) * params->beta);
+ out[i] = std::exp((in[i] - max_coeff) * beta);
exp_sum += out[i];
}
@@ -401,6 +396,33 @@ void Softmax2DFloat(const TfLiteTensor* input, TfLiteTensor* output,
}
}
+// Takes a 1D tensor and performs softmax along it.
+void Softmax1DFloat(const TfLiteTensor* input, TfLiteTensor* output,
+ TfLiteSoftmaxParams* params) {
+ const int input_size = input->dims->data[0];
+ Softmax(input->data.f, input_size, 1, params->beta, output->data.f);
+}
+
+// Takes a 2D tensor and performs softmax along the last dimension.
+void Softmax2DFloat(const TfLiteTensor* input, TfLiteTensor* output,
+ TfLiteSoftmaxParams* params) {
+ const int batch_size = input->dims->data[0];
+ const int input_size = input->dims->data[1];
+ Softmax(input->data.f, input_size, batch_size, params->beta, output->data.f);
+}
+
+void Softmax1DQuantized(const TfLiteTensor* input, TfLiteTensor* output,
+ TfLiteSoftmaxParams* params, OpData* data) {
+ // TODO(ahentz): this is arguably a dirty trick. Since the implementation
+ // always traverses the last dimension of a 4D tensor, we will pretend our 1D
+ // tensor is 4D in a special way. We will convert a (Y) shape into a (1,
+ // 1, 1, Y) shape.
+ const int input_size = input->dims->data[0];
+ optimized_ops::Softmax(
+ GetTensorData<uint8_t>(input), GetTensorShape({1, 1, 1, input_size}),
+ data->input_multiplier, data->input_left_shift, data->diff_min,
+ GetTensorData<uint8_t>(output), GetTensorShape({1, 1, 1, input_size}));
+}
void Softmax2DQuantized(const TfLiteTensor* input, TfLiteTensor* output,
TfLiteSoftmaxParams* params, OpData* data) {
// TODO(ahentz): this is arguably a dirty trick. Since the implementation
@@ -443,6 +465,10 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
// dimensions.
switch (input->type) {
case kTfLiteFloat32: {
+ if (NumDimensions(input) == 1) {
+ Softmax1DFloat(input, output, params);
+ return kTfLiteOk;
+ }
if (NumDimensions(input) == 2) {
Softmax2DFloat(input, output, params);
return kTfLiteOk;
@@ -452,11 +478,15 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
context->ReportError(
- context, "Only 2D and 4D tensors supported currently, got %dD.",
+ context, "Only 1D, 2D and 4D tensors supported currently, got %dD.",
NumDimensions(input));
return kTfLiteError;
}
case kTfLiteUInt8: {
+ if (NumDimensions(input) == 1) {
+ Softmax1DQuantized(input, output, params, data);
+ return kTfLiteOk;
+ }
if (NumDimensions(input) == 2) {
Softmax2DQuantized(input, output, params, data);
return kTfLiteOk;
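The 1D and 2D float paths above both funnel into the shared `Softmax` helper. A self-contained sketch of the same computation, with no TFLite types (with batch_size == 1 this is exactly the new 1D case):

```cpp
#include <algorithm>
#include <cmath>

// For each row of length input_size: subtract the row max for numerical
// stability, exponentiate scaled by beta, then normalize so the row sums to 1.
void SoftmaxRows(const float* in, int input_size, int batch_size, float beta,
                 float* out) {
  for (int b = 0; b < batch_size; ++b) {
    const float* row_in = in + b * input_size;
    float* row_out = out + b * input_size;
    const float max_coeff = *std::max_element(row_in, row_in + input_size);
    float exp_sum = 0.0f;
    for (int i = 0; i < input_size; ++i) {
      row_out[i] = std::exp((row_in[i] - max_coeff) * beta);
      exp_sum += row_out[i];
    }
    for (int i = 0; i < input_size; ++i) {
      row_out[i] /= exp_sum;
    }
  }
}
```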
diff --git a/tensorflow/contrib/lite/kernels/activations_test.cc b/tensorflow/contrib/lite/kernels/activations_test.cc
index 587e1303da..083cdf78d7 100644
--- a/tensorflow/contrib/lite/kernels/activations_test.cc
+++ b/tensorflow/contrib/lite/kernels/activations_test.cc
@@ -339,6 +339,29 @@ TEST(QuantizedActivationsOpTest, Softmax4D) {
kQuantizedTolerance)));
}
+TEST(FloatActivationsOpTest, Softmax1D) {
+ FloatActivationsOpModel m(0.1,
+ /*input=*/{TensorType_FLOAT32, {8}});
+ m.SetInput({0, -6, 2, 4, 3, -2, 10, 1});
+ m.Invoke();
+ EXPECT_THAT(
+ m.GetOutput(),
+ ElementsAreArray(ArrayFloatNear(
+ {.09752, .05352, .11911, .14548, .13164, .07984, .26509, .10778})));
+}
+
+TEST(QuantizedActivationsOpTest, Softmax1D) {
+ QuantizedActivationsOpModel m(0.1,
+ /*input=*/{TensorType_UINT8, {8}, -10, 10});
+ m.SetInput<uint8_t>({0, -6, 2, 4, 3, -2, 10, 1});
+ m.Invoke();
+ EXPECT_THAT(
+ m.GetDequantizedOutput<uint8_t>(),
+ ElementsAreArray(ArrayFloatNear({0.09766, 0.05469, 0.12109, 0.14453,
+ 0.13281, 0.07813, 0.26563, 0.10938},
+ kQuantizedTolerance)));
+}
+
TEST(FloatActivationsOpTest, Softmax2D) {
FloatActivationsOpModel m(0.1,
/*input=*/{TensorType_FLOAT32, {2, 4}});
diff --git a/tensorflow/contrib/lite/kernels/add.cc b/tensorflow/contrib/lite/kernels/add.cc
index f44d531cbf..af9b5c7013 100644
--- a/tensorflow/contrib/lite/kernels/add.cc
+++ b/tensorflow/contrib/lite/kernels/add.cc
@@ -110,15 +110,12 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
QuantizeMultiplierSmallerThanOneExp(
real_input1_multiplier, &data->input1_multiplier, &data->input1_shift);
- data->input1_shift *= -1;
QuantizeMultiplierSmallerThanOneExp(
real_input2_multiplier, &data->input2_multiplier, &data->input2_shift);
- data->input2_shift *= -1;
QuantizeMultiplierSmallerThanOneExp(
real_output_multiplier, &data->output_multiplier, &data->output_shift);
- data->output_shift *= -1;
CalculateActivationRangeUint8(params->activation, output,
&data->output_activation_min,
@@ -152,14 +149,14 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
CheckedLog2(output->params.scale, &output_scale_log2_rounded);
TF_LITE_ENSURE(context, output_scale_is_pot);
- data->input1_shift = output_scale_log2_rounded - input1_scale_log2_rounded;
- data->input2_shift = output_scale_log2_rounded - input2_scale_log2_rounded;
+ data->input1_shift = input1_scale_log2_rounded - output_scale_log2_rounded;
+ data->input2_shift = input2_scale_log2_rounded - output_scale_log2_rounded;
// Shifting of one input is supported. The graph quantization should ensure
// that the other input matches the output.
TF_LITE_ENSURE(context, data->input1_shift == 0 || data->input2_shift == 0);
- TF_LITE_ENSURE(context, data->input1_shift >= 0);
- TF_LITE_ENSURE(context, data->input2_shift >= 0);
+ TF_LITE_ENSURE(context, data->input1_shift <= 0);
+ TF_LITE_ENSURE(context, data->input2_shift <= 0);
CalculateActivationRangeQuantized(context, params->activation, output,
&data->output_activation_min,
@@ -173,24 +170,27 @@ template <KernelType kernel_type>
void EvalAdd(TfLiteContext* context, TfLiteNode* node, TfLiteAddParams* params,
const OpData* data, const TfLiteTensor* input1,
const TfLiteTensor* input2, TfLiteTensor* output) {
-#define TF_LITE_ADD(type, opname, data_type) \
- data_type output_activation_min, output_activation_max; \
- CalculateActivationRange(params->activation, &output_activation_min, \
- &output_activation_max); \
- type::opname(GetTensorData<data_type>(input1), GetTensorDims(input1), \
- GetTensorData<data_type>(input2), GetTensorDims(input2), \
- output_activation_min, output_activation_max, \
- GetTensorData<data_type>(output), GetTensorDims(output))
+#define TF_LITE_ADD(type, opname, data_type) \
+ data_type output_activation_min, output_activation_max; \
+ CalculateActivationRange(params->activation, &output_activation_min, \
+ &output_activation_max); \
+ tflite::ArithmeticParams op_params; \
+ SetActivationParams(output_activation_min, output_activation_max, \
+ &op_params); \
+ type::opname(op_params, GetTensorShape(input1), \
+ GetTensorData<data_type>(input1), GetTensorShape(input2), \
+ GetTensorData<data_type>(input2), GetTensorShape(output), \
+ GetTensorData<data_type>(output))
if (output->type == kTfLiteInt32) {
if (kernel_type == kReference) {
if (data->requires_broadcast) {
- TF_LITE_ADD(reference_ops, BroadcastAdd, int32_t);
+ TF_LITE_ADD(reference_ops, BroadcastAdd4DSlow, int32_t);
} else {
TF_LITE_ADD(reference_ops, Add, int32_t);
}
} else {
if (data->requires_broadcast) {
- TF_LITE_ADD(optimized_ops, BroadcastAdd, int32_t);
+ TF_LITE_ADD(optimized_ops, BroadcastAdd4DSlow, int32_t);
} else {
TF_LITE_ADD(optimized_ops, Add, int32_t);
}
@@ -198,13 +198,13 @@ void EvalAdd(TfLiteContext* context, TfLiteNode* node, TfLiteAddParams* params,
} else if (output->type == kTfLiteFloat32) {
if (kernel_type == kReference) {
if (data->requires_broadcast) {
- TF_LITE_ADD(reference_ops, BroadcastAdd, float);
+ TF_LITE_ADD(reference_ops, BroadcastAdd4DSlow, float);
} else {
TF_LITE_ADD(reference_ops, Add, float);
}
} else {
if (data->requires_broadcast) {
- TF_LITE_ADD(optimized_ops, BroadcastAdd, float);
+ TF_LITE_ADD(optimized_ops, BroadcastAdd4DSlow, float);
} else {
TF_LITE_ADD(optimized_ops, Add, float);
}
@@ -220,30 +220,43 @@ TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* input2,
TfLiteTensor* output) {
if (output->type == kTfLiteUInt8) {
-#define TF_LITE_ADD(type, opname) \
- type::opname( \
- data->left_shift, GetTensorData<uint8_t>(input1), GetTensorDims(input1), \
- data->input1_offset, data->input1_multiplier, data->input1_shift, \
- GetTensorData<uint8_t>(input2), GetTensorDims(input2), \
- data->input2_offset, data->input2_multiplier, data->input2_shift, \
- data->output_offset, data->output_multiplier, data->output_shift, \
- data->output_activation_min, data->output_activation_max, \
- GetTensorData<uint8_t>(output), GetTensorDims(output));
+#define TF_LITE_ADD(type, opname) \
+ tflite::ArithmeticParams op_params; \
+ op_params.left_shift = data->left_shift; \
+ op_params.input1_offset = data->input1_offset; \
+ op_params.input1_multiplier = data->input1_multiplier; \
+ op_params.input1_shift = data->input1_shift; \
+ op_params.input2_offset = data->input2_offset; \
+ op_params.input2_multiplier = data->input2_multiplier; \
+ op_params.input2_shift = data->input2_shift; \
+ op_params.output_offset = data->output_offset; \
+ op_params.output_multiplier = data->output_multiplier; \
+ op_params.output_shift = data->output_shift; \
+ SetActivationParams(data->output_activation_min, \
+ data->output_activation_max, &op_params); \
+ type::opname(op_params, GetTensorShape(input1), \
+ GetTensorData<uint8_t>(input1), GetTensorShape(input2), \
+ GetTensorData<uint8_t>(input2), GetTensorShape(output), \
+ GetTensorData<uint8_t>(output))
// The quantized version of Add doesn't support activations, so we
// always use BroadcastAdd.
if (kernel_type == kReference) {
- TF_LITE_ADD(reference_ops, BroadcastAdd);
+ TF_LITE_ADD(reference_ops, BroadcastAdd4DSlow);
} else {
- TF_LITE_ADD(optimized_ops, BroadcastAdd);
+ TF_LITE_ADD(optimized_ops, BroadcastAdd4DSlow);
}
#undef TF_LITE_ADD
} else if (output->type == kTfLiteInt16) {
-#define TF_LITE_ADD(type, opname) \
- type::opname(GetTensorData<int16_t>(input1), GetTensorDims(input1), \
- data->input1_shift, GetTensorData<int16_t>(input2), \
- GetTensorDims(input2), data->input2_shift, \
- data->output_activation_min, data->output_activation_max, \
- GetTensorData<int16_t>(output), GetTensorDims(output));
+#define TF_LITE_ADD(type, opname) \
+ tflite::ArithmeticParams op_params; \
+ op_params.input1_shift = data->input1_shift; \
+ op_params.input2_shift = data->input2_shift; \
+ SetActivationParams(data->output_activation_min, \
+ data->output_activation_max, &op_params); \
+ type::opname(op_params, GetTensorShape(input1), \
+ GetTensorData<int16_t>(input1), GetTensorShape(input2), \
+ GetTensorData<int16_t>(input2), GetTensorShape(output), \
+ GetTensorData<int16_t>(output))
// The quantized version of Add doesn't support activations, so we
// always use BroadcastAdd.
if (kernel_type == kReference) {
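The int16 path above flips the sign convention: the per-input shift is now stored as the exponent applied to that input (input scale log2 minus output scale log2), so it must be non-positive. A standalone illustration with hypothetical power-of-two scales:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical scales: input = 2^-12, output = 2^-8.
  const int input_scale_log2 = -12;
  const int output_scale_log2 = -8;
  // New convention: rescaled = q * 2^shift, with shift <= 0.
  const int shift = input_scale_log2 - output_scale_log2;  // -4
  assert(shift <= 0);
  const int16_t q_in = 4096;               // 4096 * 2^-12 == 1.0
  const int16_t q_out = q_in >> (-shift);  // 256, and 256 * 2^-8 == 1.0
  assert(q_out == 256);
  return 0;
}
```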
diff --git a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
index 3425288f02..a11a59aa05 100644
--- a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
+++ b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
@@ -276,27 +275,33 @@ TfLiteStatus CheckLstmTensorDimensions(
TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
TfLiteNode* node, int n_input,
int n_output, int n_cell) {
- CheckLstmTensorDimensions(
- context, node, n_input, n_output, n_cell, kFwInputToInputWeightsTensor,
- kFwInputToForgetWeightsTensor, kFwInputToCellWeightsTensor,
- kFwInputToOutputWeightsTensor, kFwRecurrentToInputWeightsTensor,
- kFwRecurrentToForgetWeightsTensor, kFwRecurrentToCellWeightsTensor,
- kFwRecurrentToOutputWeightsTensor, kFwCellToInputWeightsTensor,
- kFwCellToForgetWeightsTensor, kFwCellToOutputWeightsTensor,
- kFwInputGateBiasTensor, kFwForgetGateBiasTensor, kFwCellGateBiasTensor,
- kFwOutputGateBiasTensor, kFwProjectionWeightsTensor,
- kFwProjectionBiasTensor);
-
- CheckLstmTensorDimensions(
- context, node, n_input, n_output, n_cell, kBwInputToInputWeightsTensor,
- kBwInputToForgetWeightsTensor, kBwInputToCellWeightsTensor,
- kBwInputToOutputWeightsTensor, kBwRecurrentToInputWeightsTensor,
- kBwRecurrentToForgetWeightsTensor, kBwRecurrentToCellWeightsTensor,
- kBwRecurrentToOutputWeightsTensor, kBwCellToInputWeightsTensor,
- kBwCellToForgetWeightsTensor, kBwCellToOutputWeightsTensor,
- kBwInputGateBiasTensor, kBwForgetGateBiasTensor, kBwCellGateBiasTensor,
- kBwOutputGateBiasTensor, kBwProjectionWeightsTensor,
- kBwProjectionBiasTensor);
+ TF_LITE_ENSURE_OK(
+ context,
+ CheckLstmTensorDimensions(
+ context, node, n_input, n_output, n_cell,
+ kFwInputToInputWeightsTensor, kFwInputToForgetWeightsTensor,
+ kFwInputToCellWeightsTensor, kFwInputToOutputWeightsTensor,
+ kFwRecurrentToInputWeightsTensor, kFwRecurrentToForgetWeightsTensor,
+ kFwRecurrentToCellWeightsTensor, kFwRecurrentToOutputWeightsTensor,
+ kFwCellToInputWeightsTensor, kFwCellToForgetWeightsTensor,
+ kFwCellToOutputWeightsTensor, kFwInputGateBiasTensor,
+ kFwForgetGateBiasTensor, kFwCellGateBiasTensor,
+ kFwOutputGateBiasTensor, kFwProjectionWeightsTensor,
+ kFwProjectionBiasTensor));
+
+ TF_LITE_ENSURE_OK(
+ context,
+ CheckLstmTensorDimensions(
+ context, node, n_input, n_output, n_cell,
+ kBwInputToInputWeightsTensor, kBwInputToForgetWeightsTensor,
+ kBwInputToCellWeightsTensor, kBwInputToOutputWeightsTensor,
+ kBwRecurrentToInputWeightsTensor, kBwRecurrentToForgetWeightsTensor,
+ kBwRecurrentToCellWeightsTensor, kBwRecurrentToOutputWeightsTensor,
+ kBwCellToInputWeightsTensor, kBwCellToForgetWeightsTensor,
+ kBwCellToOutputWeightsTensor, kBwInputGateBiasTensor,
+ kBwForgetGateBiasTensor, kBwCellGateBiasTensor,
+ kBwOutputGateBiasTensor, kBwProjectionWeightsTensor,
+ kBwProjectionBiasTensor));
// Check if Forward and Backward tensors match along required dimensions.
return kTfLiteOk;
@@ -334,7 +339,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const int n_fw_output = fw_recurrent_to_output_weights->dims->data[1];
// Check that input tensor dimensions matches with each other.
- CheckInputTensorDimensions(context, node, n_input, n_fw_output, n_fw_cell);
+ TF_LITE_ENSURE_OK(
+ context, CheckInputTensorDimensions(context, node, n_input, n_fw_output,
+ n_fw_cell));
// Get the pointer to output, state and scratch buffer tensors.
TfLiteTensor* fw_output = GetOutput(context, node, kFwOutputTensor);
@@ -404,7 +411,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const int n_bw_output = bw_recurrent_to_output_weights->dims->data[1];
// Check that input tensor dimensions matches with each other.
- CheckInputTensorDimensions(context, node, n_input, n_bw_output, n_bw_cell);
+ TF_LITE_ENSURE_OK(
+ context, CheckInputTensorDimensions(context, node, n_input, n_bw_output,
+ n_bw_cell));
// Get the pointer to output, output_state and cell_state buffer tensors.
TfLiteTensor* bw_output = GetOutput(context, node, kBwOutputTensor);
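The point of wrapping the dimension checks above in TF_LITE_ENSURE_OK is that their return values were previously discarded. The macro behaves roughly like the simplified model below; the names are stand-ins, not the real TFLite definitions:

```cpp
// Simplified model of TF_LITE_ENSURE_OK: a non-OK status from the wrapped
// call is returned from the enclosing function instead of being dropped.
enum MyStatus { kMyOk = 0, kMyError = 1 };

#define MY_ENSURE_OK(status)      \
  do {                            \
    MyStatus s_ = (status);       \
    if (s_ != kMyOk) return s_;   \
  } while (false)

MyStatus CheckDimensions() { return kMyError; }  // pretend a check failed

MyStatus Prepare() {
  MY_ENSURE_OK(CheckDimensions());  // previously the result was ignored
  return kMyOk;                     // not reached when the check fails
}
```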
diff --git a/tensorflow/contrib/lite/kernels/bidirectional_sequence_rnn.cc b/tensorflow/contrib/lite/kernels/bidirectional_sequence_rnn.cc
index aa24c1f34c..517309a226 100644
--- a/tensorflow/contrib/lite/kernels/bidirectional_sequence_rnn.cc
+++ b/tensorflow/contrib/lite/kernels/bidirectional_sequence_rnn.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdlib>
diff --git a/tensorflow/contrib/lite/kernels/concatenation.cc b/tensorflow/contrib/lite/kernels/concatenation.cc
index 45ea8d0049..ad211e9c67 100644
--- a/tensorflow/contrib/lite/kernels/concatenation.cc
+++ b/tensorflow/contrib/lite/kernels/concatenation.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
diff --git a/tensorflow/contrib/lite/kernels/conv.cc b/tensorflow/contrib/lite/kernels/conv.cc
index a4fe9e5550..6f174763df 100644
--- a/tensorflow/contrib/lite/kernels/conv.cc
+++ b/tensorflow/contrib/lite/kernels/conv.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <algorithm>
#include <cassert>
#include <cmath>
diff --git a/tensorflow/contrib/lite/kernels/depthwise_conv.cc b/tensorflow/contrib/lite/kernels/depthwise_conv.cc
index 16e5f1d065..21518156b8 100644
--- a/tensorflow/contrib/lite/kernels/depthwise_conv.cc
+++ b/tensorflow/contrib/lite/kernels/depthwise_conv.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
diff --git a/tensorflow/contrib/lite/kernels/div.cc b/tensorflow/contrib/lite/kernels/div.cc
index bc5c3783fd..d7420ddd8e 100644
--- a/tensorflow/contrib/lite/kernels/div.cc
+++ b/tensorflow/contrib/lite/kernels/div.cc
@@ -78,29 +78,44 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}
template <KernelType kernel_type>
-void EvalFloat(TfLiteContext* context, TfLiteNode* node,
- TfLiteDivParams* params, const OpData* data,
- const TfLiteTensor* input1, const TfLiteTensor* input2,
- TfLiteTensor* output) {
- float output_activation_min, output_activation_max;
- CalculateActivationRange(params->activation, &output_activation_min,
- &output_activation_max);
-#define TF_LITE_DIV(type, opname) \
- type::opname(GetTensorData<float>(input1), GetTensorDims(input1), \
- GetTensorData<float>(input2), GetTensorDims(input2), \
- output_activation_min, output_activation_max, \
- GetTensorData<float>(output), GetTensorDims(output))
- if (kernel_type == kReference) {
- if (data->requires_broadcast) {
- TF_LITE_DIV(reference_ops, BroadcastDiv);
+void EvalDiv(TfLiteContext* context, TfLiteNode* node, TfLiteDivParams* params,
+ const OpData* data, const TfLiteTensor* input1,
+ const TfLiteTensor* input2, TfLiteTensor* output) {
+#define TF_LITE_DIV(type, opname, data_type) \
+ data_type output_activation_min, output_activation_max; \
+ CalculateActivationRange(params->activation, &output_activation_min, \
+ &output_activation_max); \
+ type::opname(GetTensorData<data_type>(input1), GetTensorDims(input1), \
+ GetTensorData<data_type>(input2), GetTensorDims(input2), \
+ output_activation_min, output_activation_max, \
+ GetTensorData<data_type>(output), GetTensorDims(output))
+ if (output->type == kTfLiteInt32) {
+ if (kernel_type == kReference) {
+ if (data->requires_broadcast) {
+ TF_LITE_DIV(reference_ops, BroadcastDiv, int32_t);
+ } else {
+ TF_LITE_DIV(reference_ops, Div, int32_t);
+ }
} else {
- TF_LITE_DIV(reference_ops, Div);
+ if (data->requires_broadcast) {
+ TF_LITE_DIV(optimized_ops, BroadcastDiv, int32_t);
+ } else {
+ TF_LITE_DIV(optimized_ops, Div, int32_t);
+ }
}
- } else {
- if (data->requires_broadcast) {
- TF_LITE_DIV(optimized_ops, BroadcastDiv);
+ } else if (output->type == kTfLiteFloat32) {
+ if (kernel_type == kReference) {
+ if (data->requires_broadcast) {
+ TF_LITE_DIV(reference_ops, BroadcastDiv, float);
+ } else {
+ TF_LITE_DIV(reference_ops, Div, float);
+ }
} else {
- TF_LITE_DIV(optimized_ops, Div);
+ if (data->requires_broadcast) {
+ TF_LITE_DIV(optimized_ops, BroadcastDiv, float);
+ } else {
+ TF_LITE_DIV(optimized_ops, Div, float);
+ }
}
}
#undef TF_LITE_DIV
@@ -115,11 +130,12 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
- if (output->type == kTfLiteFloat32) {
- EvalFloat<kernel_type>(context, node, params, data, input1, input2, output);
+ if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
+ EvalDiv<kernel_type>(context, node, params, data, input1, input2, output);
} else {
context->ReportError(
- context, "Div only supports FLOAT32 and quantized UINT8 now, got %d.",
+ context,
+ "Div only supports FLOAT32, INT32 and quantized UINT8 now, got %d.",
output->type);
return kTfLiteError;
}
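A minimal sketch of the integer Div path added above: C++ integer division truncates toward zero, and the activation range is applied as a clamp. The helper name is hypothetical; the values match the expectations in div_test.cc, e.g. -2 / 5 == 0 and -15 / -3 == 5.

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<int32_t> DivWithClamp(const std::vector<int32_t>& a,
                                  const std::vector<int32_t>& b,
                                  int32_t act_min, int32_t act_max) {
  std::vector<int32_t> out(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    // Truncating division, then clamp to the activation range.
    out[i] = std::min(std::max(a[i] / b[i], act_min), act_max);
  }
  return out;
}
```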
diff --git a/tensorflow/contrib/lite/kernels/div_test.cc b/tensorflow/contrib/lite/kernels/div_test.cc
index 276b8289fb..97aa2fe04e 100644
--- a/tensorflow/contrib/lite/kernels/div_test.cc
+++ b/tensorflow/contrib/lite/kernels/div_test.cc
@@ -52,6 +52,13 @@ class FloatDivOpModel : public BaseDivOpModel {
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
+class IntegerDivOpModel : public BaseDivOpModel {
+ public:
+ using BaseDivOpModel::BaseDivOpModel;
+
+ std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
+};
+
TEST(FloatDivOpTest, NoActivation) {
FloatDivOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
@@ -75,7 +82,7 @@ TEST(FloatDivOpTest, ActivationRELU_N1_TO_1) {
}
TEST(FloatDivOpTest, VariousInputShapes) {
- std::vector<std::initializer_list<int>> test_shapes = {
+ std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
FloatDivOpModel m({TensorType_FLOAT32, test_shapes[i]},
@@ -92,7 +99,7 @@ TEST(FloatDivOpTest, VariousInputShapes) {
}
TEST(FloatDivOpTest, WithBroadcast) {
- std::vector<std::initializer_list<int>> test_shapes = {
+ std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
for (int i = 0; i < test_shapes.size(); ++i) {
FloatDivOpModel m({TensorType_FLOAT32, test_shapes[i]},
@@ -108,6 +115,56 @@ TEST(FloatDivOpTest, WithBroadcast) {
}
}
+TEST(IntegerDivOpTest, NoActivation) {
+ IntegerDivOpModel m({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
+ ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-2, 2, -15, 8});
+ m.PopulateTensor<int32_t>(m.input2(), {5, -2, -3, 5});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, -1, 5, 1}));
+}
+
+TEST(IntegerDivOpTest, ActivationRELU_N1_TO_1) {
+ IntegerDivOpModel m({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
+ ActivationFunctionType_RELU_N1_TO_1);
+ m.PopulateTensor<int32_t>(m.input1(), {-2, 2, -12, 8});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, -15, 5});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1, 1, 0, 1}));
+}
+
+TEST(IntegerDivOpTest, VariousInputShapes) {
+ std::vector<std::vector<int>> test_shapes = {
+ {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+ for (int i = 0; i < test_shapes.size(); ++i) {
+ IntegerDivOpModel m({TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, {}}, ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 3, 8, 11, -20});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 6, 5, -11, -1});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-20, 1, 0, 1, -1, 20}))
+ << "With shape number " << i;
+ }
+}
+
+TEST(IntegerDivOpTest, WithBroadcast) {
+ std::vector<std::vector<int>> test_shapes = {
+ {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+ for (int i = 0; i < test_shapes.size(); ++i) {
+ IntegerDivOpModel m({TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, {}}, // always a scalar
+ {TensorType_INT32, {}}, ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 21, 7, 8, 11, -123});
+ m.PopulateTensor<int32_t>(m.input2(), {3});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-6, 7, 2, 2, 3, -41}))
+ << "With shape number " << i;
+ }
+}
+
} // namespace
} // namespace tflite
diff --git a/tensorflow/contrib/lite/kernels/eigen_support.cc b/tensorflow/contrib/lite/kernels/eigen_support.cc
index 4f0d020793..e542ad0765 100644
--- a/tensorflow/contrib/lite/kernels/eigen_support.cc
+++ b/tensorflow/contrib/lite/kernels/eigen_support.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include <utility>
+#include "tensorflow/contrib/lite/arena_planner.h"
#include "tensorflow/contrib/lite/kernels/internal/optimized/eigen_spatial_convolutions.h"
#include "tensorflow/contrib/lite/kernels/op_macros.h"
@@ -23,6 +24,16 @@ namespace tflite {
namespace eigen_support {
namespace {
+#ifndef EIGEN_DONT_ALIGN
+// Eigen may require buffers to be aligned to 16, 32 or 64 bytes depending on
+// hardware architecture and build configurations.
+// If the static assertion fails, try to increase `kDefaultTensorAlignment`
+// in `arena_planner.h` to 32 or 64.
+static_assert(
+ kDefaultTensorAlignment % EIGEN_MAX_ALIGN_BYTES == 0,
+ "kDefaultTensorAlignment doesn't comply with Eigen alignment requirement.");
+#endif // EIGEN_DONT_ALIGN
+
// We have a single global threadpool for all convolution operations. This means
// that inferences started from different threads may block each other, but
// since the underlying resource of CPU cores should be consumed by the
diff --git a/tensorflow/contrib/lite/kernels/embedding_lookup.cc b/tensorflow/contrib/lite/kernels/embedding_lookup.cc
index 0ba170a4da..b2dff87e62 100644
--- a/tensorflow/contrib/lite/kernels/embedding_lookup.cc
+++ b/tensorflow/contrib/lite/kernels/embedding_lookup.cc
@@ -29,7 +29,6 @@ limitations under the License.
// When indices are out of bound, the ops will not succeed.
//
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
@@ -112,8 +111,9 @@ TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
// TODO(alanchiao): refactor scalar multiply into separate function
// for ease of adding a neon equivalent if ever necessary.
for (int j = 0; j < col_size; j++) {
+ const int8_t* value_ptr = reinterpret_cast<int8_t*>(value->data.uint8);
output->data.f[j + i * col_size] =
- value->data.uint8[j + idx * col_size] * scaling_factor;
+ value_ptr[j + idx * col_size] * scaling_factor;
}
}
}
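Why the reinterpret_cast above matters: the hybrid weights are stored as raw bytes but hold signed 8-bit values, so a byte of 0xFF must be read as -1, not 255, before multiplying by the scaling factor. A standalone illustration with a hypothetical helper name:

```cpp
#include <cstdint>

float DequantizeHybridWeight(uint8_t raw_byte, float scaling_factor) {
  // Reinterpret the byte as a signed 8-bit value before scaling.
  const int8_t signed_value = static_cast<int8_t>(raw_byte);
  return signed_value * scaling_factor;
}

// DequantizeHybridWeight(0xFF, 0.01f) == -0.01f, whereas using the unsigned
// byte directly would give 2.55f.
```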
diff --git a/tensorflow/contrib/lite/kernels/embedding_lookup_test.cc b/tensorflow/contrib/lite/kernels/embedding_lookup_test.cc
index 04657fd863..4a88d168c6 100644
--- a/tensorflow/contrib/lite/kernels/embedding_lookup_test.cc
+++ b/tensorflow/contrib/lite/kernels/embedding_lookup_test.cc
@@ -107,9 +107,9 @@ TEST(HybridEmbeddingLookupHybridOpTest, Simple2DTest) {
HybridEmbeddingLookupOpModel m({3}, {3, 8});
m.SetInput({1, 0, 2});
m.SetWeight({
- 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
- 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
- 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
+ 1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
});
m.Invoke();
@@ -117,9 +117,9 @@ TEST(HybridEmbeddingLookupHybridOpTest, Simple2DTest) {
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
- 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
- 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
- 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
+ 1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
},
7.41e-03)));
}
@@ -128,9 +128,9 @@ TEST(HybridEmbeddingLookupHybridOpTest, Simple3DTest) {
HybridEmbeddingLookupOpModel m({3}, {3, 2, 4});
m.SetInput({1, 0, 2});
m.SetWeight({
- 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
- 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
- 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
+ 1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
});
m.Invoke();
@@ -138,9 +138,9 @@ TEST(HybridEmbeddingLookupHybridOpTest, Simple3DTest) {
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
- 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
- 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
- 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
+ 1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
},
7.41e-03)));
}
@@ -149,9 +149,9 @@ TEST(HybridEmbeddingLookupHybridOpTest, Simple4DTest) {
HybridEmbeddingLookupOpModel m({3}, {3, 2, 2, 2});
m.SetInput({1, 0, 2});
m.SetWeight({
- 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
- 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
- 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
+ 1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
});
m.Invoke();
@@ -159,9 +159,9 @@ TEST(HybridEmbeddingLookupHybridOpTest, Simple4DTest) {
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
- 1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
- 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
- 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
+ 1.00, -1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13, // Row 1
+ 0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13, // Row 0
+ 2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13, // Row 2
},
7.41e-03)));
}
diff --git a/tensorflow/contrib/lite/kernels/fake_quant.cc b/tensorflow/contrib/lite/kernels/fake_quant.cc
index f8927a0799..0ef1a50b30 100644
--- a/tensorflow/contrib/lite/kernels/fake_quant.cc
+++ b/tensorflow/contrib/lite/kernels/fake_quant.cc
@@ -44,6 +44,17 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+ const auto* params =
+ reinterpret_cast<TfLiteFakeQuantParams*>(node->builtin_data);
+
+ if (params->narrow_range) {
+ context->ReportError(
+ context,
+ "narrow_range FakeQuant is not currently supported at runtime. "
+ "narrow_range is only meant to be applied to weights, not activations");
+ return kTfLiteError;
+ }
+
OpContext op_context(context, node);
TfLiteIntArray* output_dims = TfLiteIntArrayCopy(op_context.input->dims);
op_context.output->type = op_context.input->type;
diff --git a/tensorflow/contrib/lite/kernels/fully_connected.cc b/tensorflow/contrib/lite/kernels/fully_connected.cc
index 6c9a845bd1..bc370608c0 100644
--- a/tensorflow/contrib/lite/kernels/fully_connected.cc
+++ b/tensorflow/contrib/lite/kernels/fully_connected.cc
@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
@@ -71,7 +70,7 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
// Instead, we allocate a new object to carry information from Prepare() to
// Eval().
gemm_support::IncrementUsageCounter(context);
- auto* op_data = new OpData;
+ auto* op_data = new OpData();
context->AddTensors(context, 1, &op_data->input_quantized_index);
return op_data;
}
@@ -152,10 +151,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}
}
- // Resize output to the same as input (except the last dimension which is
- // determined by the number of units).
- TfLiteIntArray* output_size_array = TfLiteIntArrayCopy(input->dims);
- output_size_array->data[input->dims->size - 1] = num_units;
+ // Resize output.
+ TfLiteIntArray* output_size_array = TfLiteIntArrayCreate(2);
+ output_size_array->data[0] = batch_size;
+ output_size_array->data[1] = num_units;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output, output_size_array));
return kTfLiteOk;
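A minimal sketch of the new output-shape rule in Prepare() above: whatever the input rank, the output is 2-D, with the batch size derived from the flattened input. These helper names are hypothetical, not part of the TFLite API.

```cpp
struct Shape2D {
  int batch_size;
  int num_units;
};

Shape2D FullyConnectedOutputShape(int num_input_elements, int input_size,
                                  int num_units) {
  // The flattened input must divide evenly by input_size.
  const int batch_size = num_input_elements / input_size;
  return {batch_size, num_units};
}

// Example: a {4, 1, 5, 1} input (20 elements) with input_size 10 and 3 units
// yields a {2, 3} output, matching the updated fully_connected_test.cc cases.
```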
diff --git a/tensorflow/contrib/lite/kernels/fully_connected_test.cc b/tensorflow/contrib/lite/kernels/fully_connected_test.cc
index a6b6b2f497..ec94905697 100644
--- a/tensorflow/contrib/lite/kernels/fully_connected_test.cc
+++ b/tensorflow/contrib/lite/kernels/fully_connected_test.cc
@@ -207,7 +207,6 @@ class FloatFullyConnectedOpModel : public BaseFullyConnectedOpModel {
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
- std::vector<int> GetOutputSize() { return GetTensorShape(output_); }
};
class QuantizedFullyConnectedOpModel : public BaseFullyConnectedOpModel {
@@ -299,7 +298,6 @@ class HybridFullyConnectedOpModel : public SingleOpModel {
void SetInput(const std::vector<float>& f) { PopulateTensor(input_, f); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
- std::vector<int> GetOutputSize() { return GetTensorShape(output_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
@@ -374,7 +372,6 @@ TEST_P(FloatFullyConnectedOpTest, SimpleTest) {
m.Invoke();
- EXPECT_THAT(m.GetOutputSize(), ElementsAre(2, 3));
EXPECT_THAT(m.GetOutput(), ElementsAre(24, 25, 26, 58, 59, 60));
}
@@ -393,7 +390,6 @@ TEST_P(FloatFullyConnectedOpTest, SimpleTest2) {
m.Invoke();
- EXPECT_THAT(m.GetOutputSize(), ElementsAre(2, 1));
EXPECT_THAT(m.GetOutput(), ElementsAre(11, 9));
}
@@ -580,10 +576,11 @@ TEST(HybridFullyConnectedOpTest, SimpleTestQuantized) {
TEST_P(FloatFullyConnectedOpTest, SimpleTest4DInput) {
// Note that it is not required that the first dimension be the number of
- // batches. All we care is that the input size is the last dimension.
+ // batches. All we care about is that the input can be evenly distributed
+ // across batches. In this case, the flattened input must split evenly into
+ // the '2' batches.
FloatFullyConnectedOpModel m(GetRegistration(),
/*units=*/3, /*batches=*/2,
- /*input=*/{TensorType_FLOAT32, {1, 2, 1, 10}});
+ /*input=*/{TensorType_FLOAT32, {4, 1, 5, 1}});
m.SetWeights({
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // u = 0
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // u = 1
@@ -598,7 +595,6 @@ TEST_P(FloatFullyConnectedOpTest, SimpleTest4DInput) {
m.Invoke();
- EXPECT_THAT(m.GetOutputSize(), ElementsAre(1, 2, 1, 3));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
24, 25, 26, // first batch
58, 59, 60, // second batch
@@ -608,7 +604,7 @@ TEST_P(FloatFullyConnectedOpTest, SimpleTest4DInput) {
TEST_P(QuantizedFullyConnectedOpTest, SimpleTest4dInputQuantized) {
QuantizedFullyConnectedOpModel m(
GetRegistration(), /*units=*/3, /*batches=*/2,
- /*input=*/{TensorType_UINT8, {1, 2, 1, 10}, -63.5, 64},
+ /*input=*/{TensorType_UINT8, {4, 1, 5, 1}, -63.5, 64},
/*output=*/{TensorType_UINT8, {}, -127, 128});
// input_product_scale < output_scale was not true.
diff --git a/tensorflow/contrib/lite/kernels/hashtable_lookup.cc b/tensorflow/contrib/lite/kernels/hashtable_lookup.cc
index 41211d41aa..f37c66acb3 100644
--- a/tensorflow/contrib/lite/kernels/hashtable_lookup.cc
+++ b/tensorflow/contrib/lite/kernels/hashtable_lookup.cc
@@ -31,7 +31,6 @@ limitations under the License.
// Each item indicates whether the corresponding lookup has a returned value.
// 0 for missing key, 1 for found key.
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
diff --git a/tensorflow/contrib/lite/kernels/internal/BUILD b/tensorflow/contrib/lite/kernels/internal/BUILD
index 7962fcbc9d..3a855fe3dd 100644
--- a/tensorflow/contrib/lite/kernels/internal/BUILD
+++ b/tensorflow/contrib/lite/kernels/internal/BUILD
@@ -232,6 +232,7 @@ cc_library(
cc_test(
name = "tensor_test",
srcs = ["tensor_test.cc"],
+ tags = ["no_oss"],
deps = [
":reference",
"@com_google_googletest//:gtest",
@@ -260,6 +261,7 @@ cc_library(
cc_test(
name = "quantization_util_test",
srcs = ["quantization_util_test.cc"],
+ tags = ["no_oss"],
deps = [
":quantization_util",
"@com_google_googletest//:gtest",
@@ -505,7 +507,10 @@ cc_test(
"//conditions:default": [],
}),
linkstatic = 1,
- tags = ["tflite_not_portable_ios"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable_ios",
+ ],
deps = [
":tensor_utils",
"//tensorflow/contrib/lite:builtin_op_data",
@@ -517,6 +522,7 @@ cc_test(
cc_test(
name = "depthwiseconv_float_test",
srcs = ["depthwiseconv_float_test.cc"],
+ tags = ["no_oss"],
deps = [
":optimized_base",
":reference_base",
@@ -529,6 +535,7 @@ cc_test(
cc_test(
name = "depthwiseconv_quantized_test",
srcs = ["depthwiseconv_quantized_test.cc"],
+ tags = ["no_oss"],
deps = [
":optimized_base",
":reference_base",
@@ -541,7 +548,10 @@ cc_test(
cc_test(
name = "resize_bilinear_test",
srcs = ["resize_bilinear_test.cc"],
- tags = ["tflite_not_portable"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable",
+ ],
deps = [
":optimized_base",
":reference_base",
@@ -557,6 +567,7 @@ cc_test(
srcs = [
"softmax_quantized_test.cc",
],
+ tags = ["no_oss"],
deps = [
":optimized_base",
":quantization_util",
@@ -572,7 +583,10 @@ cc_test(
srcs = [
"logsoftmax_quantized_test.cc",
],
- tags = ["tflite_not_portable"],
+ tags = [
+ "no_oss",
+ "tflite_not_portable",
+ ],
deps = [
":optimized_base",
":quantization_util",
@@ -585,6 +599,7 @@ cc_test(
cc_test(
name = "log_quantized_test",
srcs = ["log_quantized_test.cc"],
+ tags = ["no_oss"],
deps = [
":optimized_base",
":reference_base",
@@ -611,6 +626,7 @@ cc_library(
cc_test(
name = "batch_to_space_nd_test",
srcs = ["batch_to_space_nd_test.cc"],
+ tags = ["no_oss"],
deps = [
":optimized_base",
"@com_google_googletest//:gtest_main",
diff --git a/tensorflow/contrib/lite/kernels/internal/common.h b/tensorflow/contrib/lite/kernels/internal/common.h
index b86ca49c11..310a8980e6 100644
--- a/tensorflow/contrib/lite/kernels/internal/common.h
+++ b/tensorflow/contrib/lite/kernels/internal/common.h
@@ -127,6 +127,139 @@ int CountLeadingZeros(T integer_input) {
return leading_zeros;
}
+// DO NOT USE THIS STRUCT FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING
+// BROADCASTING.
+//
+// NdArrayDesc<N> describes the shape and memory layout of an N-dimensional
+// rectangular array of numbers.
+//
+// NdArrayDesc<N> is basically identical to Dims<N> defined in types.h.
+// However, as Dims<N> is to be deprecated, this class exists as an adaptor
+// to enable simple unoptimized implementations of element-wise broadcasting
+// operations.
+template <int N>
+struct NdArrayDesc {
+ // The "extent" of each dimension. Indices along dimension d must be in the
+ // half-open interval [0, extents[d]).
+ int extents[N];
+
+ // The number of *elements* (not bytes) between consecutive indices of each
+ // dimension.
+ int strides[N];
+};
+
+// DO NOT USE THIS FUNCTION FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING
+// BROADCASTING.
+//
+// Same as Offset(), except takes as NdArrayDesc<N> instead of Dims<N>.
+inline int SubscriptToIndex(const NdArrayDesc<4>& desc, int i0, int i1, int i2,
+ int i3) {
+ TFLITE_DCHECK(i0 >= 0 && i0 < desc.extents[0]);
+ TFLITE_DCHECK(i1 >= 0 && i1 < desc.extents[1]);
+ TFLITE_DCHECK(i2 >= 0 && i2 < desc.extents[2]);
+ TFLITE_DCHECK(i3 >= 0 && i3 < desc.extents[3]);
+ return i0 * desc.strides[0] + i1 * desc.strides[1] + i2 * desc.strides[2] +
+ i3 * desc.strides[3];
+}
+
+// Given the dimensions of the operands for an element-wise binary broadcast,
+// adjusts them so that they can be directly iterated over with simple loops.
+// Returns the adjusted dims as instances of NdArrayDesc in 'desc0_out' and
+// 'desc1_out'. 'desc0_out' and 'desc1_out' cannot be nullptr.
+//
+// This function assumes that the two input shapes are compatible up to
+// broadcasting and the shorter one has already been prepended with 1s to be the
+// same length. E.g., if shape0 is (1, 16, 16, 64) and shape1 is (1, 64),
+// shape1 must already have been prepended to be (1, 1, 1, 64). Recall that
+// Dims<N> refer to shapes in reverse order. In this case, input0_dims will be
+// (64, 16, 16, 1) and input1_dims will be (64, 1, 1, 1).
+//
+// When two shapes are compatible up to broadcasting, for each dimension d,
+// the input extents are either equal, or one of them is 1.
+//
+// This function performs the following for each dimension d:
+// - If the extents are equal, then do nothing since the loop that walks over
+// both of the input arrays is correct.
+// - Otherwise, one (and only one) of the extents must be 1. Say extent0 is 1
+// and extent1 is e1. Then set extent0 to e1 and stride0 *to 0*. This allows
+// array0 to be referenced *at any index* in dimension d and still access the
+// same slice.
+template <int N>
+inline void NdArrayDescsForElementwiseBroadcast(const Dims<N>& input0_dims,
+ const Dims<N>& input1_dims,
+ NdArrayDesc<N>* desc0_out,
+ NdArrayDesc<N>* desc1_out) {
+ TFLITE_DCHECK(desc0_out != nullptr);
+ TFLITE_DCHECK(desc1_out != nullptr);
+
+ // Copy dims to desc.
+ for (int i = 0; i < N; ++i) {
+ desc0_out->extents[i] = input0_dims.sizes[i];
+ desc0_out->strides[i] = input0_dims.strides[i];
+ desc1_out->extents[i] = input1_dims.sizes[i];
+ desc1_out->strides[i] = input1_dims.strides[i];
+ }
+
+ // Walk over each dimension. If the extents are equal do nothing.
+ // Otherwise, set the desc with extent 1 to have extent equal to the other and
+ // stride 0.
+ for (int i = 0; i < N; ++i) {
+ const int extent0 = ArraySize(input0_dims, i);
+ const int extent1 = ArraySize(input1_dims, i);
+ if (extent0 != extent1) {
+ if (extent0 == 1) {
+ desc0_out->strides[i] = 0;
+ desc0_out->extents[i] = extent1;
+ } else {
+ TFLITE_DCHECK_EQ(extent1, 1);
+ desc1_out->strides[i] = 0;
+ desc1_out->extents[i] = extent0;
+ }
+ }
+ }
+}
+
+template <int N>
+inline void NdArrayDescsForElementwiseBroadcast(
+ const RuntimeShape& input0_shape, const RuntimeShape& input1_shape,
+ NdArrayDesc<N>* desc0_out, NdArrayDesc<N>* desc1_out) {
+ TFLITE_DCHECK(desc0_out != nullptr);
+ TFLITE_DCHECK(desc1_out != nullptr);
+
+ auto extended_input0_shape = RuntimeShape::ExtendedShape(N, input0_shape);
+ auto extended_input1_shape = RuntimeShape::ExtendedShape(N, input1_shape);
+
+ // Copy dims to desc, calculating strides.
+ int desc0_stride = 1;
+ int desc1_stride = 1;
+ for (int i = N - 1; i >= 0; --i) {
+ desc0_out->extents[i] = extended_input0_shape.Dims(i);
+ desc0_out->strides[i] = desc0_stride;
+ desc0_stride *= extended_input0_shape.Dims(i);
+ desc1_out->extents[i] = extended_input1_shape.Dims(i);
+ desc1_out->strides[i] = desc1_stride;
+ desc1_stride *= extended_input1_shape.Dims(i);
+ }
+
+ // Walk over each dimension. If the extents are equal do nothing.
+ // Otherwise, set the desc with extent 1 to have extent equal to the other and
+ // stride 0.
+ for (int i = 0; i < N; ++i) {
+ const int extent0 = extended_input0_shape.Dims(i);
+ const int extent1 = extended_input1_shape.Dims(i);
+ if (extent0 != extent1) {
+ if (extent0 == 1) {
+ desc0_out->strides[i] = 0;
+ desc0_out->extents[i] = extent1;
+ } else {
+ TFLITE_DCHECK_EQ(extent1, 1);
+ desc1_out->strides[i] = 0;
+ desc1_out->extents[i] = extent0;
+ }
+ }
+ }
+}
+
} // namespace tflite
#endif // TENSORFLOW_CONTRIB_LITE_KERNELS_INTERNAL_COMMON_H_
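For readers tracing this refactor: the helpers just moved into common.h (NdArrayDesc, SubscriptToIndex, and the two NdArrayDescsForElementwiseBroadcast overloads) exist so that element-wise broadcast kernels can walk two differently shaped inputs with one set of loops. A minimal sketch of how they fit together, assuming rank-4 inputs whose broadcast result equals output_shape; the BroadcastAddSketch name is illustrative and not part of the patch:

template <typename T>
void BroadcastAddSketch(const RuntimeShape& input1_shape, const T* input1_data,
                        const RuntimeShape& input2_shape, const T* input2_data,
                        const RuntimeShape& output_shape, T* output_data) {
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  // Extends both shapes to rank 4 and zeroes the stride of every broadcast
  // dimension, so one subscript can address either input.
  NdArrayDescsForElementwiseBroadcast<4>(input1_shape, input2_shape, &desc1,
                                         &desc2);
  const int d0 = output_shape.Dims(0), d1 = output_shape.Dims(1);
  const int d2 = output_shape.Dims(2), d3 = output_shape.Dims(3);
  for (int i0 = 0; i0 < d0; ++i0) {
    for (int i1 = 0; i1 < d1; ++i1) {
      for (int i2 = 0; i2 < d2; ++i2) {
        for (int i3 = 0; i3 < d3; ++i3) {
          // Row-major flat index into the output.
          const int out_index = ((i0 * d1 + i1) * d2 + i2) * d3 + i3;
          output_data[out_index] =
              input1_data[SubscriptToIndex(desc1, i0, i1, i2, i3)] +
              input2_data[SubscriptToIndex(desc2, i0, i1, i2, i3)];
        }
      }
    }
  }
}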
diff --git a/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc b/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
index a0e382edb6..200f2f1515 100644
--- a/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
+++ b/tensorflow/contrib/lite/kernels/internal/kernel_utils.cc
@@ -255,14 +255,6 @@ void LstmStep(
output_state_ptr);
}
-// TODO(alanchiao): move this to tensor_utils.
-void VectorMultiply(const int8_t* vector, const int v_size, const float scale,
- float* result) {
- for (int i = 0; i < v_size; ++i) {
- *result++ = scale * *vector++;
- }
-}
-
void LstmStep(
const float* input_ptr_batch, const int8_t* input_to_input_weights_ptr,
float input_to_input_weights_scale,
@@ -415,8 +407,9 @@ void LstmStep(
// For each batch and cell: update input gate.
if (!use_cifg) {
if (use_peephole && !is_cell_state_all_zeros) {
- VectorMultiply(cell_to_input_weights_ptr, n_cell,
- cell_to_input_weights_scale, recovered_cell_weights);
+ tensor_utils::VectorScalarMultiply(cell_to_input_weights_ptr, n_cell,
+ cell_to_input_weights_scale,
+ recovered_cell_weights);
tensor_utils::VectorBatchVectorCwiseProductAccumulate(
recovered_cell_weights, n_cell, cell_state_ptr, n_batch,
input_gate_scratch);
@@ -427,8 +420,9 @@ void LstmStep(
// For each batch and cell: update forget gate.
if (use_peephole && !is_cell_state_all_zeros) {
- VectorMultiply(cell_to_forget_weights_ptr, n_cell,
- cell_to_forget_weights_scale, recovered_cell_weights);
+ tensor_utils::VectorScalarMultiply(cell_to_forget_weights_ptr, n_cell,
+ cell_to_forget_weights_scale,
+ recovered_cell_weights);
tensor_utils::VectorBatchVectorCwiseProductAccumulate(
recovered_cell_weights, n_cell, cell_state_ptr, n_batch,
forget_gate_scratch);
@@ -459,8 +453,9 @@ void LstmStep(
tensor_utils::IsZeroVector(cell_state_ptr, n_batch * n_cell);
// For each batch and cell: update the output gate.
if (use_peephole && !is_cell_state_all_zeros) {
- VectorMultiply(cell_to_output_weights_ptr, n_cell,
- cell_to_output_weights_scale, recovered_cell_weights);
+ tensor_utils::VectorScalarMultiply(cell_to_output_weights_ptr, n_cell,
+ cell_to_output_weights_scale,
+ recovered_cell_weights);
tensor_utils::VectorBatchVectorCwiseProductAccumulate(
recovered_cell_weights, n_cell, cell_state_ptr, n_batch,
output_gate_scratch);
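The three call sites updated above now route through tensor_utils::VectorScalarMultiply instead of the file-local VectorMultiply that this hunk deletes. As the removed helper shows, the portable behavior is just an int8-to-float scale; a minimal reference sketch (the Sketch name is illustrative, the NEON-accelerated version appears later in this patch):

// Portable behavior: result[i] = scale * vector[i], widening int8 to float.
inline void VectorScalarMultiplySketch(const int8_t* vector, int v_size,
                                       float scale, float* result) {
  for (int i = 0; i < v_size; ++i) {
    result[i] = scale * static_cast<float>(vector[i]);
  }
}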
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/legacy_optimized_ops.h b/tensorflow/contrib/lite/kernels/internal/optimized/legacy_optimized_ops.h
index 6db41d7961..d5503073a7 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/legacy_optimized_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/legacy_optimized_ops.h
@@ -55,6 +55,245 @@ inline void Relu(const float* input_data, const Dims<4>& input_dims,
DimsToShape(output_dims));
}
+// legacy, for compatibility with old checked-in code
+template <FusedActivationFunctionType Ac>
+void Add(const float* input1_data, const Dims<4>& input1_dims,
+ const float* input2_data, const Dims<4>& input2_dims,
+ float* output_data, const Dims<4>& output_dims) {
+ float output_activation_min, output_activation_max;
+ GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
+
+ tflite::ArithmeticParams op_params;
+ op_params.float_activation_min = output_activation_min;
+ op_params.float_activation_max = output_activation_max;
+ Add(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
+ output_data);
+}
+
+template <FusedActivationFunctionType Ac>
+inline void Add(int left_shift, const uint8* input1_data,
+ const Dims<4>& input1_dims, int32 input1_offset,
+ int32 input1_multiplier, int input1_shift,
+ const uint8* input2_data, const Dims<4>& input2_dims,
+ int32 input2_offset, int32 input2_multiplier, int input2_shift,
+ int32 output_offset, int32 output_multiplier, int output_shift,
+ int32 output_activation_min, int32 output_activation_max,
+ uint8* output_data, const Dims<4>& output_dims) {
+ constexpr int kReverseShift = -1;
+ static_assert(Ac == FusedActivationFunctionType::kNone ||
+ Ac == FusedActivationFunctionType::kRelu ||
+ Ac == FusedActivationFunctionType::kRelu6 ||
+ Ac == FusedActivationFunctionType::kRelu1,
+ "");
+ TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+ if (Ac == FusedActivationFunctionType::kNone) {
+ TFLITE_DCHECK_EQ(output_activation_min, 0);
+ TFLITE_DCHECK_EQ(output_activation_max, 255);
+ }
+
+ tflite::ArithmeticParams op_params;
+ op_params.left_shift = left_shift;
+ op_params.input1_offset = input1_offset;
+ op_params.input1_multiplier = input1_multiplier;
+ op_params.input1_shift = kReverseShift * input1_shift;
+ op_params.input2_offset = input2_offset;
+ op_params.input2_multiplier = input2_multiplier;
+ op_params.input2_shift = kReverseShift * input2_shift;
+ op_params.output_offset = output_offset;
+ op_params.output_multiplier = output_multiplier;
+ op_params.output_shift = kReverseShift * output_shift;
+ op_params.quantized_activation_min = output_activation_min;
+ op_params.quantized_activation_max = output_activation_max;
+ Add(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
+ output_data);
+}
+
+template <FusedActivationFunctionType Ac>
+void Add(const int32* input1_data, const Dims<4>& input1_dims,
+ const int32* input2_data, const Dims<4>& input2_dims,
+ int32* output_data, const Dims<4>& output_dims) {
+ gemmlowp::ScopedProfilingLabel label("Add/int32");
+ TFLITE_DCHECK(Ac == FusedActivationFunctionType::kNone);
+
+ tflite::ArithmeticParams op_params;
+ op_params.quantized_activation_min = std::numeric_limits<int32>::min();
+ op_params.quantized_activation_max = std::numeric_limits<int32>::max();
+ Add(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
+ output_data);
+}
+
+template <typename T>
+void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
+ const T* input2_data, const Dims<4>& input2_dims,
+ T output_activation_min, T output_activation_max,
+ T* output_data, const Dims<4>& output_dims) {
+ tflite::ArithmeticParams op_params;
+ op_params.float_activation_min = output_activation_min;
+ op_params.float_activation_max = output_activation_max;
+ BroadcastAdd4DSlow(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data,
+ DimsToShape(output_dims), output_data);
+}
+
+template <FusedActivationFunctionType Ac>
+inline void BroadcastAdd(int left_shift, const uint8* input1_data,
+ const Dims<4>& input1_dims, int32 input1_offset,
+ int32 input1_multiplier, int input1_shift,
+ const uint8* input2_data, const Dims<4>& input2_dims,
+ int32 input2_offset, int32 input2_multiplier,
+ int input2_shift, int32 output_offset,
+ int32 output_multiplier, int output_shift,
+ int32 output_activation_min,
+ int32 output_activation_max, uint8* output_data,
+ const Dims<4>& output_dims) {
+ constexpr int kReverseShift = -1;
+ static_assert(Ac == FusedActivationFunctionType::kNone ||
+ Ac == FusedActivationFunctionType::kRelu ||
+ Ac == FusedActivationFunctionType::kRelu6 ||
+ Ac == FusedActivationFunctionType::kRelu1,
+ "");
+ TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+ if (Ac == FusedActivationFunctionType::kNone) {
+ TFLITE_DCHECK_EQ(output_activation_min, 0);
+ TFLITE_DCHECK_EQ(output_activation_max, 255);
+ }
+
+ tflite::ArithmeticParams op_params;
+ op_params.left_shift = left_shift;
+ op_params.input1_offset = input1_offset;
+ op_params.input1_multiplier = input1_multiplier;
+ op_params.input1_shift = kReverseShift * input1_shift;
+ op_params.input2_offset = input2_offset;
+ op_params.input2_multiplier = input2_multiplier;
+ op_params.input2_shift = kReverseShift * input2_shift;
+ op_params.output_offset = output_offset;
+ op_params.output_multiplier = output_multiplier;
+ op_params.output_shift = kReverseShift * output_shift;
+ op_params.quantized_activation_min = output_activation_min;
+ op_params.quantized_activation_max = output_activation_max;
+ BroadcastAdd4DSlow(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data,
+ DimsToShape(output_dims), output_data);
+}
+
+template <FusedActivationFunctionType Ac>
+inline void BroadcastAddFivefold(
+ int y0, int y1, int y2, int y3, int y4, int left_shift,
+ const uint8* input1_data, const Dims<4>& input1_dims, int32 input1_offset,
+ int32 input1_multiplier, int input1_shift, const uint8* input2_data,
+ const Dims<4>& input2_dims, int32 input2_offset, int32 input2_multiplier,
+ int input2_shift, int32 output_offset, int32 output_multiplier,
+ int output_shift, int32 output_activation_min, int32 output_activation_max,
+ uint8* output_data, const Dims<4>& output_dims) {
+ constexpr int kReverseShift = -1;
+ static_assert(Ac == FusedActivationFunctionType::kNone ||
+ Ac == FusedActivationFunctionType::kRelu ||
+ Ac == FusedActivationFunctionType::kRelu6 ||
+ Ac == FusedActivationFunctionType::kRelu1,
+ "");
+ TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+ if (Ac == FusedActivationFunctionType::kNone) {
+ TFLITE_DCHECK_EQ(output_activation_min, 0);
+ TFLITE_DCHECK_EQ(output_activation_max, 255);
+ }
+ tflite::ArithmeticParams op_params;
+ op_params.broadcast_category =
+ tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
+ op_params.left_shift = left_shift;
+ op_params.input1_offset = input1_offset;
+ op_params.input1_multiplier = input1_multiplier;
+ op_params.input1_shift = kReverseShift * input1_shift;
+ op_params.input2_offset = input2_offset;
+ op_params.input2_multiplier = input2_multiplier;
+ op_params.input2_shift = kReverseShift * input2_shift;
+ op_params.output_offset = output_offset;
+ op_params.output_multiplier = output_multiplier;
+ op_params.output_shift = kReverseShift * output_shift;
+ op_params.quantized_activation_min = output_activation_min;
+ op_params.quantized_activation_max = output_activation_max;
+ op_params.broadcast_shape[4] = y0;
+ op_params.broadcast_shape[3] = y1;
+ op_params.broadcast_shape[2] = y2;
+ op_params.broadcast_shape[1] = y3;
+ op_params.broadcast_shape[0] = y4;
+ BroadcastAddFivefold(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data,
+ DimsToShape(output_dims), output_data);
+}
+
+// legacy, for compatibility with old checked-in code
+template <FusedActivationFunctionType Ac, typename T>
+void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
+ const T* input2_data, const Dims<4>& input2_dims,
+ T* output_data, const Dims<4>& output_dims) {
+ T output_activation_min, output_activation_max;
+ GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
+
+ BroadcastAdd(input1_data, input1_dims, input2_data, input2_dims,
+ output_activation_min, output_activation_max, output_data,
+ output_dims);
+}
+
+template <FusedActivationFunctionType Ac>
+inline void Add(const int16* input1_data, const Dims<4>& input1_dims,
+ int input1_shift, const int16* input2_data,
+ const Dims<4>& input2_dims, int input2_shift,
+ int16 output_activation_min, int16 output_activation_max,
+ int16* output_data, const Dims<4>& output_dims) {
+ constexpr int kReverseShift = -1;
+ static_assert(Ac == FusedActivationFunctionType::kNone ||
+ Ac == FusedActivationFunctionType::kRelu ||
+ Ac == FusedActivationFunctionType::kRelu6 ||
+ Ac == FusedActivationFunctionType::kRelu1,
+ "");
+ TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+ if (Ac == FusedActivationFunctionType::kNone) {
+ TFLITE_DCHECK_EQ(output_activation_min, -32768);
+ TFLITE_DCHECK_EQ(output_activation_max, 32767);
+ }
+
+ tflite::ArithmeticParams op_params;
+ op_params.input1_shift = kReverseShift * input1_shift;
+ op_params.input2_shift = kReverseShift * input2_shift;
+ op_params.quantized_activation_min = output_activation_min;
+ op_params.quantized_activation_max = output_activation_max;
+ Add(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
+ output_data);
+}
+
+inline void Sub(const float* input1_data, const Dims<4>& input1_dims,
+ const float* input2_data, const Dims<4>& input2_dims,
+ float* output_data, const Dims<4>& output_dims) {
+ float output_activation_min, output_activation_max;
+ GetActivationMinMax(FusedActivationFunctionType::kNone,
+ &output_activation_min, &output_activation_max);
+ tflite::ArithmeticParams op_params;
+ op_params.float_activation_min = output_activation_min;
+ op_params.float_activation_max = output_activation_max;
+ Sub(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
+ output_data);
+}
+
+template <typename T>
+void Sub(const T* input1_data, const Dims<4>& input1_dims, const T* input2_data,
+ const Dims<4>& input2_dims, T* output_data,
+ const Dims<4>& output_dims) {
+ T output_activation_min, output_activation_max;
+ GetActivationMinMax(FusedActivationFunctionType::kNone,
+ &output_activation_min, &output_activation_max);
+ tflite::ArithmeticParams op_params;
+ op_params.quantized_activation_min = output_activation_min;
+ op_params.quantized_activation_max = output_activation_max;
+ Sub(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
+ output_data);
+}
+
inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int kwidth, int kheight,
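All of the Add, BroadcastAdd, BroadcastAddFivefold and Sub overloads added to this legacy header are thin shims: they fold the old flat argument lists and Dims<4> shapes into a tflite::ArithmeticParams struct and forward to the new RuntimeShape-based kernels. A hedged sketch of what a call site looks like once it migrates off the shim; the kRelu activation and the surrounding data pointers are assumptions for illustration:

// Hypothetical float Add call site, before and after migration.
void AddFloatCallSiteSketch(const float* input1_data, const Dims<4>& input1_dims,
                            const float* input2_data, const Dims<4>& input2_dims,
                            float* output_data, const Dims<4>& output_dims) {
  // Legacy form, still served by the shim above:
  //   Add<FusedActivationFunctionType::kRelu>(input1_data, input1_dims,
  //                                           input2_data, input2_dims,
  //                                           output_data, output_dims);
  // Equivalent call against the new interface:
  float activation_min, activation_max;
  GetActivationMinMax(FusedActivationFunctionType::kRelu, &activation_min,
                      &activation_max);
  tflite::ArithmeticParams op_params;
  op_params.float_activation_min = activation_min;
  op_params.float_activation_max = activation_max;
  Add(op_params, DimsToShape(input1_dims), input1_data,
      DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
      output_data);
}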
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc b/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc
index 8c57c987d7..420bc68b43 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.cc
@@ -342,6 +342,77 @@ void NeonClipVector(const float* vector, int v_size, float abs_limit,
}
}
+void NeonVectorScalarMultiply(const int8_t* vector, const int v_size,
+ const float scale, float* result) {
+ // Here the assumption is that each buffer is 4-byte aligned.
+ const int kWeightsPerUint32 = 4;
+ TFLITE_CHECK_EQ((intptr_t)(&vector[0]) & (kWeightsPerUint32 - 1), 0);
+ // If v_size is not divisible by kWeightsPerNeonLane, we cannot use the main
+ // vectorized loop, and we need to process sequentially. postamble_start shows
+ // the start index where this should happen.
+ const int kWeightsPerNeonLane = 16;
+ const int postamble_start = v_size - (v_size & (kWeightsPerNeonLane - 1));
+
+ // Create a vector of 4 floats with the scale value.
+ const float32x4_t scale_f32x4 = vdupq_n_f32(scale);
+ int v = 0;
+ for (; v < postamble_start; v += kWeightsPerNeonLane) {
+ // Load int8 values, sixteen at a time.
+ const int8x16_t v_i8x16 = vld1q_s8(vector + v);
+ // Split it into two components of size eight.
+ const int8x8_t v0_i8x8 = vget_low_s8(v_i8x16);
+ const int8x8_t v1_i8x8 = vget_high_s8(v_i8x16);
+ // Convert both components to int16 first.
+ const int16x8_t v0_i16x8 = vmovl_s8(v0_i8x8);
+ const int16x8_t v1_i16x8 = vmovl_s8(v1_i8x8);
+ // Split each of them into two components each.
+ const int16x4_t v0_i16x4 = vget_low_s16(v0_i16x8);
+ const int16x4_t v1_i16x4 = vget_high_s16(v0_i16x8);
+ const int16x4_t v2_i16x4 = vget_low_s16(v1_i16x8);
+ const int16x4_t v3_i16x4 = vget_high_s16(v1_i16x8);
+ // Convert these to int32 and then to float.
+ float32x4_t v0_f32x4 = vcvtq_f32_s32(vmovl_s16(v0_i16x4));
+ float32x4_t v1_f32x4 = vcvtq_f32_s32(vmovl_s16(v1_i16x4));
+ float32x4_t v2_f32x4 = vcvtq_f32_s32(vmovl_s16(v2_i16x4));
+ float32x4_t v3_f32x4 = vcvtq_f32_s32(vmovl_s16(v3_i16x4));
+ // Vector multiply four floats at a time.
+ v0_f32x4 = vmulq_f32(v0_f32x4, scale_f32x4);
+ v1_f32x4 = vmulq_f32(v1_f32x4, scale_f32x4);
+ v2_f32x4 = vmulq_f32(v2_f32x4, scale_f32x4);
+ v3_f32x4 = vmulq_f32(v3_f32x4, scale_f32x4);
+ // Store the results.
+ vst1q_f32(result + v, v0_f32x4);
+ vst1q_f32(result + v + 4, v1_f32x4);
+ vst1q_f32(result + v + 8, v2_f32x4);
+ vst1q_f32(result + v + 12, v3_f32x4);
+ }
+
+ if (v_size - postamble_start >= (kWeightsPerNeonLane >> 1)) {
+ // Load eight int8 values if there are at least eight remaining.
+ const int8x8_t v_i8x8 = vld1_s8(vector + v);
+ // Convert them to int16 first.
+ const int16x8_t v_i16x8 = vmovl_s8(v_i8x8);
+ // Split it into two components.
+ const int16x4_t v0_i16x4 = vget_low_s16(v_i16x8);
+ const int16x4_t v1_i16x4 = vget_high_s16(v_i16x8);
+ // Convert the components to floats.
+ float32x4_t v0_f32x4 = vcvtq_f32_s32(vmovl_s16(v0_i16x4));
+ float32x4_t v1_f32x4 = vcvtq_f32_s32(vmovl_s16(v1_i16x4));
+ // Vector multiply four floats at a time.
+ v0_f32x4 = vmulq_f32(v0_f32x4, scale_f32x4);
+ v1_f32x4 = vmulq_f32(v1_f32x4, scale_f32x4);
+ // Store the results.
+ vst1q_f32(result + v, v0_f32x4);
+ vst1q_f32(result + v + 4, v1_f32x4);
+ v += (kWeightsPerNeonLane >> 1);
+ }
+
+ // Postamble loop.
+ for (; v < v_size; v++) {
+ result[v] = scale * vector[v];
+ }
+}
+
void NeonSymmetricQuantizeFloats(const float* values, const int size,
int8_t* quantized_values, float* min,
float* max, float* scaling_factor) {
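The loop bounds in NeonVectorScalarMultiply follow the usual NEON pattern: a 16-wide main loop, an optional 8-wide step, then a scalar postamble. A worked example of the bound arithmetic (the v_size values are illustrative):

// kWeightsPerNeonLane = 16, v_size = 37:
//   postamble_start = 37 - (37 & 15) = 32  -> main loop covers [0, 32)
//   remaining = 37 - 32 = 5 < 8            -> the 8-wide step is skipped
//   scalar postamble covers indices 32..36
// kWeightsPerNeonLane = 16, v_size = 44:
//   postamble_start = 44 - (44 & 15) = 32  -> main loop covers [0, 32)
//   remaining = 12 >= 8                    -> 8-wide step covers [32, 40)
//   scalar postamble covers indices 40..43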
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h b/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h
index 7a5a8fc541..45c9f65b64 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/neon_tensor_utils.h
@@ -105,6 +105,10 @@ bool IsZeroVector(const float* vector, int v_size) {
return NEON_OR_PORTABLE(IsZeroVector, vector, v_size);
}
+void VectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+ float* result) {
+ NEON_OR_PORTABLE(VectorScalarMultiply, vector, v_size, scale, result);
+}
void ClipVector(const float* vector, int v_size, float abs_limit,
float* result) {
NEON_OR_PORTABLE(ClipVector, vector, v_size, abs_limit, result);
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
index c857fdf699..78567d52ea 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h
@@ -42,10 +42,12 @@ namespace optimized_ops {
// Unoptimized reference ops:
using reference_ops::ArgMax;
using reference_ops::ArgMinMax;
+using reference_ops::BroadcastAdd4DSlow;
using reference_ops::BroadcastGreater;
using reference_ops::BroadcastGreaterEqual;
using reference_ops::BroadcastLess;
using reference_ops::BroadcastLessEqual;
+using reference_ops::BroadcastSub4DSlow;
using reference_ops::Concatenation;
using reference_ops::DepthConcatenation;
using reference_ops::Dequantize;
@@ -217,98 +219,6 @@ SaturatingRoundingMultiplyByPOTParam(
SaturatingRoundingMultiplyByPOTParam(a.raw(), exponent));
}
-// DO NOT USE THIS STRUCT FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING ELEMENT-WISE
-// BROADCASTING.
-//
-// NdArrayDesc<N> describes the shape and memory layout of an N-dimensional
-// rectangular array of numbers.
-//
-// NdArrayDesc<N> is basically identical to Dims<N> defined in types.h.
-// However, as Dims<N> is to be deprecated, this class exists as an adaptor
-// to enable simple unoptimized implementations of element-wise broadcasting
-// operations.
-template <int N>
-struct NdArrayDesc {
- // The "extent" of each dimension. Indices along dimension d must be in the
- // half-open interval [0, extents[d]).
- int extents[N];
-
- // The number of *elements* (not bytes) between consecutive indices of each
- // dimension.
- int strides[N];
-};
-
-// DO NOT USE THIS FUNCTION FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING
-// ELEMENT-WISE BROADCASTING.
-//
-// Same as Offset(), except takes as NdArrayDesc<N> instead of Dims<N>.
-inline int SubscriptToIndex(const NdArrayDesc<4>& desc, int i0, int i1, int i2,
- int i3) {
- TFLITE_DCHECK(i0 >= 0 && i0 < desc.extents[0]);
- TFLITE_DCHECK(i1 >= 0 && i1 < desc.extents[1]);
- TFLITE_DCHECK(i2 >= 0 && i2 < desc.extents[2]);
- TFLITE_DCHECK(i3 >= 0 && i3 < desc.extents[3]);
- return i0 * desc.strides[0] + i1 * desc.strides[1] + i2 * desc.strides[2] +
- i3 * desc.strides[3];
-}
-
-// Given the dimensions of the operands for an element-wise binary broadcast,
-// adjusts them so that they can be directly iterated over with simple loops.
-// Returns the adjusted dims as instances of NdArrayDesc in 'desc0_out' and
-// 'desc1_out'. 'desc0_out' and 'desc1_out' cannot be nullptr.
-//
-// This function assumes that the two input shapes are compatible up to
-// broadcasting and the shorter one has already been prepended with 1s to be the
-// same length. E.g., if shape0 is (1, 16, 16, 64) and shape1 is (1, 64),
-// shape1 must already have been prepended to be (1, 1, 1, 64). Recall that
-// Dims<N> refer to shapes in reverse order. In this case, input0_dims will be
-// (64, 16, 16, 1) and input1_dims will be (64, 1, 1, 1).
-//
-// When two shapes are compatible up to broadcasting, for each dimension d,
-// the input extents are either equal, or one of them is 1.
-//
-// This function performs the following for each dimension d:
-// - If the extents are equal, then do nothing since the loop that walks over
-// both of the input arrays is correct.
-// - Otherwise, one (and only one) of the extents must be 1. Say extent0 is 1
-// and extent1 is e1. Then set extent0 to e1 and stride0 *to 0*. This allows
-// array0 to be referenced *at any index* in dimension d and still access the
-// same slice.
-template <int N>
-inline void NdArrayDescsForElementwiseBroadcast(const Dims<N>& input0_dims,
- const Dims<N>& input1_dims,
- NdArrayDesc<N>* desc0_out,
- NdArrayDesc<N>* desc1_out) {
- TFLITE_DCHECK(desc0_out != nullptr);
- TFLITE_DCHECK(desc1_out != nullptr);
-
- // Copy dims to desc.
- for (int i = 0; i < N; ++i) {
- desc0_out->extents[i] = input0_dims.sizes[i];
- desc0_out->strides[i] = input0_dims.strides[i];
- desc1_out->extents[i] = input1_dims.sizes[i];
- desc1_out->strides[i] = input1_dims.strides[i];
- }
-
- // Walk over each dimension. If the extents are equal do nothing.
- // Otherwise, set the desc with extent 1 to have extent equal to the other and
- // stride 0.
- for (int i = 0; i < N; ++i) {
- const int extent0 = ArraySize(input0_dims, i);
- const int extent1 = ArraySize(input1_dims, i);
- if (extent0 != extent1) {
- if (extent0 == 1) {
- desc0_out->strides[i] = 0;
- desc0_out->extents[i] = extent1;
- } else {
- TFLITE_DCHECK_EQ(extent1, 1);
- desc1_out->strides[i] = 0;
- desc1_out->extents[i] = extent0;
- }
- }
- }
-}
-
inline bool AreSameDims(const Dims<4>& dims1, const Dims<4>& dims2) {
for (int i = 0; i < 4; i++) {
if (dims1.sizes[i] != dims2.sizes[i]) {
@@ -2478,20 +2388,17 @@ inline void L2Normalization(const uint8* input_data,
}
}
-inline void Add(const float* input1_data, const Dims<4>& input1_dims,
- const float* input2_data, const Dims<4>& input2_dims,
- float output_activation_min, float output_activation_max,
- float* output_data, const Dims<4>& output_dims) {
+inline void Add(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape, const float* input1_data,
+ const RuntimeShape& input2_shape, const float* input2_data,
+ const RuntimeShape& output_shape, float* output_data) {
gemmlowp::ScopedProfilingLabel label("Add");
- TFLITE_DCHECK(IsPackedWithoutStrides(input1_dims));
- TFLITE_DCHECK(IsPackedWithoutStrides(input2_dims));
- TFLITE_DCHECK(IsPackedWithoutStrides(output_dims));
int i = 0;
- const int size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
+ const int size = MatchingFlatSize(input1_shape, input2_shape, output_shape);
#ifdef USE_NEON
- const auto activation_min = vdupq_n_f32(output_activation_min);
- const auto activation_max = vdupq_n_f32(output_activation_max);
+ const auto activation_min = vdupq_n_f32(params.float_activation_min);
+ const auto activation_max = vdupq_n_f32(params.float_activation_max);
for (; i <= size - 16; i += 16) {
auto a10 = vld1q_f32(input1_data + i);
auto a11 = vld1q_f32(input1_data + i + 4);
@@ -2530,29 +2437,26 @@ inline void Add(const float* input1_data, const Dims<4>& input1_dims,
for (; i < size; i++) {
auto x = input1_data[i] + input2_data[i];
- output_data[i] = ActivationFunctionWithMinMax(x, output_activation_min,
- output_activation_max);
+ output_data[i] = ActivationFunctionWithMinMax(
+ x, params.float_activation_min, params.float_activation_max);
}
}
// Element-wise add that can often be used for inner loop of broadcast add as
// well as the non-broadcast add.
-inline void AddElementwise(int size, int left_shift, const uint8* input1_data,
- int32 input1_offset, int32 input1_multiplier,
- int input1_shift, const uint8* input2_data,
- int32 input2_offset, int32 input2_multiplier,
- int input2_shift, int32 output_offset,
- int32 output_multiplier, int output_shift,
- int32 output_activation_min,
- int32 output_activation_max, uint8* output_data) {
+inline void AddElementwise(int size, const ArithmeticParams& params,
+ const uint8* input1_data, const uint8* input2_data,
+ uint8* output_data) {
int i = 0;
- TFLITE_DCHECK_GT(input1_offset, -256);
- TFLITE_DCHECK_GT(input2_offset, -256);
- TFLITE_DCHECK_LT(input1_offset, 256);
- TFLITE_DCHECK_LT(input2_offset, 256);
+ TFLITE_DCHECK_GT(params.input1_offset, -256);
+ TFLITE_DCHECK_GT(params.input2_offset, -256);
+ TFLITE_DCHECK_LT(params.input1_offset, 256);
+ TFLITE_DCHECK_LT(params.input2_offset, 256);
#ifdef USE_NEON
- const auto output_activation_min_vector = vdup_n_u8(output_activation_min);
- const auto output_activation_max_vector = vdup_n_u8(output_activation_max);
+ const auto output_activation_min_vector =
+ vdup_n_u8(params.quantized_activation_min);
+ const auto output_activation_max_vector =
+ vdup_n_u8(params.quantized_activation_max);
for (; i <= size - 8; i += 8) {
const auto input1_val_original = vld1_u8(input1_data + i);
const auto input2_val_original = vld1_u8(input2_data + i);
@@ -2561,9 +2465,9 @@ inline void AddElementwise(int size, int left_shift, const uint8* input1_data,
const auto input2_val_s16 =
vreinterpretq_s16_u16(vmovl_u8(input2_val_original));
const auto input1_val =
- vaddq_s16(input1_val_s16, vdupq_n_s16(input1_offset));
+ vaddq_s16(input1_val_s16, vdupq_n_s16(params.input1_offset));
const auto input2_val =
- vaddq_s16(input2_val_s16, vdupq_n_s16(input2_offset));
+ vaddq_s16(input2_val_s16, vdupq_n_s16(params.input2_offset));
const auto input1_val_high = vget_high_s16(input1_val);
const auto input1_val_low = vget_low_s16(input1_val);
const auto input2_val_high = vget_high_s16(input2_val);
@@ -2572,32 +2476,32 @@ inline void AddElementwise(int size, int left_shift, const uint8* input1_data,
auto x12 = vmovl_s16(input1_val_high);
auto x21 = vmovl_s16(input2_val_low);
auto x22 = vmovl_s16(input2_val_high);
- const auto left_shift_dup = vdupq_n_s32(left_shift);
+ const auto left_shift_dup = vdupq_n_s32(params.left_shift);
x11 = vshlq_s32(x11, left_shift_dup);
x12 = vshlq_s32(x12, left_shift_dup);
x21 = vshlq_s32(x21, left_shift_dup);
x22 = vshlq_s32(x22, left_shift_dup);
- x11 = vqrdmulhq_n_s32(x11, input1_multiplier);
- x12 = vqrdmulhq_n_s32(x12, input1_multiplier);
- x21 = vqrdmulhq_n_s32(x21, input2_multiplier);
- x22 = vqrdmulhq_n_s32(x22, input2_multiplier);
- const auto input1_shift_dup = vdupq_n_s32(-input1_shift);
- const auto input2_shift_dup = vdupq_n_s32(-input2_shift);
+ x11 = vqrdmulhq_n_s32(x11, params.input1_multiplier);
+ x12 = vqrdmulhq_n_s32(x12, params.input1_multiplier);
+ x21 = vqrdmulhq_n_s32(x21, params.input2_multiplier);
+ x22 = vqrdmulhq_n_s32(x22, params.input2_multiplier);
+ const auto input1_shift_dup = vdupq_n_s32(params.input1_shift);
+ const auto input2_shift_dup = vdupq_n_s32(params.input2_shift);
x11 = vshlq_s32(x11, input1_shift_dup);
x12 = vshlq_s32(x12, input1_shift_dup);
x21 = vshlq_s32(x21, input2_shift_dup);
x22 = vshlq_s32(x22, input2_shift_dup);
auto s1 = vaddq_s32(x11, x21);
auto s2 = vaddq_s32(x12, x22);
- s1 = vqrdmulhq_n_s32(s1, output_multiplier);
- s2 = vqrdmulhq_n_s32(s2, output_multiplier);
+ s1 = vqrdmulhq_n_s32(s1, params.output_multiplier);
+ s2 = vqrdmulhq_n_s32(s2, params.output_multiplier);
using gemmlowp::RoundingDivideByPOT;
- s1 = RoundingDivideByPOT(s1, output_shift);
- s2 = RoundingDivideByPOT(s2, output_shift);
+ s1 = RoundingDivideByPOT(s1, -params.output_shift);
+ s2 = RoundingDivideByPOT(s2, -params.output_shift);
const auto s1_narrowed = vmovn_s32(s1);
const auto s2_narrowed = vmovn_s32(s2);
const auto s = vaddq_s16(vcombine_s16(s1_narrowed, s2_narrowed),
- vdupq_n_s16(output_offset));
+ vdupq_n_s16(params.output_offset));
const auto clamped =
vmax_u8(output_activation_min_vector,
vmin_u8(output_activation_max_vector, vqmovun_s16(s)));
@@ -2606,101 +2510,74 @@ inline void AddElementwise(int size, int left_shift, const uint8* input1_data,
#endif // NEON
for (; i < size; ++i) {
- const int32 input1_val = input1_offset + input1_data[i];
- const int32 input2_val = input2_offset + input2_data[i];
- const int32 shifted_input1_val = input1_val * (1 << left_shift);
- const int32 shifted_input2_val = input2_val * (1 << left_shift);
+ const int32 input1_val = params.input1_offset + input1_data[i];
+ const int32 input2_val = params.input2_offset + input2_data[i];
+ const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
+ const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
const int32 scaled_input1_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input1_val, input1_multiplier,
- kReverseShift * input1_shift);
+ shifted_input1_val, params.input1_multiplier, params.input1_shift);
const int32 scaled_input2_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input2_val, input2_multiplier,
- kReverseShift * input2_shift);
+ shifted_input2_val, params.input2_multiplier, params.input2_shift);
const int32 raw_sum = scaled_input1_val + scaled_input2_val;
const int32 raw_output =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
- raw_sum, output_multiplier, kReverseShift * output_shift) +
- output_offset;
- const int32 clamped_output = std::min(
- output_activation_max, std::max(output_activation_min, raw_output));
+ raw_sum, params.output_multiplier, params.output_shift) +
+ params.output_offset;
+ const int32 clamped_output =
+ std::min(params.quantized_activation_max,
+ std::max(params.quantized_activation_min, raw_output));
output_data[i] = static_cast<uint8>(clamped_output);
}
}
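For one pair of uint8 values the scalar tail above performs the following sequence, now reading everything from ArithmeticParams; note that the input and output shifts are stored pre-negated, which is why the old kReverseShift factor disappears here. A minimal standalone sketch; the function name is illustrative, while MultiplyByQuantizedMultiplierSmallerThanOneExp is the helper already used by this file:

inline uint8 AddQuantizedScalarSketch(const ArithmeticParams& params,
                                      uint8 input1, uint8 input2) {
  // Re-center both inputs around zero, then widen headroom by left_shift.
  const int32 input1_val = params.input1_offset + input1;
  const int32 input2_val = params.input2_offset + input2;
  const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
  const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
  // Rescale each input to the common accumulation scale.
  const int32 scaled_input1_val =
      MultiplyByQuantizedMultiplierSmallerThanOneExp(
          shifted_input1_val, params.input1_multiplier, params.input1_shift);
  const int32 scaled_input2_val =
      MultiplyByQuantizedMultiplierSmallerThanOneExp(
          shifted_input2_val, params.input2_multiplier, params.input2_shift);
  // Sum, rescale to the output scale, add the output zero point, and clamp.
  const int32 raw_sum = scaled_input1_val + scaled_input2_val;
  const int32 raw_output =
      MultiplyByQuantizedMultiplierSmallerThanOneExp(
          raw_sum, params.output_multiplier, params.output_shift) +
      params.output_offset;
  const int32 clamped_output =
      std::min(params.quantized_activation_max,
               std::max(params.quantized_activation_min, raw_output));
  return static_cast<uint8>(clamped_output);
}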
-// legacy, for compatibility with old checked-in code
-template <FusedActivationFunctionType Ac>
-void Add(const float* input1_data, const Dims<4>& input1_dims,
- const float* input2_data, const Dims<4>& input2_dims,
- float* output_data, const Dims<4>& output_dims) {
- float output_activation_min, output_activation_max;
- GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
-
- Add(input1_data, input1_dims, input2_data, input2_dims, output_activation_min,
- output_activation_max, output_data, output_dims);
-}
-
-template <FusedActivationFunctionType Ac>
-inline void Add(int left_shift, const uint8* input1_data,
- const Dims<4>& input1_dims, int32 input1_offset,
- int32 input1_multiplier, int input1_shift,
- const uint8* input2_data, const Dims<4>& input2_dims,
- int32 input2_offset, int32 input2_multiplier, int input2_shift,
- int32 output_offset, int32 output_multiplier, int output_shift,
- int32 output_activation_min, int32 output_activation_max,
- uint8* output_data, const Dims<4>& output_dims) {
- static_assert(Ac == FusedActivationFunctionType::kNone ||
- Ac == FusedActivationFunctionType::kRelu ||
- Ac == FusedActivationFunctionType::kRelu6 ||
- Ac == FusedActivationFunctionType::kRelu1,
- "");
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
- if (Ac == FusedActivationFunctionType::kNone) {
- TFLITE_DCHECK_EQ(output_activation_min, 0);
- TFLITE_DCHECK_EQ(output_activation_max, 255);
- }
+inline void Add(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape, const uint8* input1_data,
+ const RuntimeShape& input2_shape, const uint8* input2_data,
+ const RuntimeShape& output_shape, uint8* output_data) {
+ TFLITE_DCHECK_LE(params.quantized_activation_min,
+ params.quantized_activation_max);
gemmlowp::ScopedProfilingLabel label("Add/8bit");
- const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
- TFLITE_DCHECK(IsPackedWithoutStrides(input1_dims));
- TFLITE_DCHECK(IsPackedWithoutStrides(input2_dims));
- TFLITE_DCHECK(IsPackedWithoutStrides(output_dims));
-
- TFLITE_DCHECK_GT(input1_offset, -256);
- TFLITE_DCHECK_GT(input2_offset, -256);
- TFLITE_DCHECK_LT(input1_offset, 256);
- TFLITE_DCHECK_LT(input2_offset, 256);
- AddElementwise(flat_size, left_shift, input1_data, input1_offset,
- input1_multiplier, input1_shift, input2_data, input2_offset,
- input2_multiplier, input2_shift, output_offset,
- output_multiplier, output_shift, output_activation_min,
- output_activation_max, output_data);
+ const int flat_size =
+ MatchingFlatSize(input1_shape, input2_shape, output_shape);
+
+ TFLITE_DCHECK_GT(params.input1_offset, -256);
+ TFLITE_DCHECK_GT(params.input2_offset, -256);
+ TFLITE_DCHECK_LT(params.input1_offset, 256);
+ TFLITE_DCHECK_LT(params.input2_offset, 256);
+ AddElementwise(flat_size, params, input1_data, input2_data, output_data);
}
-inline void Add(const int16* input1_data, const Dims<4>& input1_dims,
- int input1_shift, const int16* input2_data,
- const Dims<4>& input2_dims, int input2_shift,
- int16 output_activation_min, int16 output_activation_max,
- int16* output_data, const Dims<4>& output_dims) {
+inline void Add(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape, const int16* input1_data,
+ const RuntimeShape& input2_shape, const int16* input2_data,
+ const RuntimeShape& output_shape, int16* output_data) {
gemmlowp::ScopedProfilingLabel label("Add/Int16");
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+ TFLITE_DCHECK_LE(params.quantized_activation_min,
+ params.quantized_activation_max);
- const int flat_size = MatchingFlatSize(output_dims, input1_dims, input2_dims);
+ const int input1_shift = params.input1_shift;
+ const int flat_size =
+ MatchingFlatSize(output_shape, input1_shape, input2_shape);
+ const int16 output_activation_min = params.quantized_activation_min;
+ const int16 output_activation_max = params.quantized_activation_max;
- TFLITE_DCHECK(input1_shift == 0 || input2_shift == 0);
- TFLITE_DCHECK_GE(input1_shift, 0);
- TFLITE_DCHECK_GE(input2_shift, 0);
+ TFLITE_DCHECK(input1_shift == 0 || params.input2_shift == 0);
+ TFLITE_DCHECK_LE(input1_shift, 0);
+ TFLITE_DCHECK_LE(params.input2_shift, 0);
const int16* not_shift_input = input1_shift == 0 ? input1_data : input2_data;
const int16* shift_input = input1_shift == 0 ? input2_data : input1_data;
- const int input_shift = input1_shift == 0 ? input2_shift : input1_shift;
+ const int input_right_shift =
+ input1_shift == 0 ? -params.input2_shift : -input1_shift;
for (int i = 0; i < flat_size; i++) {
// F0 uses 0 integer bits, range [-1, 1].
using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
F0 input_ready_scaled = F0::FromRaw(not_shift_input[i]);
- F0 scaled_input =
- F0::FromRaw(gemmlowp::RoundingDivideByPOT(shift_input[i], input_shift));
+ F0 scaled_input = F0::FromRaw(
+ gemmlowp::RoundingDivideByPOT(shift_input[i], input_right_shift));
F0 result = gemmlowp::SaturatingAdd(scaled_input, input_ready_scaled);
const int16 raw_output = result.raw();
const int16 clamped_output = std::min(
@@ -2709,195 +2586,59 @@ inline void Add(const int16* input1_data, const Dims<4>& input1_dims,
}
}
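In the int16 path both operands are Q0.15 fixed-point values in [-1, 1); the operand with the non-zero shift is first scaled down by a rounding right shift, and the add saturates instead of wrapping. A small worked example, assuming input_right_shift = 1 and the illustrative raw values 16384 (about 0.5) and 24576 (about 0.75):

inline int16 SaturatingFixedPointAddExample() {
  using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
  F0 not_shifted = F0::FromRaw(24576);  // ~0.75, this operand has shift 0
  F0 shifted = F0::FromRaw(gemmlowp::RoundingDivideByPOT(
      static_cast<std::int16_t>(16384), 1));  // 16384 >> 1 = 8192, ~0.25
  // 0.75 + 0.25 would be 1.0, which Q0.15 cannot represent, so SaturatingAdd
  // clamps the raw result to 32767 (~0.99997).
  F0 result = gemmlowp::SaturatingAdd(shifted, not_shifted);
  return result.raw();  // 32767
}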
-inline void Add(const int32* input1_data, const Dims<4>& input1_dims,
- const int32* input2_data, const Dims<4>& input2_dims,
- int32 output_activation_min, int32 output_activation_max,
- int32* output_data, const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("Add/int32");
-
- const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
- for (int i = 0; i < flat_size; ++i) {
- output_data[i] = ActivationFunctionWithMinMax(
- input1_data[i] + input2_data[i], output_activation_min,
- output_activation_max);
- }
-}
-
-template <FusedActivationFunctionType Ac>
-inline void Add(const int16* input1_data, const Dims<4>& input1_dims,
- int input1_shift, const int16* input2_data,
- const Dims<4>& input2_dims, int input2_shift,
- int16 output_activation_min, int16 output_activation_max,
- int16* output_data, const Dims<4>& output_dims) {
- static_assert(Ac == FusedActivationFunctionType::kNone ||
- Ac == FusedActivationFunctionType::kRelu ||
- Ac == FusedActivationFunctionType::kRelu6 ||
- Ac == FusedActivationFunctionType::kRelu1,
- "");
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
- if (Ac == FusedActivationFunctionType::kNone) {
- TFLITE_DCHECK_EQ(output_activation_min, -32768);
- TFLITE_DCHECK_EQ(output_activation_max, 32767);
- }
-
- Add(input1_data, input1_dims, input1_shift, input2_data, input2_dims,
- input2_shift, output_activation_min, output_activation_max, output_data,
- output_dims);
-}
-
-template <FusedActivationFunctionType Ac>
-void Add(const int32* input1_data, const Dims<4>& input1_dims,
- const int32* input2_data, const Dims<4>& input2_dims,
- int32* output_data, const Dims<4>& output_dims) {
+inline void Add(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape, const int32* input1_data,
+ const RuntimeShape& input2_shape, const int32* input2_data,
+ const RuntimeShape& output_shape, int32* output_data) {
gemmlowp::ScopedProfilingLabel label("Add/int32");
- TFLITE_DCHECK(Ac == FusedActivationFunctionType::kNone);
- auto input1_map = MapAsVector(input1_data, input1_dims);
- auto input2_map = MapAsVector(input2_data, input2_dims);
- auto output_map = MapAsVector(output_data, output_dims);
- if (AreSameDims(input1_dims, input2_dims)) {
+ auto input1_map = MapAsVector(input1_data, input1_shape);
+ auto input2_map = MapAsVector(input2_data, input2_shape);
+ auto output_map = MapAsVector(output_data, output_shape);
+ if (input1_shape == input2_shape) {
output_map.array() = input1_map.array() + input2_map.array();
- } else if (FlatSize(input2_dims) == 1) {
+ } else if (input2_shape.FlatSize() == 1) {
auto scalar = input2_data[0];
output_map.array() = input1_map.array() + scalar;
- } else if (FlatSize(input1_dims) == 1) {
+ } else if (input1_shape.FlatSize() == 1) {
auto scalar = input1_data[0];
output_map.array() = scalar + input2_map.array();
} else {
// Should not come here.
TFLITE_DCHECK(false);
}
+ output_map = output_map.cwiseMax(params.quantized_activation_min);
+ output_map = output_map.cwiseMin(params.quantized_activation_max);
}
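The int32 path leans on Eigen: MapAsVector wraps each buffer as a flat vector, which handles the same-shape case and both scalar-broadcast cases without explicit loops, and the two new cwiseMax/cwiseMin lines restore the activation clamp that the deleted loop applied through ActivationFunctionWithMinMax. A scalar sketch of the same-shape branch with the clamp written out (flat_size is assumed to be the common element count):

inline void AddInt32ScalarSketch(const ArithmeticParams& params, int flat_size,
                                 const int32* input1_data,
                                 const int32* input2_data, int32* output_data) {
  for (int i = 0; i < flat_size; ++i) {
    int32 sum = input1_data[i] + input2_data[i];
    sum = std::max(sum, params.quantized_activation_min);
    sum = std::min(sum, params.quantized_activation_max);
    output_data[i] = sum;
  }
}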
-// TODO(jiawen): We can implement BroadcastAdd on buffers of arbitrary
-// dimensionality if the runtime code does a single loop over one dimension
-// that handles broadcasting as the base case. The code generator would then
-// generate max(D1, D2) nested for loops.
-// TODO(benoitjacob): BroadcastAdd is intentionally duplicated from
-// reference_ops.h. Once an optimized version is implemented and NdArrayDesc<T>
-// is no longer referenced in this file, move NdArrayDesc<T> from types.h to
-// reference_ops.h.
-template <typename T>
-void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
- const T* input2_data, const Dims<4>& input2_dims,
- T output_activation_min, T output_activation_max,
- T* output_data, const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("BroadcastAdd");
-
- NdArrayDesc<4> desc1;
- NdArrayDesc<4> desc2;
- NdArrayDescsForElementwiseBroadcast(input1_dims, input2_dims, &desc1, &desc2);
-
- // In Tensorflow, the dimensions are canonically named (batch_number, row,
- // col, channel), with extents (batches, height, width, depth), with the
- // trailing dimension changing most rapidly (channels has the smallest stride,
- // typically 1 element).
- //
- // In generated C code, we store arrays with the dimensions reversed. The
- // first dimension has smallest stride.
- //
- // We name our variables by their Tensorflow convention, but generate C code
- // nesting loops such that the innermost loop has the smallest stride for the
- // best cache behavior.
- for (int b = 0; b < ArraySize(output_dims, 3); ++b) {
- for (int y = 0; y < ArraySize(output_dims, 2); ++y) {
- for (int x = 0; x < ArraySize(output_dims, 1); ++x) {
- for (int c = 0; c < ArraySize(output_dims, 0); ++c) {
- output_data[Offset(output_dims, c, x, y, b)] =
- ActivationFunctionWithMinMax(
- input1_data[SubscriptToIndex(desc1, c, x, y, b)] +
- input2_data[SubscriptToIndex(desc2, c, x, y, b)],
- output_activation_min, output_activation_max);
- }
- }
- }
- }
-}
-
-// legacy, for compatibility with old checked-in code
-template <FusedActivationFunctionType Ac, typename T>
-void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
- const T* input2_data, const Dims<4>& input2_dims,
- T* output_data, const Dims<4>& output_dims) {
- T output_activation_min, output_activation_max;
- GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
-
- BroadcastAdd(input1_data, input1_dims, input2_data, input2_dims,
- output_activation_min, output_activation_max, output_data,
- output_dims);
-}
-
-inline void BroadcastAdd(int left_shift, const uint8* input1_data,
- const Dims<4>& input1_dims, int32 input1_offset,
- int32 input1_multiplier, int input1_shift,
- const uint8* input2_data, const Dims<4>& input2_dims,
- int32 input2_offset, int32 input2_multiplier,
- int input2_shift, int32 output_offset,
- int32 output_multiplier, int output_shift,
- int32 output_activation_min,
- int32 output_activation_max, uint8* output_data,
- const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("BroadcastAddGeneric/8bit");
-
- NdArrayDesc<4> desc1;
- NdArrayDesc<4> desc2;
- NdArrayDescsForElementwiseBroadcast(input1_dims, input2_dims, &desc1, &desc2);
-
- // In Tensorflow, the dimensions are canonically named (batch_number, row,
- // col, channel), with extents (batches, height, width, depth), with the
- // trailing dimension changing most rapidly (channels has the smallest stride,
- // typically 1 element).
- //
- // In generated C code, we store arrays with the dimensions reversed. The
- // first dimension has smallest stride.
- //
- // We name our variables by their Tensorflow convention, but generate C code
- // nesting loops such that the innermost loop has the smallest stride for the
- // best cache behavior.
- for (int b = 0; b < ArraySize(output_dims, 3); ++b) {
- for (int y = 0; y < ArraySize(output_dims, 2); ++y) {
- for (int x = 0; x < ArraySize(output_dims, 1); ++x) {
- for (int c = 0; c < ArraySize(output_dims, 0); ++c) {
- const int32 input1_val =
- input1_offset + input1_data[SubscriptToIndex(desc1, c, x, y, b)];
- const int32 input2_val =
- input2_offset + input2_data[SubscriptToIndex(desc2, c, x, y, b)];
- const int32 shifted_input1_val = input1_val * (1 << left_shift);
- const int32 shifted_input2_val = input2_val * (1 << left_shift);
- const int32 scaled_input1_val =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input1_val, input1_multiplier,
- kReverseShift * input1_shift);
- const int32 scaled_input2_val =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input2_val, input2_multiplier,
- kReverseShift * input2_shift);
- const int32 raw_sum = scaled_input1_val + scaled_input2_val;
- const int32 raw_output =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- raw_sum, output_multiplier, kReverseShift * output_shift) +
- output_offset;
- const int32 clamped_output =
- std::min(output_activation_max,
- std::max(output_activation_min, raw_output));
- output_data[Offset(output_dims, c, x, y, b)] =
- static_cast<uint8>(clamped_output);
- }
- }
- }
- }
-}
-
-inline void BroadcastAddFivefold(
- int y0, int y1, int y2, int y3, int y4, int left_shift,
- const uint8* input1_data, const Dims<4>& input1_dims, int32 input1_offset,
- int32 input1_multiplier, int input1_shift, const uint8* input2_data,
- const Dims<4>& input2_dims, int32 input2_offset, int32 input2_multiplier,
- int input2_shift, int32 output_offset, int32 output_multiplier,
- int output_shift, int32 output_activation_min, int32 output_activation_max,
- uint8* output_data, const Dims<4>& output_dims) {
+inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
+ const RuntimeShape& unswitched_input1_shape,
+ const uint8* unswitched_input1_data,
+ const RuntimeShape& unswitched_input2_shape,
+ const uint8* unswitched_input2_data,
+ const RuntimeShape& output_shape,
+ uint8* output_data) {
gemmlowp::ScopedProfilingLabel label("BroadcastAddFivefold/8bit");
+ ArithmeticParams switched_params = unswitched_params;
+ switched_params.input1_offset = unswitched_params.input2_offset;
+ switched_params.input1_multiplier = unswitched_params.input2_multiplier;
+ switched_params.input1_shift = unswitched_params.input2_shift;
+ switched_params.input2_offset = unswitched_params.input1_offset;
+ switched_params.input2_multiplier = unswitched_params.input1_multiplier;
+ switched_params.input2_shift = unswitched_params.input1_shift;
+
+ const bool use_unswitched =
+ unswitched_params.broadcast_category ==
+ tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
+
+ const ArithmeticParams& params =
+ use_unswitched ? unswitched_params : switched_params;
+ const uint8* input1_data =
+ use_unswitched ? unswitched_input1_data : unswitched_input2_data;
+ const uint8* input2_data =
+ use_unswitched ? unswitched_input2_data : unswitched_input1_data;
+
// Fivefold nested loops. The second input resets its position for each
// iteration of the second loop. The first input resets its position at the
// beginning of the fourth loop. The innermost loop is an elementwise add of
@@ -2905,82 +2646,29 @@ inline void BroadcastAddFivefold(
uint8* output_data_ptr = output_data;
const uint8* input1_data_ptr = input1_data;
const uint8* input2_data_reset = input2_data;
- for (int i4 = 0; i4 < y4; ++i4) {
+ int y0 = params.broadcast_shape[0];
+ int y1 = params.broadcast_shape[1];
+ int y2 = params.broadcast_shape[2];
+ int y3 = params.broadcast_shape[3];
+ int y4 = params.broadcast_shape[4];
+ for (int i0 = 0; i0 < y0; ++i0) {
const uint8* input2_data_ptr;
- for (int i3 = 0; i3 < y3; ++i3) {
+ for (int i1 = 0; i1 < y1; ++i1) {
input2_data_ptr = input2_data_reset;
for (int i2 = 0; i2 < y2; ++i2) {
- for (int i1 = 0; i1 < y1; ++i1) {
- AddElementwise(
- y0, left_shift, input1_data_ptr, input1_offset, input1_multiplier,
- input1_shift, input2_data_ptr, input2_offset, input2_multiplier,
- input2_shift, output_offset, output_multiplier, output_shift,
- output_activation_min, output_activation_max, output_data_ptr);
- input2_data_ptr += y0;
- output_data_ptr += y0;
+ for (int i3 = 0; i3 < y3; ++i3) {
+ AddElementwise(y4, params, input1_data_ptr, input2_data_ptr,
+ output_data_ptr);
+ input2_data_ptr += y4;
+ output_data_ptr += y4;
}
- input1_data_ptr += y0;
+ input1_data_ptr += y4;
}
}
input2_data_reset = input2_data_ptr;
}
}
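The rewritten loops read their trip counts from params.broadcast_shape instead of five separate y0..y4 arguments; as the legacy shim earlier in this patch shows, the old innermost run length y0 lands in broadcast_shape[4] and the old outermost count y4 in broadcast_shape[0]. A sketch of how a caller sets up this fast path; the trip counts are illustrative and quant_params is assumed to carry the usual offsets, multipliers and shifts:

void BroadcastAddFivefoldCallSketch(const RuntimeShape& input1_shape,
                                    const uint8* input1_data,
                                    const RuntimeShape& input2_shape,
                                    const uint8* input2_data,
                                    const RuntimeShape& output_shape,
                                    uint8* output_data,
                                    const tflite::ArithmeticParams& quant_params) {
  tflite::ArithmeticParams op_params = quant_params;
  op_params.broadcast_category =
      tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
  // Outermost to innermost; broadcast_shape[4] is the length of the contiguous
  // runs handed to AddElementwise.
  op_params.broadcast_shape[0] = 1;
  op_params.broadcast_shape[1] = 2;  // input2 restarts at each step of this loop
  op_params.broadcast_shape[2] = 3;
  op_params.broadcast_shape[3] = 4;
  op_params.broadcast_shape[4] = 8;
  BroadcastAddFivefold(op_params, input1_shape, input1_data, input2_shape,
                       input2_data, output_shape, output_data);
}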
-template <FusedActivationFunctionType Ac>
-inline void BroadcastAdd(int left_shift, const uint8* input1_data,
- const Dims<4>& input1_dims, int32 input1_offset,
- int32 input1_multiplier, int input1_shift,
- const uint8* input2_data, const Dims<4>& input2_dims,
- int32 input2_offset, int32 input2_multiplier,
- int input2_shift, int32 output_offset,
- int32 output_multiplier, int output_shift,
- int32 output_activation_min,
- int32 output_activation_max, uint8* output_data,
- const Dims<4>& output_dims) {
- static_assert(Ac == FusedActivationFunctionType::kNone ||
- Ac == FusedActivationFunctionType::kRelu ||
- Ac == FusedActivationFunctionType::kRelu6 ||
- Ac == FusedActivationFunctionType::kRelu1,
- "");
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
- if (Ac == FusedActivationFunctionType::kNone) {
- TFLITE_DCHECK_EQ(output_activation_min, 0);
- TFLITE_DCHECK_EQ(output_activation_max, 255);
- }
- BroadcastAdd(left_shift, input1_data, input1_dims, input1_offset,
- input1_multiplier, input1_shift, input2_data, input2_dims,
- input2_offset, input2_multiplier, input2_shift, output_offset,
- output_multiplier, output_shift, output_activation_min,
- output_activation_max, output_data, output_dims);
-}
-
-template <FusedActivationFunctionType Ac>
-inline void BroadcastAddFivefold(
- int y0, int y1, int y2, int y3, int y4, int left_shift,
- const uint8* input1_data, const Dims<4>& input1_dims, int32 input1_offset,
- int32 input1_multiplier, int input1_shift, const uint8* input2_data,
- const Dims<4>& input2_dims, int32 input2_offset, int32 input2_multiplier,
- int input2_shift, int32 output_offset, int32 output_multiplier,
- int output_shift, int32 output_activation_min, int32 output_activation_max,
- uint8* output_data, const Dims<4>& output_dims) {
- static_assert(Ac == FusedActivationFunctionType::kNone ||
- Ac == FusedActivationFunctionType::kRelu ||
- Ac == FusedActivationFunctionType::kRelu6 ||
- Ac == FusedActivationFunctionType::kRelu1,
- "");
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
- if (Ac == FusedActivationFunctionType::kNone) {
- TFLITE_DCHECK_EQ(output_activation_min, 0);
- TFLITE_DCHECK_EQ(output_activation_max, 255);
- }
- BroadcastAddFivefold(y0, y1, y2, y3, y4, left_shift, input1_data, input1_dims,
- input1_offset, input1_multiplier, input1_shift,
- input2_data, input2_dims, input2_offset,
- input2_multiplier, input2_shift, output_offset,
- output_multiplier, output_shift, output_activation_min,
- output_activation_max, output_data, output_dims);
-}
-
inline void Mul(const float* input1_data, const Dims<4>& input1_dims,
const float* input2_data, const Dims<4>& input2_dims,
float output_activation_min, float output_activation_max,
@@ -3305,122 +2993,78 @@ void BroadcastDiv(const T* input1_data, const Dims<4>& input1_dims,
}
// TODO(aselle): This is not actually optimized yet.
-inline void Sub(const float* input1_data, const Dims<4>& input1_dims,
- const float* input2_data, const Dims<4>& input2_dims,
- float output_activation_min, float output_activation_max,
- float* output_data, const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("Sub");
- const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
+inline void SubNonBroadcast(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const float* input1_data,
+ const RuntimeShape& input2_shape,
+ const float* input2_data,
+ const RuntimeShape& output_shape,
+ float* output_data) {
+ gemmlowp::ScopedProfilingLabel label("SubNonBroadcast");
+ const int flat_size =
+ MatchingFlatSize(input1_shape, input2_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
- input1_data[i] - input2_data[i], output_activation_min,
- output_activation_max);
+ input1_data[i] - input2_data[i], params.float_activation_min,
+ params.float_activation_max);
}
}
-// TODO(jiawen): We can implement BroadcastSub on buffers of arbitrary
-// dimensionality if the runtime code does a single loop over one dimension
-// that handles broadcasting as the base case. The code generator would then
-// generate max(D1, D2) nested for loops.
-// TODO(benoitjacob): BroadcastSub is intentionally duplicated from
-// reference_ops.h. Once an optimized version is implemented and NdArrayDesc<T>
-// is no longer referenced in this file, move NdArrayDesc<T> from types.h to
-// reference_ops.h.
-template <typename T>
-void BroadcastSub(const T* input1_data, const Dims<4>& input1_dims,
- const T* input2_data, const Dims<4>& input2_dims,
- T output_activation_min, T output_activation_max,
- T* output_data, const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("BroadcastSub");
-
- NdArrayDesc<4> desc1;
- NdArrayDesc<4> desc2;
- NdArrayDescsForElementwiseBroadcast(input1_dims, input2_dims, &desc1, &desc2);
-
- // In Tensorflow, the dimensions are canonically named (batch_number, row,
- // col, channel), with extents (batches, height, width, depth), with the
- // trailing dimension changing most rapidly (channels has the smallest stride,
- // typically 1 element).
- //
- // In generated C code, we store arrays with the dimensions reversed. The
- // first dimension has smallest stride.
- //
- // We name our variables by their Tensorflow convention, but generate C code
- // nesting loops such that the innermost loop has the smallest stride for the
- // best cache behavior.
- for (int b = 0; b < ArraySize(output_dims, 3); ++b) {
- for (int y = 0; y < ArraySize(output_dims, 2); ++y) {
- for (int x = 0; x < ArraySize(output_dims, 1); ++x) {
- for (int c = 0; c < ArraySize(output_dims, 0); ++c) {
- output_data[Offset(output_dims, c, x, y, b)] =
- ActivationFunctionWithMinMax(
- input1_data[SubscriptToIndex(desc1, c, x, y, b)] -
- input2_data[SubscriptToIndex(desc2, c, x, y, b)],
- output_activation_min, output_activation_max);
- }
- }
- }
+inline void SubWithActivation(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const int32* input1_data,
+ const RuntimeShape& input2_shape,
+ const int32* input2_data,
+ const RuntimeShape& output_shape,
+ int32* output_data) {
+ gemmlowp::ScopedProfilingLabel label("SubWithActivation/int32");
+ const int flat_size =
+      MatchingFlatSize(input1_shape, input2_shape, output_shape);
+ for (int i = 0; i < flat_size; ++i) {
+ output_data[i] = ActivationFunctionWithMinMax(
+ input1_data[i] - input2_data[i], params.quantized_activation_min,
+ params.quantized_activation_max);
}
}
-inline void BroadcastSub(int left_shift, const uint8* input1_data,
- const Dims<4>& input1_dims, int32 input1_offset,
- int32 input1_multiplier, int input1_shift,
- const uint8* input2_data, const Dims<4>& input2_dims,
- int32 input2_offset, int32 input2_multiplier,
- int input2_shift, int32 output_offset,
- int32 output_multiplier, int output_shift,
- int32 output_activation_min,
- int32 output_activation_max, uint8* output_data,
- const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("BroadcastSub/8bit");
+inline void SubWithActivation(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const float* input1_data,
+ const RuntimeShape& input2_shape,
+ const float* input2_data,
+ const RuntimeShape& output_shape,
+ float* output_data) {
+ gemmlowp::ScopedProfilingLabel label("SubWithActivation/float");
+ const int flat_size =
+      MatchingFlatSize(input1_shape, input2_shape, output_shape);
+ for (int i = 0; i < flat_size; ++i) {
+ output_data[i] = ActivationFunctionWithMinMax(
+ input1_data[i] - input2_data[i], params.float_activation_min,
+ params.float_activation_max);
+ }
+}
- NdArrayDesc<4> desc1;
- NdArrayDesc<4> desc2;
- NdArrayDescsForElementwiseBroadcast(input1_dims, input2_dims, &desc1, &desc2);
+template <typename T>
+void Sub(const ArithmeticParams& params, const RuntimeShape& input1_shape,
+ const T* input1_data, const RuntimeShape& input2_shape,
+ const T* input2_data, const RuntimeShape& output_shape,
+ T* output_data) {
+ gemmlowp::ScopedProfilingLabel label("Sub");
- // In Tensorflow, the dimensions are canonically named (batch_number, row,
- // col, channel), with extents (batches, height, width, depth), with the
- // trailing dimension changing most rapidly (channels has the smallest stride,
- // typically 1 element).
- //
- // In generated C code, we store arrays with the dimensions reversed. The
- // first dimension has smallest stride.
- //
- // We name our variables by their Tensorflow convention, but generate C code
- // nesting loops such that the innermost loop has the smallest stride for the
- // best cache behavior.
- for (int b = 0; b < ArraySize(output_dims, 3); ++b) {
- for (int y = 0; y < ArraySize(output_dims, 2); ++y) {
- for (int x = 0; x < ArraySize(output_dims, 1); ++x) {
- for (int c = 0; c < ArraySize(output_dims, 0); ++c) {
- const int32 input1_val =
- input1_offset + input1_data[SubscriptToIndex(desc1, c, x, y, b)];
- const int32 input2_val =
- input2_offset + input2_data[SubscriptToIndex(desc2, c, x, y, b)];
- const int32 shifted_input1_val = input1_val * (1 << left_shift);
- const int32 shifted_input2_val = input2_val * (1 << left_shift);
- const int32 scaled_input1_val =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input1_val, input1_multiplier,
- kReverseShift * input1_shift);
- const int32 scaled_input2_val =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input2_val, input2_multiplier,
- kReverseShift * input2_shift);
- const int32 raw_sub = scaled_input1_val - scaled_input2_val;
- const int32 raw_output =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- raw_sub, output_multiplier, kReverseShift * output_shift) +
- output_offset;
- const int32 clamped_output =
- std::min(output_activation_max,
- std::max(output_activation_min, raw_output));
- output_data[Offset(output_dims, c, x, y, b)] =
- static_cast<uint8>(clamped_output);
- }
- }
- }
+ auto input1_map = MapAsVector(input1_data, input1_shape);
+ auto input2_map = MapAsVector(input2_data, input2_shape);
+ auto output_map = MapAsVector(output_data, output_shape);
+ if (input1_shape == input2_shape) {
+ output_map.array() = input1_map.array() - input2_map.array();
+ } else if (input1_shape.FlatSize() == 1) {
+ auto scalar = input1_data[0];
+ output_map.array() = scalar - input2_map.array();
+ } else if (input2_shape.FlatSize() == 1) {
+ auto scalar = input2_data[0];
+ output_map.array() = input1_map.array() - scalar;
+ } else {
+ BroadcastSub4DSlow(params, input1_shape, input1_data, input2_shape,
+ input2_data, output_shape, output_data);
}
}
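For illustration, a minimal caller-side sketch of the templated Sub above (hypothetical shapes and values; it assumes the RuntimeShape initializer-list constructor). Here input1 has FlatSize() == 1, so the Eigen scalar fast path is taken and no activation clamping is applied:

    tflite::ArithmeticParams params;
    params.float_activation_min = std::numeric_limits<float>::lowest();
    params.float_activation_max = std::numeric_limits<float>::max();
    tflite::RuntimeShape scalar_shape({1, 1, 1, 1});  // FlatSize() == 1
    tflite::RuntimeShape tensor_shape({1, 2, 2, 1});
    const float input1[] = {10.f};
    const float input2[] = {1.f, 2.f, 3.f, 4.f};
    float output[4];
    tflite::optimized_ops::Sub(params, scalar_shape, input1, tensor_shape,
                               input2, tensor_shape, output);
    // output is now {9.f, 8.f, 7.f, 6.f}: the scalar minus each element.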
@@ -5863,63 +5507,6 @@ inline void Slice(const T* input_data, const Dims<4>& input_dims,
}
template <typename T>
-void GenericBroadcastSub(const T* input1_data, const Dims<4>& input1_dims,
- const T* input2_data, const Dims<4>& input2_dims,
- T* output_data, const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("GenericBroadcastSub");
-
- NdArrayDesc<4> desc1;
- NdArrayDesc<4> desc2;
- NdArrayDescsForElementwiseBroadcast(input1_dims, input2_dims, &desc1, &desc2);
-
- // In Tensorflow, the dimensions are canonically named (batch_number, row,
- // col, channel), with extents (batches, height, width, depth), with the
- // trailing dimension changing most rapidly (channels has the smallest stride,
- // typically 1 element).
- //
- // In generated C code, we store arrays with the dimensions reversed. The
- // first dimension has smallest stride.
- //
- // We name our variables by their Tensorflow convention, but generate C code
- // nesting loops such that the innermost loop has the smallest stride for the
- // best cache behavior.
- for (int b = 0; b < ArraySize(output_dims, 3); ++b) {
- for (int y = 0; y < ArraySize(output_dims, 2); ++y) {
- for (int x = 0; x < ArraySize(output_dims, 1); ++x) {
- for (int c = 0; c < ArraySize(output_dims, 0); ++c) {
- output_data[Offset(output_dims, c, x, y, b)] =
- input1_data[SubscriptToIndex(desc1, c, x, y, b)] -
- input2_data[SubscriptToIndex(desc2, c, x, y, b)];
- }
- }
- }
- }
-}
-
-template <typename T>
-void Sub(const T* input1_data, const Dims<4>& input1_dims, const T* input2_data,
- const Dims<4>& input2_dims, T* output_data,
- const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("Sub");
-
- auto input1_map = MapAsVector(input1_data, input1_dims);
- auto input2_map = MapAsVector(input2_data, input2_dims);
- auto output_map = MapAsVector(output_data, output_dims);
- if (AreSameDims(input1_dims, input2_dims)) {
- output_map.array() = input1_map.array() - input2_map.array();
- } else if (FlatSize(input1_dims) == 1) {
- auto scalar = input1_data[0];
- output_map.array() = scalar - input2_map.array();
- } else if (FlatSize(input2_dims) == 1) {
- auto scalar = input2_data[0];
- output_map.array() = input1_map.array() - scalar;
- } else {
- GenericBroadcastSub(input1_data, input1_dims, input2_data, input2_dims,
- output_data, output_dims);
- }
-}
-
-template <typename T>
void TensorFlowMinimum(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, T* output_data,
const Dims<4>& output_dims) {
diff --git a/tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h b/tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h
index f14667090f..db7926df9a 100644
--- a/tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h
+++ b/tensorflow/contrib/lite/kernels/internal/optimized/tensor_utils_impl.h
@@ -124,6 +124,12 @@ void PortableCopyVector(const float* vector, int v_size, float* result);
// Fill vector with 0.f.
void PortableZeroVector(float* vector, int v_size);
+// Multiply all elements of vector with a scalar.
+void PortableVectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+ float* result);
+void NeonVectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+ float* result);
+
// Limit a float input f between +abs_limit and -abs_limit.
float PortableClip(float f, float abs_limit);
diff --git a/tensorflow/contrib/lite/kernels/internal/quantization_util.h b/tensorflow/contrib/lite/kernels/internal/quantization_util.h
index 525857a2e6..9b3f1823dc 100644
--- a/tensorflow/contrib/lite/kernels/internal/quantization_util.h
+++ b/tensorflow/contrib/lite/kernels/internal/quantization_util.h
@@ -28,8 +28,9 @@ namespace tflite {
// Given the min and max values of a float array, return
// reasonable quantization parameters to use for this array.
template <typename T>
-QuantizationParams ChooseQuantizationParams(double rmin, double rmax) {
- const T qmin = std::numeric_limits<T>::min();
+QuantizationParams ChooseQuantizationParams(double rmin, double rmax,
+ bool narrow_range) {
+ const T qmin = std::numeric_limits<T>::min() + (narrow_range ? 1 : 0);
const T qmax = std::numeric_limits<T>::max();
const double qmin_double = qmin;
const double qmax_double = qmax;
@@ -97,6 +98,11 @@ QuantizationParams ChooseQuantizationParams(double rmin, double rmax) {
return quantization_params;
}
+template <typename T>
+QuantizationParams ChooseQuantizationParams(double rmin, double rmax) {
+ return ChooseQuantizationParams<T>(rmin, rmax, false);
+}
+
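Usage note for the change above: narrow_range only bumps qmin by one (0 -> 1 for uint8), the convention used by symmetric weight quantization schemes, and the two-argument wrapper just added keeps existing callers unchanged. A hypothetical pair of calls for the range [-1, 1]:

    QuantizationParams full = ChooseQuantizationParams<uint8>(-1.0, 1.0);                           // qmin = 0
    QuantizationParams narrow = ChooseQuantizationParams<uint8>(-1.0, 1.0, /*narrow_range=*/true);  // qmin = 1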
// Converts a floating-point number to an integer. For all inputs x where
// static_cast<IntOut>(x) is legal according to the C++ standard, the result
// is identical to that cast (i.e. the result is x with its fractional part
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/legacy_reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/legacy_reference_ops.h
index f715d34bc1..bcf5e4e4f6 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/legacy_reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/legacy_reference_ops.h
@@ -63,6 +63,240 @@ inline void Relu6(const float* input_data, const Dims<4>& input_dims,
DimsToShape(output_dims));
}
+template <FusedActivationFunctionType Ac>
+inline void Add(int left_shift, const uint8* input1_data,
+ const Dims<4>& input1_dims, int32 input1_offset,
+ int32 input1_multiplier, int input1_shift,
+ const uint8* input2_data, const Dims<4>& input2_dims,
+ int32 input2_offset, int32 input2_multiplier, int input2_shift,
+ int32 output_offset, int32 output_multiplier, int output_shift,
+ int32 output_activation_min, int32 output_activation_max,
+ uint8* output_data, const Dims<4>& output_dims) {
+ constexpr int kReverseShift = -1;
+ static_assert(Ac == FusedActivationFunctionType::kNone ||
+ Ac == FusedActivationFunctionType::kRelu ||
+ Ac == FusedActivationFunctionType::kRelu6 ||
+ Ac == FusedActivationFunctionType::kRelu1,
+ "");
+ TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+ if (Ac == FusedActivationFunctionType::kNone) {
+ TFLITE_DCHECK_EQ(output_activation_min, 0);
+ TFLITE_DCHECK_EQ(output_activation_max, 255);
+ }
+
+ tflite::ArithmeticParams op_params;
+ op_params.left_shift = left_shift;
+ op_params.input1_offset = input1_offset;
+ op_params.input1_multiplier = input1_multiplier;
+ op_params.input1_shift = kReverseShift * input1_shift;
+ op_params.input2_offset = input2_offset;
+ op_params.input2_multiplier = input2_multiplier;
+ op_params.input2_shift = kReverseShift * input2_shift;
+ op_params.output_offset = output_offset;
+ op_params.output_multiplier = output_multiplier;
+ op_params.output_shift = kReverseShift * output_shift;
+ op_params.quantized_activation_min = output_activation_min;
+ op_params.quantized_activation_max = output_activation_max;
+ Add(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
+ output_data);
+}
+
+template <FusedActivationFunctionType Ac>
+void Add(const int32* input1_data, const Dims<4>& input1_dims,
+ const int32* input2_data, const Dims<4>& input2_dims,
+ int32* output_data, const Dims<4>& output_dims) {
+ gemmlowp::ScopedProfilingLabel label("Add/int32");
+ TFLITE_DCHECK(Ac == FusedActivationFunctionType::kNone);
+
+ tflite::ArithmeticParams op_params;
+ op_params.quantized_activation_min = std::numeric_limits<int32>::min();
+ op_params.quantized_activation_max = std::numeric_limits<int32>::max();
+ Add(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
+ output_data);
+}
+
+template <FusedActivationFunctionType Ac>
+inline void BroadcastAdd(int left_shift, const uint8* input1_data,
+ const Dims<4>& input1_dims, int32 input1_offset,
+ int32 input1_multiplier, int input1_shift,
+ const uint8* input2_data, const Dims<4>& input2_dims,
+ int32 input2_offset, int32 input2_multiplier,
+ int input2_shift, int32 output_offset,
+ int32 output_multiplier, int output_shift,
+ int32 output_activation_min,
+ int32 output_activation_max, uint8* output_data,
+ const Dims<4>& output_dims) {
+ constexpr int kReverseShift = -1;
+ static_assert(Ac == FusedActivationFunctionType::kNone ||
+ Ac == FusedActivationFunctionType::kRelu ||
+ Ac == FusedActivationFunctionType::kRelu6 ||
+ Ac == FusedActivationFunctionType::kRelu1,
+ "");
+ TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+ if (Ac == FusedActivationFunctionType::kNone) {
+ TFLITE_DCHECK_EQ(output_activation_min, 0);
+ TFLITE_DCHECK_EQ(output_activation_max, 255);
+ }
+
+ tflite::ArithmeticParams op_params;
+ op_params.left_shift = left_shift;
+ op_params.input1_offset = input1_offset;
+ op_params.input1_multiplier = input1_multiplier;
+ op_params.input1_shift = kReverseShift * input1_shift;
+ op_params.input2_offset = input2_offset;
+ op_params.input2_multiplier = input2_multiplier;
+ op_params.input2_shift = kReverseShift * input2_shift;
+ op_params.output_offset = output_offset;
+ op_params.output_multiplier = output_multiplier;
+ op_params.output_shift = kReverseShift * output_shift;
+ op_params.quantized_activation_min = output_activation_min;
+ op_params.quantized_activation_max = output_activation_max;
+ BroadcastAdd4DSlow(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data,
+ DimsToShape(output_dims), output_data);
+}
+
+template <FusedActivationFunctionType Ac>
+void Add(const float* input1_data, const Dims<4>& input1_dims,
+ const float* input2_data, const Dims<4>& input2_dims,
+ float* output_data, const Dims<4>& output_dims) {
+ float output_activation_min, output_activation_max;
+ GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
+
+ tflite::ArithmeticParams op_params;
+ op_params.float_activation_min = output_activation_min;
+ op_params.float_activation_max = output_activation_max;
+ Add(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
+ output_data);
+}
+
+template <typename T>
+void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
+ const T* input2_data, const Dims<4>& input2_dims,
+ T output_activation_min, T output_activation_max,
+ T* output_data, const Dims<4>& output_dims) {
+ tflite::ArithmeticParams op_params;
+ op_params.float_activation_min = output_activation_min;
+ op_params.float_activation_max = output_activation_max;
+ BroadcastAdd4DSlow(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data,
+ DimsToShape(output_dims), output_data);
+}
+
+template <FusedActivationFunctionType Ac>
+inline void BroadcastAddFivefold(
+ int y0, int y1, int y2, int y3, int y4, int left_shift,
+ const uint8* input1_data, const Dims<4>& input1_dims, int32 input1_offset,
+ int32 input1_multiplier, int input1_shift, const uint8* input2_data,
+ const Dims<4>& input2_dims, int32 input2_offset, int32 input2_multiplier,
+ int input2_shift, int32 output_offset, int32 output_multiplier,
+ int output_shift, int32 output_activation_min, int32 output_activation_max,
+ uint8* output_data, const Dims<4>& output_dims) {
+ constexpr int kReverseShift = -1;
+ static_assert(Ac == FusedActivationFunctionType::kNone ||
+ Ac == FusedActivationFunctionType::kRelu ||
+ Ac == FusedActivationFunctionType::kRelu6 ||
+ Ac == FusedActivationFunctionType::kRelu1,
+ "");
+ TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+ if (Ac == FusedActivationFunctionType::kNone) {
+ TFLITE_DCHECK_EQ(output_activation_min, 0);
+ TFLITE_DCHECK_EQ(output_activation_max, 255);
+ }
+ tflite::ArithmeticParams op_params;
+ op_params.broadcast_category =
+ tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
+ op_params.left_shift = left_shift;
+ op_params.input1_offset = input1_offset;
+ op_params.input1_multiplier = input1_multiplier;
+ op_params.input1_shift = kReverseShift * input1_shift;
+ op_params.input2_offset = input2_offset;
+ op_params.input2_multiplier = input2_multiplier;
+ op_params.input2_shift = kReverseShift * input2_shift;
+ op_params.output_offset = output_offset;
+ op_params.output_multiplier = output_multiplier;
+ op_params.output_shift = kReverseShift * output_shift;
+ op_params.quantized_activation_min = output_activation_min;
+ op_params.quantized_activation_max = output_activation_max;
+ op_params.broadcast_shape[4] = y0;
+ op_params.broadcast_shape[3] = y1;
+ op_params.broadcast_shape[2] = y2;
+ op_params.broadcast_shape[1] = y3;
+ op_params.broadcast_shape[0] = y4;
+ BroadcastAddFivefold(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data,
+ DimsToShape(output_dims), output_data);
+}
+
+// legacy, for compatibility with old checked-in code
+template <FusedActivationFunctionType Ac, typename T>
+void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
+ const T* input2_data, const Dims<4>& input2_dims,
+ T* output_data, const Dims<4>& output_dims) {
+ T output_activation_min, output_activation_max;
+ GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
+
+ BroadcastAdd(input1_data, input1_dims, input2_data, input2_dims,
+ output_activation_min, output_activation_max, output_data,
+ output_dims);
+}
+
+template <FusedActivationFunctionType Ac>
+inline void Add(const int16* input1_data, const Dims<4>& input1_dims,
+ int input1_shift, const int16* input2_data,
+ const Dims<4>& input2_dims, int input2_shift,
+ int16 output_activation_min, int16 output_activation_max,
+ int16* output_data, const Dims<4>& output_dims) {
+ static_assert(Ac == FusedActivationFunctionType::kNone ||
+ Ac == FusedActivationFunctionType::kRelu ||
+ Ac == FusedActivationFunctionType::kRelu6 ||
+ Ac == FusedActivationFunctionType::kRelu1,
+ "");
+ TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+ if (Ac == FusedActivationFunctionType::kNone) {
+ TFLITE_DCHECK_EQ(output_activation_min, -32768);
+ TFLITE_DCHECK_EQ(output_activation_max, 32767);
+ }
+
+ tflite::ArithmeticParams op_params;
+ op_params.input1_shift = kReverseShift * input1_shift;
+ op_params.input2_shift = kReverseShift * input2_shift;
+ op_params.quantized_activation_min = output_activation_min;
+ op_params.quantized_activation_max = output_activation_max;
+ Add(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
+ output_data);
+}
+
+inline void Sub(const float* input1_data, const Dims<4>& input1_dims,
+ const float* input2_data, const Dims<4>& input2_dims,
+ float* output_data, const Dims<4>& output_dims) {
+ float output_activation_min, output_activation_max;
+ GetActivationMinMax(FusedActivationFunctionType::kNone,
+ &output_activation_min, &output_activation_max);
+ tflite::ArithmeticParams op_params;
+ op_params.float_activation_min = output_activation_min;
+ op_params.float_activation_max = output_activation_max;
+ Sub(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
+ output_data);
+}
+
+template <typename T>
+void Sub(const T* input1_data, const Dims<4>& input1_dims, const T* input2_data,
+ const Dims<4>& input2_dims, T* output_data,
+ const Dims<4>& output_dims) {
+ tflite::ArithmeticParams op_params;
+ op_params.quantized_activation_min = std::numeric_limits<T>::min();
+ op_params.quantized_activation_max = std::numeric_limits<T>::max();
+ Sub(op_params, DimsToShape(input1_dims), input1_data,
+ DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
+ output_data);
+}
+
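A note on the shift convention in the wrappers above: the legacy entry points take non-negative right-shift amounts, while ArithmeticParams stores the shift with the opposite sign (the updated reference kernels assert input1_shift <= 0), which is why every wrapper multiplies by kReverseShift = -1. A hypothetical illustration:

    tflite::ArithmeticParams op_params;
    constexpr int kReverseShift = -1;
    const int legacy_input1_shift = 3;                             // old API: right shift by 3
    op_params.input1_shift = kReverseShift * legacy_input1_shift;  // new API stores -3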
inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int kwidth, int kheight,
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc b/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc
index ccf112c990..7ead449ca8 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc
+++ b/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.cc
@@ -195,6 +195,13 @@ void PortableZeroVector(float* vector, int v_size) {
memset(vector, 0, v_size * sizeof(float));
}
+void PortableVectorScalarMultiply(const int8_t* vector, const int v_size,
+ const float scale, float* result) {
+ for (int v = 0; v < v_size; ++v) {
+ *result++ = scale * *vector++;
+ }
+}
+
void PortableClipVector(const float* vector, int v_size, float abs_limit,
float* result) {
for (int v = 0; v < v_size; v++) {
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h b/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h
index d2e1fecd25..d3a4fa8507 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/portable_tensor_utils.h
@@ -96,6 +96,10 @@ void PortableSub1Vector(const float* vector, int v_size, float* result);
// Fill vector with 0.f.
void PortableZeroVector(float* vector, int v_size);
+// Multiply all elements of vector with a scalar.
+void PortableVectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+ float* result);
+
// Clip elements of a vector using a abs_limit value.
void PortableClipVector(const float* vector, int v_size, float abs_limit,
float* result);
@@ -199,6 +203,12 @@ void ZeroVector(float* vector, int v_size) {
PortableZeroVector(vector, v_size);
}
+// Multiply all elements of vector with a scalar.
+void VectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+ float* result) {
+ PortableVectorScalarMultiply(vector, v_size, scale, result);
+}
+
void ClipVector(const float* vector, int v_size, float abs_limit,
float* result) {
PortableClipVector(vector, v_size, abs_limit, result);
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
index 2d40f1769b..31a54c2b62 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
@@ -158,98 +158,6 @@ SaturatingRoundingMultiplyByPOTParam(
SaturatingRoundingMultiplyByPOTParam(a.raw(), exponent));
}
-// DO NOT USE THIS STRUCT FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING ELEMENT-WISE
-// BROADCASTING.
-//
-// NdArrayDesc<N> describes the shape and memory layout of an N-dimensional
-// rectangular array of numbers.
-//
-// NdArrayDesc<N> is basically identical to Dims<N> defined in types.h.
-// However, as Dims<N> is to be deprecated, this class exists as an adaptor
-// to enable simple unoptimized implementations of element-wise broadcasting
-// operations.
-template <int N>
-struct NdArrayDesc {
- // The "extent" of each dimension. Indices along dimension d must be in the
- // half-open interval [0, extents[d]).
- int extents[N];
-
- // The number of *elements* (not bytes) between consecutive indices of each
- // dimension.
- int strides[N];
-};
-
-// DO NOT USE THIS FUNCTION FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING
-// ELEMENT-WISE BROADCASTING.
-//
-// Same as Offset(), except takes as NdArrayDesc<N> instead of Dims<N>.
-inline int SubscriptToIndex(const NdArrayDesc<4>& desc, int i0, int i1, int i2,
- int i3) {
- TFLITE_DCHECK(i0 >= 0 && i0 < desc.extents[0]);
- TFLITE_DCHECK(i1 >= 0 && i1 < desc.extents[1]);
- TFLITE_DCHECK(i2 >= 0 && i2 < desc.extents[2]);
- TFLITE_DCHECK(i3 >= 0 && i3 < desc.extents[3]);
- return i0 * desc.strides[0] + i1 * desc.strides[1] + i2 * desc.strides[2] +
- i3 * desc.strides[3];
-}
-
-// Given the dimensions of the operands for an element-wise binary broadcast,
-// adjusts them so that they can be directly iterated over with simple loops.
-// Returns the adjusted dims as instances of NdArrayDesc in 'desc0_out' and
-// 'desc1_out'. 'desc0_out' and 'desc1_out' cannot be nullptr.
-//
-// This function assumes that the two input shapes are compatible up to
-// broadcasting and the shorter one has already been prepended with 1s to be the
-// same length. E.g., if shape0 is (1, 16, 16, 64) and shape1 is (1, 64),
-// shape1 must already have been prepended to be (1, 1, 1, 64). Recall that
-// Dims<N> refer to shapes in reverse order. In this case, input0_dims will be
-// (64, 16, 16, 1) and input1_dims will be (64, 1, 1, 1).
-//
-// When two shapes are compatible up to broadcasting, for each dimension d,
-// the input extents are either equal, or one of them is 1.
-//
-// This function performs the following for each dimension d:
-// - If the extents are equal, then do nothing since the loop that walks over
-// both of the input arrays is correct.
-// - Otherwise, one (and only one) of the extents must be 1. Say extent0 is 1
-// and extent1 is e1. Then set extent0 to e1 and stride0 *to 0*. This allows
-// array0 to be referenced *at any index* in dimension d and still access the
-// same slice.
-template <int N>
-inline void NdArrayDescsForElementwiseBroadcast(const Dims<N>& input0_dims,
- const Dims<N>& input1_dims,
- NdArrayDesc<N>* desc0_out,
- NdArrayDesc<N>* desc1_out) {
- TFLITE_DCHECK(desc0_out != nullptr);
- TFLITE_DCHECK(desc1_out != nullptr);
-
- // Copy dims to desc.
- for (int i = 0; i < N; ++i) {
- desc0_out->extents[i] = input0_dims.sizes[i];
- desc0_out->strides[i] = input0_dims.strides[i];
- desc1_out->extents[i] = input1_dims.sizes[i];
- desc1_out->strides[i] = input1_dims.strides[i];
- }
-
- // Walk over each dimension. If the extents are equal do nothing.
- // Otherwise, set the desc with extent 1 to have extent equal to the other and
- // stride 0.
- for (int i = 0; i < N; ++i) {
- const int extent0 = ArraySize(input0_dims, i);
- const int extent1 = ArraySize(input1_dims, i);
- if (extent0 != extent1) {
- if (extent0 == 1) {
- desc0_out->strides[i] = 0;
- desc0_out->extents[i] = extent1;
- } else {
- TFLITE_DCHECK_EQ(extent1, 1);
- desc1_out->strides[i] = 0;
- desc1_out->extents[i] = extent0;
- }
- }
- }
-}
-
inline void Conv(const float* input_data, const Dims<4>& input_dims,
const float* filter_data, const Dims<4>& filter_dims,
const float* bias_data, const Dims<4>& bias_dims,
@@ -1065,114 +973,108 @@ inline void L2Normalization(const uint8* input_data,
}
template <typename T>
-inline void Add(const T* input1_data, const Dims<4>& input1_dims,
- const T* input2_data, const Dims<4>& input2_dims,
- T output_activation_min, T output_activation_max,
- T* output_data, const Dims<4>& output_dims) {
- const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
+inline void Add(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape, const T* input1_data,
+ const RuntimeShape& input2_shape, const T* input2_data,
+ const RuntimeShape& output_shape, T* output_data) {
+ const int flat_size =
+ MatchingFlatSize(input1_shape, input2_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
- input1_data[i] + input2_data[i], output_activation_min,
- output_activation_max);
+ input1_data[i] + input2_data[i], params.quantized_activation_min,
+ params.quantized_activation_max);
}
}
-// legacy, for compatibility with old checked-in code
-template <FusedActivationFunctionType Ac>
-void Add(const float* input1_data, const Dims<4>& input1_dims,
- const float* input2_data, const Dims<4>& input2_dims,
- float* output_data, const Dims<4>& output_dims) {
- float output_activation_min, output_activation_max;
- GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
-
- Add(input1_data, input1_dims, input2_data, input2_dims, output_activation_min,
- output_activation_max, output_data, output_dims);
+inline void Add(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape, const float* input1_data,
+ const RuntimeShape& input2_shape, const float* input2_data,
+ const RuntimeShape& output_shape, float* output_data) {
+ const int size = MatchingFlatSize(input1_shape, input2_shape, output_shape);
+ for (int i = 0; i < size; i++) {
+ auto x = input1_data[i] + input2_data[i];
+ output_data[i] = ActivationFunctionWithMinMax(
+ x, params.float_activation_min, params.float_activation_max);
+ }
}
-template <FusedActivationFunctionType Ac>
-inline void Add(int left_shift, const uint8* input1_data,
- const Dims<4>& input1_dims, int32 input1_offset,
- int32 input1_multiplier, int input1_shift,
- const uint8* input2_data, const Dims<4>& input2_dims,
- int32 input2_offset, int32 input2_multiplier, int input2_shift,
- int32 output_offset, int32 output_multiplier, int output_shift,
- int32 output_activation_min, int32 output_activation_max,
- uint8* output_data, const Dims<4>& output_dims) {
- static_assert(Ac == FusedActivationFunctionType::kNone ||
- Ac == FusedActivationFunctionType::kRelu ||
- Ac == FusedActivationFunctionType::kRelu6 ||
- Ac == FusedActivationFunctionType::kRelu1,
- "");
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
- if (Ac == FusedActivationFunctionType::kNone) {
- TFLITE_DCHECK_EQ(output_activation_min, 0);
- TFLITE_DCHECK_EQ(output_activation_max, 255);
- }
- const int batches =
- MatchingArraySize(input1_dims, 3, input2_dims, 3, output_dims, 3);
- const int height =
- MatchingArraySize(input1_dims, 2, input2_dims, 2, output_dims, 2);
- const int width =
- MatchingArraySize(input1_dims, 1, input2_dims, 1, output_dims, 1);
- const int depth =
- MatchingArraySize(input1_dims, 0, input2_dims, 0, output_dims, 0);
- for (int b = 0; b < batches; ++b) {
- for (int y = 0; y < height; ++y) {
- for (int x = 0; x < width; ++x) {
- for (int c = 0; c < depth; ++c) {
- const int32 input1_val =
- input1_offset + input1_data[Offset(input1_dims, c, x, y, b)];
- const int32 input2_val =
- input2_offset + input2_data[Offset(input2_dims, c, x, y, b)];
- const int32 shifted_input1_val = input1_val * (1 << left_shift);
- const int32 shifted_input2_val = input2_val * (1 << left_shift);
- const int32 scaled_input1_val =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input1_val, input1_multiplier,
- kReverseShift * input1_shift);
- const int32 scaled_input2_val =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input2_val, input2_multiplier,
- kReverseShift * input2_shift);
- const int32 raw_sum = scaled_input1_val + scaled_input2_val;
- const int32 raw_output =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- raw_sum, output_multiplier, kReverseShift * output_shift) +
- output_offset;
- const int32 clamped_output =
- std::min(output_activation_max,
- std::max(output_activation_min, raw_output));
- output_data[Offset(output_dims, c, x, y, b)] =
- static_cast<uint8>(clamped_output);
- }
- }
- }
+// Element-wise add that can often be used for the inner loop of a broadcast
+// add as well as for the non-broadcast add.
+inline void AddElementwise(int size, const ArithmeticParams& params,
+ const uint8* input1_data, const uint8* input2_data,
+ uint8* output_data) {
+ TFLITE_DCHECK_GT(params.input1_offset, -256);
+ TFLITE_DCHECK_GT(params.input2_offset, -256);
+ TFLITE_DCHECK_LT(params.input1_offset, 256);
+ TFLITE_DCHECK_LT(params.input2_offset, 256);
+
+ for (int i = 0; i < size; ++i) {
+ const int32 input1_val = params.input1_offset + input1_data[i];
+ const int32 input2_val = params.input2_offset + input2_data[i];
+ const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
+ const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
+ const int32 scaled_input1_val =
+ MultiplyByQuantizedMultiplierSmallerThanOneExp(
+ shifted_input1_val, params.input1_multiplier, params.input1_shift);
+ const int32 scaled_input2_val =
+ MultiplyByQuantizedMultiplierSmallerThanOneExp(
+ shifted_input2_val, params.input2_multiplier, params.input2_shift);
+ const int32 raw_sum = scaled_input1_val + scaled_input2_val;
+ const int32 raw_output =
+ MultiplyByQuantizedMultiplierSmallerThanOneExp(
+ raw_sum, params.output_multiplier, params.output_shift) +
+ params.output_offset;
+ const int32 clamped_output =
+ std::min(params.quantized_activation_max,
+ std::max(params.quantized_activation_min, raw_output));
+ output_data[i] = static_cast<uint8>(clamped_output);
}
}
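The (-256, 256) checks above hold by construction when the offsets come from uint8 quantization parameters: the input offsets are the negated input zero points and the output offset is the output zero point, all of which lie in [0, 255]. A hypothetical setup:

    tflite::ArithmeticParams params;
    params.input1_offset = -128;  // -input1_zero_point, always in [-255, 0]
    params.input2_offset = -100;  // -input2_zero_point
    params.output_offset = 127;   // output_zero_point, always in [0, 255]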
-inline void Add(const int16* input1_data, const Dims<4>& input1_dims,
- int input1_shift, const int16* input2_data,
- const Dims<4>& input2_dims, int input2_shift,
- int16 output_activation_min, int16 output_activation_max,
- int16* output_data, const Dims<4>& output_dims) {
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
+inline void Add(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape, const uint8* input1_data,
+ const RuntimeShape& input2_shape, const uint8* input2_data,
+ const RuntimeShape& output_shape, uint8* output_data) {
+ TFLITE_DCHECK_LE(params.quantized_activation_min,
+ params.quantized_activation_max);
+ const int flat_size =
+ MatchingFlatSize(input1_shape, input2_shape, output_shape);
- const int flat_size = MatchingFlatSize(output_dims, input1_dims, input2_dims);
+ TFLITE_DCHECK_GT(params.input1_offset, -256);
+ TFLITE_DCHECK_GT(params.input2_offset, -256);
+ TFLITE_DCHECK_LT(params.input1_offset, 256);
+ TFLITE_DCHECK_LT(params.input2_offset, 256);
+ AddElementwise(flat_size, params, input1_data, input2_data, output_data);
+}
+
+inline void Add(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape, const int16* input1_data,
+ const RuntimeShape& input2_shape, const int16* input2_data,
+ const RuntimeShape& output_shape, int16* output_data) {
+ TFLITE_DCHECK_LE(params.quantized_activation_min,
+ params.quantized_activation_max);
+
+ const int input1_shift = params.input1_shift;
+ const int flat_size =
+ MatchingFlatSize(output_shape, input1_shape, input2_shape);
+ const int16 output_activation_min = params.quantized_activation_min;
+ const int16 output_activation_max = params.quantized_activation_max;
- TFLITE_DCHECK(input1_shift == 0 || input2_shift == 0);
- TFLITE_DCHECK_GE(input1_shift, 0);
- TFLITE_DCHECK_GE(input2_shift, 0);
+ TFLITE_DCHECK(input1_shift == 0 || params.input2_shift == 0);
+ TFLITE_DCHECK_LE(input1_shift, 0);
+ TFLITE_DCHECK_LE(params.input2_shift, 0);
const int16* not_shift_input = input1_shift == 0 ? input1_data : input2_data;
const int16* shift_input = input1_shift == 0 ? input2_data : input1_data;
- const int input_shift = input1_shift == 0 ? input2_shift : input1_shift;
+ const int input_right_shift =
+ input1_shift == 0 ? -params.input2_shift : -input1_shift;
for (int i = 0; i < flat_size; i++) {
// F0 uses 0 integer bits, range [-1, 1].
using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
F0 input_ready_scaled = F0::FromRaw(not_shift_input[i]);
- F0 scaled_input =
- F0::FromRaw(gemmlowp::RoundingDivideByPOT(shift_input[i], input_shift));
+ F0 scaled_input = F0::FromRaw(
+ gemmlowp::RoundingDivideByPOT(shift_input[i], input_right_shift));
F0 result = gemmlowp::SaturatingAdd(scaled_input, input_ready_scaled);
const int16 raw_output = result.raw();
const int16 clamped_output = std::min(
@@ -1181,42 +1083,28 @@ inline void Add(const int16* input1_data, const Dims<4>& input1_dims,
}
}
-template <FusedActivationFunctionType Ac>
-inline void Add(const int16* input1_data, const Dims<4>& input1_dims,
- int input1_shift, const int16* input2_data,
- const Dims<4>& input2_dims, int input2_shift,
- int16 output_activation_min, int16 output_activation_max,
- int16* output_data, const Dims<4>& output_dims) {
- static_assert(Ac == FusedActivationFunctionType::kNone ||
- Ac == FusedActivationFunctionType::kRelu ||
- Ac == FusedActivationFunctionType::kRelu6 ||
- Ac == FusedActivationFunctionType::kRelu1,
- "");
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
- if (Ac == FusedActivationFunctionType::kNone) {
- TFLITE_DCHECK_EQ(output_activation_min, -32768);
- TFLITE_DCHECK_EQ(output_activation_max, 32767);
- }
-
- Add(input1_data, input1_dims, input1_shift, input2_data, input2_dims,
- input2_shift, output_activation_min, output_activation_max, output_data,
- output_dims);
-}
-
// TODO(jiawen): We can implement BroadcastAdd on buffers of arbitrary
// dimensionality if the runtime code does a single loop over one dimension
// that handles broadcasting as the base case. The code generator would then
// generate max(D1, D2) nested for loops.
-template <typename T>
-void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
- const T* input2_data, const Dims<4>& input2_dims,
- T output_activation_min, T output_activation_max,
- T* output_data, const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("BroadcastAdd");
-
+// TODO(benoitjacob): BroadcastAdd is intentionally duplicated from
+// reference_ops.h. Once an optimized version is implemented and NdArrayDesc<T>
+// is no longer referenced in this file, move NdArrayDesc<T> from types.h to
+// reference_ops.h.
+inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const float* input1_data,
+ const RuntimeShape& input2_shape,
+ const float* input2_data,
+ const RuntimeShape& output_shape,
+ float* output_data) {
+ gemmlowp::ScopedProfilingLabel label("BroadcastAdd4DSlow/float");
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
- NdArrayDescsForElementwiseBroadcast(input1_dims, input2_dims, &desc1, &desc2);
+ NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
+ &desc2);
+ RuntimeShape extended_output_shape =
+ RuntimeShape::ExtendedShape(4, output_shape);
// In Tensorflow, the dimensions are canonically named (batch_number, row,
// col, channel), with extents (batches, height, width, depth), with the
@@ -1229,49 +1117,77 @@ void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
// We name our variables by their Tensorflow convention, but generate C code
// nesting loops such that the innermost loop has the smallest stride for the
// best cache behavior.
- for (int b = 0; b < ArraySize(output_dims, 3); ++b) {
- for (int y = 0; y < ArraySize(output_dims, 2); ++y) {
- for (int x = 0; x < ArraySize(output_dims, 1); ++x) {
- for (int c = 0; c < ArraySize(output_dims, 0); ++c) {
- output_data[Offset(output_dims, c, x, y, b)] =
+ for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
+ for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
+ for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
+ for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
+ output_data[Offset(extended_output_shape, b, y, x, c)] =
ActivationFunctionWithMinMax(
- input1_data[SubscriptToIndex(desc1, c, x, y, b)] +
- input2_data[SubscriptToIndex(desc2, c, x, y, b)],
- output_activation_min, output_activation_max);
+ input1_data[SubscriptToIndex(desc1, b, y, x, c)] +
+ input2_data[SubscriptToIndex(desc2, b, y, x, c)],
+ params.float_activation_min, params.float_activation_max);
}
}
}
}
}
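The descriptor pair built above is what lets the plain four-deep loop broadcast correctly: any dimension where one input has extent 1 gets that input's stride set to 0, so the same slice is re-read at every index. A small sketch, assuming the RuntimeShape overload behaves like the documented Dims<4> version and reusing the (1, 16, 16, 64) vs. (1, 1, 1, 64) example from the original comments:

    tflite::RuntimeShape shape1({1, 16, 16, 64});
    tflite::RuntimeShape shape2({1, 1, 1, 64});
    NdArrayDesc<4> desc1, desc2;
    NdArrayDescsForElementwiseBroadcast(shape1, shape2, &desc1, &desc2);
    // desc2 now has extents {1, 16, 16, 64} with stride 0 on the height and
    // width dimensions, so SubscriptToIndex(desc2, b, y, x, c) reads the same
    // 64-element row for every (y, x): input2 is broadcast across H and W.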
-// legacy, for compatibility with old checked-in code
-template <FusedActivationFunctionType Ac, typename T>
-void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
- const T* input2_data, const Dims<4>& input2_dims,
- T* output_data, const Dims<4>& output_dims) {
- T output_activation_min, output_activation_max;
- GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
+inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const int32* input1_data,
+ const RuntimeShape& input2_shape,
+ const int32* input2_data,
+ const RuntimeShape& output_shape,
+ int32* output_data) {
+ gemmlowp::ScopedProfilingLabel label("BroadcastAdd4DSlow/int32");
+ NdArrayDesc<4> desc1;
+ NdArrayDesc<4> desc2;
+ NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
+ &desc2);
+ RuntimeShape extended_output_shape =
+ RuntimeShape::ExtendedShape(4, output_shape);
- BroadcastAdd(input1_data, input1_dims, input2_data, input2_dims,
- output_activation_min, output_activation_max, output_data,
- output_dims);
+ // In Tensorflow, the dimensions are canonically named (batch_number, row,
+ // col, channel), with extents (batches, height, width, depth), with the
+ // trailing dimension changing most rapidly (channels has the smallest stride,
+ // typically 1 element).
+ //
+ // In generated C code, we store arrays with the dimensions reversed. The
+ // first dimension has smallest stride.
+ //
+ // We name our variables by their Tensorflow convention, but generate C code
+ // nesting loops such that the innermost loop has the smallest stride for the
+ // best cache behavior.
+ for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
+ for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
+ for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
+ for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
+ output_data[Offset(extended_output_shape, b, y, x, c)] =
+ ActivationFunctionWithMinMax(
+ input1_data[SubscriptToIndex(desc1, b, y, x, c)] +
+ input2_data[SubscriptToIndex(desc2, b, y, x, c)],
+ params.quantized_activation_min,
+ params.quantized_activation_max);
+ }
+ }
+ }
+ }
}
-inline void BroadcastAdd(int left_shift, const uint8* input1_data,
- const Dims<4>& input1_dims, int32 input1_offset,
- int32 input1_multiplier, int input1_shift,
- const uint8* input2_data, const Dims<4>& input2_dims,
- int32 input2_offset, int32 input2_multiplier,
- int input2_shift, int32 output_offset,
- int32 output_multiplier, int output_shift,
- int32 output_activation_min,
- int32 output_activation_max, uint8* output_data,
- const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("BroadcastAdd/8bit");
-
+inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const uint8* input1_data,
+ const RuntimeShape& input2_shape,
+ const uint8* input2_data,
+ const RuntimeShape& output_shape,
+ uint8* output_data) {
+ gemmlowp::ScopedProfilingLabel label("BroadcastAdd4DSlow/uint8");
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
- NdArrayDescsForElementwiseBroadcast(input1_dims, input2_dims, &desc1, &desc2);
+ NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
+ &desc2);
+ RuntimeShape extended_output_shape =
+ RuntimeShape::ExtendedShape(4, output_shape);
// In Tensorflow, the dimensions are canonically named (batch_number, row,
// col, channel), with extents (batches, height, width, depth), with the
@@ -1284,33 +1200,37 @@ inline void BroadcastAdd(int left_shift, const uint8* input1_data,
// We name our variables by their Tensorflow convention, but generate C code
// nesting loops such that the innermost loop has the smallest stride for the
// best cache behavior.
- for (int b = 0; b < ArraySize(output_dims, 3); ++b) {
- for (int y = 0; y < ArraySize(output_dims, 2); ++y) {
- for (int x = 0; x < ArraySize(output_dims, 1); ++x) {
- for (int c = 0; c < ArraySize(output_dims, 0); ++c) {
+ for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
+ for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
+ for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
+ for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
const int32 input1_val =
- input1_offset + input1_data[SubscriptToIndex(desc1, c, x, y, b)];
+ params.input1_offset +
+ input1_data[SubscriptToIndex(desc1, b, y, x, c)];
const int32 input2_val =
- input2_offset + input2_data[SubscriptToIndex(desc2, c, x, y, b)];
- const int32 shifted_input1_val = input1_val * (1 << left_shift);
- const int32 shifted_input2_val = input2_val * (1 << left_shift);
+ params.input2_offset +
+ input2_data[SubscriptToIndex(desc2, b, y, x, c)];
+ const int32 shifted_input1_val =
+ input1_val * (1 << params.left_shift);
+ const int32 shifted_input2_val =
+ input2_val * (1 << params.left_shift);
const int32 scaled_input1_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input1_val, input1_multiplier,
- kReverseShift * input1_shift);
+ shifted_input1_val, params.input1_multiplier,
+ params.input1_shift);
const int32 scaled_input2_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input2_val, input2_multiplier,
- kReverseShift * input2_shift);
+ shifted_input2_val, params.input2_multiplier,
+ params.input2_shift);
const int32 raw_sum = scaled_input1_val + scaled_input2_val;
const int32 raw_output =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
- raw_sum, output_multiplier, kReverseShift * output_shift) +
- output_offset;
+ raw_sum, params.output_multiplier, params.output_shift) +
+ params.output_offset;
const int32 clamped_output =
- std::min(output_activation_max,
- std::max(output_activation_min, raw_output));
- output_data[Offset(output_dims, c, x, y, b)] =
+ std::min(params.quantized_activation_max,
+ std::max(params.quantized_activation_min, raw_output));
+ output_data[Offset(extended_output_shape, b, y, x, c)] =
static_cast<uint8>(clamped_output);
}
}
@@ -1318,117 +1238,62 @@ inline void BroadcastAdd(int left_shift, const uint8* input1_data,
}
}
-inline void BroadcastAddFivefold(
- int y0, int y1, int y2, int y3, int y4, int left_shift,
- const uint8* input1_data, const Dims<4>& input1_dims, int32 input1_offset,
- int32 input1_multiplier, int input1_shift, const uint8* input2_data,
- const Dims<4>& input2_dims, int32 input2_offset, int32 input2_multiplier,
- int input2_shift, int32 output_offset, int32 output_multiplier,
- int output_shift, int32 output_activation_min, int32 output_activation_max,
- uint8* output_data, const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("BroadcastAddFivefold/8bit");
-
- int sb1 = y0;
- int sa2 = y0;
- int sb2 = y0 * y1;
- int sa3 = y0 * y2;
- int sa4 = y0 * y2 * y3;
- int sb4 = y0 * y1 * y2;
-
+inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
+ const RuntimeShape& unswitched_input1_shape,
+ const uint8* unswitched_input1_data,
+ const RuntimeShape& unswitched_input2_shape,
+ const uint8* unswitched_input2_data,
+ const RuntimeShape& output_shape,
+ uint8* output_data) {
+ ArithmeticParams switched_params = unswitched_params;
+ switched_params.input1_offset = unswitched_params.input2_offset;
+ switched_params.input1_multiplier = unswitched_params.input2_multiplier;
+ switched_params.input1_shift = unswitched_params.input2_shift;
+ switched_params.input2_offset = unswitched_params.input1_offset;
+ switched_params.input2_multiplier = unswitched_params.input1_multiplier;
+ switched_params.input2_shift = unswitched_params.input1_shift;
+
+ const bool use_unswitched =
+ unswitched_params.broadcast_category ==
+ tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
+
+ const ArithmeticParams& params =
+ use_unswitched ? unswitched_params : switched_params;
+ const uint8* input1_data =
+ use_unswitched ? unswitched_input1_data : unswitched_input2_data;
+ const uint8* input2_data =
+ use_unswitched ? unswitched_input2_data : unswitched_input1_data;
+
+ // Fivefold nested loops. The second input resets its position for each
+ // iteration of the second loop. The first input resets its position at the
+ // beginning of the fourth loop. The innermost loop is an elementwise add of
+ // sections of the arrays.
uint8* output_data_ptr = output_data;
- for (int i4 = 0; i4 < y4; ++i4) {
- for (int i3 = 0; i3 < y3; ++i3) {
+ const uint8* input1_data_ptr = input1_data;
+ const uint8* input2_data_reset = input2_data;
+ int y0 = params.broadcast_shape[0];
+ int y1 = params.broadcast_shape[1];
+ int y2 = params.broadcast_shape[2];
+ int y3 = params.broadcast_shape[3];
+ int y4 = params.broadcast_shape[4];
+ for (int i0 = 0; i0 < y0; ++i0) {
+ const uint8* input2_data_ptr;
+ for (int i1 = 0; i1 < y1; ++i1) {
+ input2_data_ptr = input2_data_reset;
for (int i2 = 0; i2 < y2; ++i2) {
- for (int i1 = 0; i1 < y1; ++i1) {
- for (int i0 = 0; i0 < y0; ++i0) {
- const int32 input1_val =
- input1_offset +
- input1_data[i4 * sa4 + i3 * sa3 + i2 * sa2 + i0];
- const int32 input2_val =
- input2_offset +
- input2_data[i4 * sb4 + i2 * sb2 + i1 * sb1 + i0];
- const int32 shifted_input1_val = input1_val * (1 << left_shift);
- const int32 shifted_input2_val = input2_val * (1 << left_shift);
- const int32 scaled_input1_val =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input1_val, input1_multiplier,
- kReverseShift * input1_shift);
- const int32 scaled_input2_val =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input2_val, input2_multiplier,
- kReverseShift * input2_shift);
- const int32 raw_sum = scaled_input1_val + scaled_input2_val;
- const int32 raw_output =
- MultiplyByQuantizedMultiplierSmallerThanOneExp(
- raw_sum, output_multiplier, kReverseShift * output_shift) +
- output_offset;
- const int32 clamped_output =
- std::min(output_activation_max,
- std::max(output_activation_min, raw_output));
- *output_data_ptr = static_cast<uint8>(clamped_output);
- ++output_data_ptr;
- }
+ for (int i3 = 0; i3 < y3; ++i3) {
+ AddElementwise(y4, params, input1_data_ptr, input2_data_ptr,
+ output_data_ptr);
+ input2_data_ptr += y4;
+ output_data_ptr += y4;
}
+ input1_data_ptr += y4;
}
}
+ input2_data_reset = input2_data_ptr;
}
}
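Callers of this fivefold path are expected to pre-fold the broadcast into the five counts in params.broadcast_shape and to mark which side broadcasts fast, exactly as the legacy wrapper in legacy_reference_ops.h does. A condensed, hypothetical helper (the y0..y4 names follow that wrapper; quantization fields are assumed to be filled by the caller):

    inline void BroadcastAddFivefoldFromCounts(
        tflite::ArithmeticParams params, int y0, int y1, int y2, int y3, int y4,
        const tflite::RuntimeShape& input1_shape, const uint8_t* input1_data,
        const tflite::RuntimeShape& input2_shape, const uint8_t* input2_data,
        const tflite::RuntimeShape& output_shape, uint8_t* output_data) {
      params.broadcast_category =
          tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
      params.broadcast_shape[4] = y0;  // innermost run length handed to AddElementwise
      params.broadcast_shape[3] = y1;
      params.broadcast_shape[2] = y2;
      params.broadcast_shape[1] = y3;
      params.broadcast_shape[0] = y4;  // outermost loop count
      BroadcastAddFivefold(params, input1_shape, input1_data, input2_shape,
                           input2_data, output_shape, output_data);
    }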
-template <FusedActivationFunctionType Ac>
-inline void BroadcastAdd(int left_shift, const uint8* input1_data,
- const Dims<4>& input1_dims, int32 input1_offset,
- int32 input1_multiplier, int input1_shift,
- const uint8* input2_data, const Dims<4>& input2_dims,
- int32 input2_offset, int32 input2_multiplier,
- int input2_shift, int32 output_offset,
- int32 output_multiplier, int output_shift,
- int32 output_activation_min,
- int32 output_activation_max, uint8* output_data,
- const Dims<4>& output_dims) {
- static_assert(Ac == FusedActivationFunctionType::kNone ||
- Ac == FusedActivationFunctionType::kRelu ||
- Ac == FusedActivationFunctionType::kRelu6 ||
- Ac == FusedActivationFunctionType::kRelu1,
- "");
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
- if (Ac == FusedActivationFunctionType::kNone) {
- TFLITE_DCHECK_EQ(output_activation_min, 0);
- TFLITE_DCHECK_EQ(output_activation_max, 255);
- }
- BroadcastAdd(left_shift, input1_data, input1_dims, input1_offset,
- input1_multiplier, input1_shift, input2_data, input2_dims,
- input2_offset, input2_multiplier, input2_shift, output_offset,
- output_multiplier, output_shift, output_activation_min,
- output_activation_max, output_data, output_dims);
-}
-
-template <FusedActivationFunctionType Ac>
-inline void BroadcastAddFivefold(
- int y0, int y1, int y2, int y3, int y4, int left_shift,
- const uint8* input1_data, const Dims<4>& input1_dims, int32 input1_offset,
- int32 input1_multiplier, int input1_shift, const uint8* input2_data,
- const Dims<4>& input2_dims, int32 input2_offset, int32 input2_multiplier,
- int input2_shift, int32 output_offset, int32 output_multiplier,
- int output_shift, int32 output_activation_min, int32 output_activation_max,
- uint8* output_data, const Dims<4>& output_dims) {
- static_assert(Ac == FusedActivationFunctionType::kNone ||
- Ac == FusedActivationFunctionType::kRelu ||
- Ac == FusedActivationFunctionType::kRelu6 ||
- Ac == FusedActivationFunctionType::kRelu1,
- "");
- TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
- if (Ac == FusedActivationFunctionType::kNone) {
- TFLITE_DCHECK_EQ(output_activation_min, 0);
- TFLITE_DCHECK_EQ(output_activation_max, 255);
- }
- BroadcastAddFivefold(y0, y1, y2, y3, y4, left_shift, input1_data, input1_dims,
- input1_offset, input1_multiplier, input1_shift,
- input2_data, input2_dims, input2_offset,
- input2_multiplier, input2_shift, output_offset,
- output_multiplier, output_shift, output_activation_min,
- output_activation_max, output_data, output_dims);
-}
-
template <typename T>
inline void Mul(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, const Dims<4>& input2_dims,
@@ -1654,10 +1519,11 @@ void BroadcastDiv(const T* input1_data, const Dims<4>& input1_dims,
}
}
-inline void Div(const float* input1_data, const Dims<4>& input1_dims,
- const float* input2_data, const Dims<4>& input2_dims,
- float output_activation_min, float output_activation_max,
- float* output_data, const Dims<4>& output_dims) {
+template <typename T>
+inline void Div(const T* input1_data, const Dims<4>& input1_dims,
+ const T* input2_data, const Dims<4>& input2_dims,
+ T output_activation_min, T output_activation_max,
+ T* output_data, const Dims<4>& output_dims) {
const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
@@ -1666,15 +1532,35 @@ inline void Div(const float* input1_data, const Dims<4>& input1_dims,
}
}
-inline void Sub(const float* input1_data, const Dims<4>& input1_dims,
- const float* input2_data, const Dims<4>& input2_dims,
- float output_activation_min, float output_activation_max,
- float* output_data, const Dims<4>& output_dims) {
- const int flat_size = MatchingFlatSize(input1_dims, input2_dims, output_dims);
+inline void SubNonBroadcast(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const float* input1_data,
+ const RuntimeShape& input2_shape,
+ const float* input2_data,
+ const RuntimeShape& output_shape,
+ float* output_data) {
+ const int flat_size =
+ MatchingFlatSize(input1_shape, input2_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
- input1_data[i] - input2_data[i], output_activation_min,
- output_activation_max);
+ input1_data[i] - input2_data[i], params.float_activation_min,
+ params.float_activation_max);
+ }
+}
+
+inline void SubNonBroadcast(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const int32* input1_data,
+ const RuntimeShape& input2_shape,
+ const int32* input2_data,
+ const RuntimeShape& output_shape,
+ int32* output_data) {
+ const int flat_size =
+ MatchingFlatSize(input1_shape, input2_shape, output_shape);
+ for (int i = 0; i < flat_size; ++i) {
+ output_data[i] = ActivationFunctionWithMinMax(
+ input1_data[i] - input2_data[i], params.quantized_activation_min,
+ params.quantized_activation_max);
}
}
@@ -1682,16 +1568,24 @@ inline void Sub(const float* input1_data, const Dims<4>& input1_dims,
// dimensionality if the runtime code does a single loop over one dimension
// that handles broadcasting as the base case. The code generator would then
// generate max(D1, D2) nested for loops.
-template <typename T>
-void BroadcastSub(const T* input1_data, const Dims<4>& input1_dims,
- const T* input2_data, const Dims<4>& input2_dims,
- T output_activation_min, T output_activation_max,
- T* output_data, const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("BroadcastSub");
-
+// TODO(benoitjacob): BroadcastSub is intentionally duplicated from
+// reference_ops.h. Once an optimized version is implemented and NdArrayDesc<T>
+// is no longer referenced in this file, move NdArrayDesc<T> from types.h to
+// reference_ops.h.
+inline void BroadcastSub4DSlow(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const float* input1_data,
+ const RuntimeShape& input2_shape,
+ const float* input2_data,
+ const RuntimeShape& output_shape,
+ float* output_data) {
+  gemmlowp::ScopedProfilingLabel label("BroadcastSub4DSlow/float");
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
- NdArrayDescsForElementwiseBroadcast(input1_dims, input2_dims, &desc1, &desc2);
+ NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
+ &desc2);
+ RuntimeShape extended_output_shape =
+ RuntimeShape::ExtendedShape(4, output_shape);
// In Tensorflow, the dimensions are canonically named (batch_number, row,
// col, channel), with extents (batches, height, width, depth), with the
@@ -1704,36 +1598,35 @@ void BroadcastSub(const T* input1_data, const Dims<4>& input1_dims,
// We name our variables by their Tensorflow convention, but generate C code
// nesting loops such that the innermost loop has the smallest stride for the
// best cache behavior.
- for (int b = 0; b < ArraySize(output_dims, 3); ++b) {
- for (int y = 0; y < ArraySize(output_dims, 2); ++y) {
- for (int x = 0; x < ArraySize(output_dims, 1); ++x) {
- for (int c = 0; c < ArraySize(output_dims, 0); ++c) {
- output_data[Offset(output_dims, c, x, y, b)] =
+ for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
+ for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
+ for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
+ for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
+ output_data[Offset(extended_output_shape, b, y, x, c)] =
ActivationFunctionWithMinMax(
- input1_data[SubscriptToIndex(desc1, c, x, y, b)] -
- input2_data[SubscriptToIndex(desc2, c, x, y, b)],
- output_activation_min, output_activation_max);
+ input1_data[SubscriptToIndex(desc1, b, y, x, c)] -
+ input2_data[SubscriptToIndex(desc2, b, y, x, c)],
+ params.float_activation_min, params.float_activation_max);
}
}
}
}
}
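The broadcasting in BroadcastSub4DSlow relies on the descriptors produced by NdArrayDescsForElementwiseBroadcast effectively giving broadcast dimensions a zero stride, so SubscriptToIndex maps every output coordinate along such a dimension back to the same input element. A minimal, self-contained editorial sketch (not part of this change) of that stride trick:

#include <cstdio>

int main() {
  // Hypothetical input of shape {1, 1, 1, 3} broadcast against an output of
  // shape {2, 2, 2, 3}. Dimensions of extent 1 receive stride 0.
  const int extents[4] = {1, 1, 1, 3};
  int strides[4];
  int next = 1;
  for (int i = 3; i >= 0; --i) {  // innermost dimension gets the smallest stride
    strides[i] = (extents[i] == 1) ? 0 : next;
    next *= extents[i];
  }
  const float input[3] = {10.f, 20.f, 30.f};
  // Any (b, y, x) coordinate collapses onto the same three channel values.
  for (int c = 0; c < 3; ++c) {
    const int flat =
        1 * strides[0] + 1 * strides[1] + 1 * strides[2] + c * strides[3];
    std::printf("c=%d -> input[%d] = %g\n", c, flat, input[flat]);
  }
  return 0;
}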
-inline void BroadcastSub(int left_shift, const uint8* input1_data,
- const Dims<4>& input1_dims, int32 input1_offset,
- int32 input1_multiplier, int input1_shift,
- const uint8* input2_data, const Dims<4>& input2_dims,
- int32 input2_offset, int32 input2_multiplier,
- int input2_shift, int32 output_offset,
- int32 output_multiplier, int output_shift,
- int32 output_activation_min,
- int32 output_activation_max, uint8* output_data,
- const Dims<4>& output_dims) {
- gemmlowp::ScopedProfilingLabel label("BroadcastSub/8bit");
-
+inline void BroadcastSub4DSlow(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const uint8* input1_data,
+ const RuntimeShape& input2_shape,
+ const uint8* input2_data,
+ const RuntimeShape& output_shape,
+ uint8* output_data) {
+  gemmlowp::ScopedProfilingLabel label("BroadcastSub4DSlow/uint8");
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
- NdArrayDescsForElementwiseBroadcast(input1_dims, input2_dims, &desc1, &desc2);
+ NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
+ &desc2);
+ RuntimeShape extended_output_shape =
+ RuntimeShape::ExtendedShape(4, output_shape);
// In Tensorflow, the dimensions are canonically named (batch_number, row,
// col, channel), with extents (batches, height, width, depth), with the
@@ -1746,33 +1639,37 @@ inline void BroadcastSub(int left_shift, const uint8* input1_data,
// We name our variables by their Tensorflow convention, but generate C code
// nesting loops such that the innermost loop has the smallest stride for the
// best cache behavior.
- for (int b = 0; b < ArraySize(output_dims, 3); ++b) {
- for (int y = 0; y < ArraySize(output_dims, 2); ++y) {
- for (int x = 0; x < ArraySize(output_dims, 1); ++x) {
- for (int c = 0; c < ArraySize(output_dims, 0); ++c) {
+ for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
+ for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
+ for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
+ for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
const int32 input1_val =
- input1_offset + input1_data[SubscriptToIndex(desc1, c, x, y, b)];
+ params.input1_offset +
+ input1_data[SubscriptToIndex(desc1, b, y, x, c)];
const int32 input2_val =
- input2_offset + input2_data[SubscriptToIndex(desc2, c, x, y, b)];
- const int32 shifted_input1_val = input1_val * (1 << left_shift);
- const int32 shifted_input2_val = input2_val * (1 << left_shift);
+ params.input2_offset +
+ input2_data[SubscriptToIndex(desc2, b, y, x, c)];
+ const int32 shifted_input1_val =
+ input1_val * (1 << params.left_shift);
+ const int32 shifted_input2_val =
+ input2_val * (1 << params.left_shift);
const int32 scaled_input1_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input1_val, input1_multiplier,
- kReverseShift * input1_shift);
+ shifted_input1_val, params.input1_multiplier,
+ params.input1_shift);
const int32 scaled_input2_val =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
- shifted_input2_val, input2_multiplier,
- kReverseShift * input2_shift);
+ shifted_input2_val, params.input2_multiplier,
+ params.input2_shift);
const int32 raw_sub = scaled_input1_val - scaled_input2_val;
const int32 raw_output =
MultiplyByQuantizedMultiplierSmallerThanOneExp(
- raw_sub, output_multiplier, kReverseShift * output_shift) +
- output_offset;
+ raw_sub, params.output_multiplier, params.output_shift) +
+ params.output_offset;
const int32 clamped_output =
- std::min(output_activation_max,
- std::max(output_activation_min, raw_output));
- output_data[Offset(output_dims, c, x, y, b)] =
+ std::min(params.quantized_activation_max,
+ std::max(params.quantized_activation_min, raw_output));
+ output_data[Offset(extended_output_shape, b, y, x, c)] =
static_cast<uint8>(clamped_output);
}
}
@@ -1780,6 +1677,156 @@ inline void BroadcastSub(int left_shift, const uint8* input1_data,
}
}
+inline void BroadcastSub4DSlow(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const int32* input1_data,
+ const RuntimeShape& input2_shape,
+ const int32* input2_data,
+ const RuntimeShape& output_shape,
+ int32* output_data) {
+  gemmlowp::ScopedProfilingLabel label("BroadcastSub4DSlow/int32");
+ NdArrayDesc<4> desc1;
+ NdArrayDesc<4> desc2;
+ NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
+ &desc2);
+ RuntimeShape extended_output_shape =
+ RuntimeShape::ExtendedShape(4, output_shape);
+
+ // In Tensorflow, the dimensions are canonically named (batch_number, row,
+ // col, channel), with extents (batches, height, width, depth), with the
+ // trailing dimension changing most rapidly (channels has the smallest stride,
+ // typically 1 element).
+ //
+ // In generated C code, we store arrays with the dimensions reversed. The
+ // first dimension has smallest stride.
+ //
+ // We name our variables by their Tensorflow convention, but generate C code
+ // nesting loops such that the innermost loop has the smallest stride for the
+ // best cache behavior.
+ for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
+ for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
+ for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
+ for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
+ output_data[Offset(extended_output_shape, b, y, x, c)] =
+ ActivationFunctionWithMinMax(
+ input1_data[SubscriptToIndex(desc1, b, y, x, c)] -
+ input2_data[SubscriptToIndex(desc2, b, y, x, c)],
+ params.quantized_activation_min,
+ params.quantized_activation_max);
+ }
+ }
+ }
+ }
+}
+
+template <typename T>
+void BroadcastSub4DSlow(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape, const T* input1_data,
+ const RuntimeShape& input2_shape, const T* input2_data,
+ const RuntimeShape& output_shape, T* output_data) {
+  gemmlowp::ScopedProfilingLabel label("BroadcastSub4DSlow/templated");
+ NdArrayDesc<4> desc1;
+ NdArrayDesc<4> desc2;
+ NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
+ &desc2);
+ RuntimeShape extended_output_shape =
+ RuntimeShape::ExtendedShape(4, output_shape);
+
+ // In Tensorflow, the dimensions are canonically named (batch_number, row,
+ // col, channel), with extents (batches, height, width, depth), with the
+ // trailing dimension changing most rapidly (channels has the smallest stride,
+ // typically 1 element).
+ //
+ // In generated C code, we store arrays with the dimensions reversed. The
+ // first dimension has smallest stride.
+ //
+ // We name our variables by their Tensorflow convention, but generate C code
+ // nesting loops such that the innermost loop has the smallest stride for the
+ // best cache behavior.
+ for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
+ for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
+ for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
+ for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
+ output_data[Offset(extended_output_shape, b, y, x, c)] =
+ ActivationFunctionWithMinMax(
+ input1_data[SubscriptToIndex(desc1, b, y, x, c)] -
+ input2_data[SubscriptToIndex(desc2, b, y, x, c)],
+ params.quantized_activation_min,
+ params.quantized_activation_max);
+ }
+ }
+ }
+ }
+}
+
+template <typename T>
+void Sub(const ArithmeticParams& params, const RuntimeShape& input1_shape,
+ const T* input1_data, const RuntimeShape& input2_shape,
+ const T* input2_data, const RuntimeShape& output_shape,
+ T* output_data) {
+ NdArrayDesc<4> desc1;
+ NdArrayDesc<4> desc2;
+ NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
+ &desc2);
+ RuntimeShape extended_output_shape =
+ RuntimeShape::ExtendedShape(4, output_shape);
+
+ // In Tensorflow, the dimensions are canonically named (batch_number, row,
+ // col, channel), with extents (batches, height, width, depth), with the
+ // trailing dimension changing most rapidly (channels has the smallest stride,
+ // typically 1 element).
+ //
+ // In generated C code, we store arrays with the dimensions reversed. The
+ // first dimension has smallest stride.
+ //
+ // We name our variables by their Tensorflow convention, but generate C code
+ // nesting loops such that the innermost loop has the smallest stride for the
+ // best cache behavior.
+ for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
+ for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
+ for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
+ for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
+ output_data[Offset(extended_output_shape, b, y, x, c)] =
+ input1_data[SubscriptToIndex(desc1, b, y, x, c)] -
+ input2_data[SubscriptToIndex(desc2, b, y, x, c)];
+ }
+ }
+ }
+ }
+}
+
+inline void SubWithActivation(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const int32* input1_data,
+ const RuntimeShape& input2_shape,
+ const int32* input2_data,
+ const RuntimeShape& output_shape,
+ int32* output_data) {
+ const int flat_size =
+      MatchingFlatSize(input1_shape, input2_shape, output_shape);
+ for (int i = 0; i < flat_size; ++i) {
+ output_data[i] = ActivationFunctionWithMinMax(
+ input1_data[i] - input2_data[i], params.quantized_activation_min,
+ params.quantized_activation_max);
+ }
+}
+
+inline void SubWithActivation(const ArithmeticParams& params,
+ const RuntimeShape& input1_shape,
+ const float* input1_data,
+ const RuntimeShape& input2_shape,
+ const float* input2_data,
+ const RuntimeShape& output_shape,
+ float* output_data) {
+ const int flat_size =
+      MatchingFlatSize(input1_shape, input2_shape, output_shape);
+ for (int i = 0; i < flat_size; ++i) {
+ output_data[i] = ActivationFunctionWithMinMax(
+ input1_data[i] - input2_data[i], params.float_activation_min,
+ params.float_activation_max);
+ }
+}
+
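A minimal editorial usage sketch (not part of this change) of the params-style Sub entry points added above. It assumes <limits> is available, that these helpers live in tflite::reference_ops like the rest of this header, and that the ArithmeticParams / SetActivationParams additions to types.h appearing later in this diff are in scope.

#include <limits>

void SubSketch() {
  tflite::ArithmeticParams params;
  // The float specialization fills float_activation_{min,max}; use the widest
  // possible clamp so the subtraction is effectively unclamped.
  tflite::SetActivationParams(std::numeric_limits<float>::lowest(),
                              std::numeric_limits<float>::max(), &params);
  const tflite::RuntimeShape shape({1, 2, 2, 1});
  const float input1[4] = {5.f, 4.f, 3.f, 2.f};
  const float input2[4] = {1.f, 1.f, 1.f, 1.f};
  float output[4];
  tflite::reference_ops::SubNonBroadcast(params, shape, input1, shape, input2,
                                         shape, output);
  // output is now {4, 3, 2, 1}.
}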
template <FusedActivationFunctionType Ac, typename Scalar>
void Concatenation(int concat_dim, const Scalar* const* input_data,
const Dims<4>* const* input_dims, int inputs_count,
@@ -1813,6 +1860,26 @@ void Concatenation(int concat_dim, const Scalar* const* input_data,
}
}
+template <typename Scalar>
+void Pack(int dim, const Scalar* const* input_data,
+ const Dims<4>* const* input_dims, int inputs_count,
+ Scalar* output_data, const Dims<4>& output_dims) {
+ TFLITE_DCHECK(IsPackedWithoutStrides(output_dims));
+ int outer_size = 1;
+ for (int i = dim + 1; i < 4; i++) {
+ outer_size *= output_dims.sizes[i];
+ }
+ Scalar* output_ptr = output_data;
+ const int copy_size = FlatSize(**input_dims) / outer_size;
+ for (int k = 0; k < outer_size; k++) {
+ for (int i = 0; i < inputs_count; ++i) {
+ memcpy(output_ptr, input_data[i] + k * copy_size,
+ copy_size * sizeof(Scalar));
+ output_ptr += copy_size;
+ }
+ }
+}
+
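To make the interleaved copy in Pack concrete, here is a self-contained editorial sketch (not part of this change) of the same loop on plain arrays; with outer_size = 2 and copy_size = 1 it reproduces packing three length-2 vectors along axis 1, the case exercised by the pack tests added later in this diff.

#include <cstdio>
#include <cstring>

int main() {
  const float a[2] = {1, 4}, b[2] = {2, 5}, c[2] = {3, 6};
  const float* inputs[3] = {a, b, c};
  // outer_size counts elements to the left of the packed axis; copy_size is
  // the contiguous chunk copied from each input per outer step.
  const int outer_size = 2, copy_size = 1, inputs_count = 3;
  float output[6];
  float* out_ptr = output;
  for (int k = 0; k < outer_size; ++k) {
    for (int i = 0; i < inputs_count; ++i) {
      std::memcpy(out_ptr, inputs[i] + k * copy_size, copy_size * sizeof(float));
      out_ptr += copy_size;
    }
  }
  for (float v : output) std::printf("%g ", v);  // prints: 1 2 3 4 5 6
  return 0;
}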
// TODO(prabhumk): This is the same as the optimized implementation.
// TODO(prabhumk): The quantized implementation of concatenation isn't fully
// quantized as it takes scale as a floating point value. This should be fixed
@@ -3467,9 +3534,9 @@ inline bool Reduce(const In* input_data, const int* input_dims,
const int* output_dims, const int input_num_dims,
const int output_num_dims, const int* axis,
const int num_axis, int* input_iter,
- Out reducer(Out current, const In in), Out* output_data) {
+ Out reducer(const Out current, const In in),
+ Out* output_data) {
// Reset input iterator.
- TFLITE_DCHECK(input_num_dims > 0);
for (int idx = 0; idx < input_num_dims; ++idx) {
input_iter[idx] = 0;
}
@@ -3485,11 +3552,16 @@ inline bool Reduce(const In* input_data, const int* input_dims,
return true;
}
-inline bool ResolveAxis(const int num_dims, const int* axis, const int num_axis,
- int* out_axis, int* out_num_axis) {
+inline bool ResolveAxis(const int num_dims, const int* axis,
+ const int64_t num_axis, int* out_axis,
+ int* out_num_axis) {
*out_num_axis = 0; // Just in case.
+ // Short-circuit axis resolution for scalars; the axis will go unused.
+ if (num_dims == 0) {
+ return true;
+ }
  // O(n^2) is fine since out_num_axis should be really small, mostly <= 4
- for (int idx = 0; idx < num_axis; ++idx) {
+ for (int64_t idx = 0; idx < num_axis; ++idx) {
// Handle negative index.
int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx];
TFLITE_DCHECK(current >= 0 && current < num_dims);
@@ -3515,7 +3587,7 @@ inline bool ReduceSumImpl(const In* input_data, const int* input_dims,
const int output_num_dims, const int* axis,
const int num_axis, int* input_iter,
Out* output_data) {
- auto reducer = [](Out current, const In in) -> Out {
+ auto reducer = [](const Out current, const In in) -> Out {
const Out actual_in = static_cast<Out>(in);
return current + actual_in;
};
@@ -3524,6 +3596,24 @@ inline bool ReduceSumImpl(const In* input_data, const int* input_dims,
output_data);
}
+template <typename T>
+inline bool InitTensorDataForReduce(const int* dims, const int num_dims,
+ const T init_value, T* data) {
+ size_t num_elements = 1;
+ for (int idx = 0; idx < num_dims; ++idx) {
+ size_t current = static_cast<size_t>(dims[idx]);
+ // Overflow prevention.
+ if (num_elements > std::numeric_limits<size_t>::max() / current) {
+ return false;
+ }
+ num_elements *= current;
+ }
+ for (size_t idx = 0; idx < num_elements; ++idx) {
+ data[idx] = init_value;
+ }
+ return true;
+}
+
// Computes the sum of elements across dimensions given in axis.
template <typename T>
inline bool Sum(const T* input_data, const int* input_dims,
@@ -3532,17 +3622,9 @@ inline bool Sum(const T* input_data, const int* input_dims,
const int* axis, const int num_axis_dimensions, bool keep_dims,
int* temp_index, int* resolved_axis) {
// Reset output data.
- size_t num_outputs = 1;
- for (int idx = 0; idx < output_num_dims; ++idx) {
- size_t current = static_cast<size_t>(output_dims[idx]);
- // Overflow prevention.
- if (num_outputs > std::numeric_limits<size_t>::max() / current) {
- return false;
- }
- num_outputs *= current;
- }
- for (size_t idx = 0; idx < num_outputs; ++idx) {
- output_data[idx] = T();
+ if (!InitTensorDataForReduce(output_dims, output_num_dims, static_cast<T>(0),
+ output_data)) {
+ return false;
}
// Resolve axis.
@@ -3557,6 +3639,61 @@ inline bool Sum(const T* input_data, const int* input_dims,
num_resolved_axis, temp_index, output_data);
}
+// Computes the max of elements across dimensions given in axis.
+template <typename T>
+inline bool ReduceMax(const T* input_data, const int* input_dims,
+ const int input_num_dims, T* output_data,
+ const int* output_dims, const int output_num_dims,
+ const int* axis, const int64_t num_axis_dimensions,
+ bool keep_dims, int* temp_index, int* resolved_axis) {
+ T init_value = std::numeric_limits<T>::lowest();
+ // Reset output data.
+ if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value,
+ output_data)) {
+ return false;
+ }
+
+ // Resolve axis.
+ int num_resolved_axis = 0;
+ if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
+ &num_resolved_axis)) {
+ return false;
+ }
+
+ auto reducer = [](const T current, const T in) -> T {
+ return (in > current) ? in : current;
+ };
+ return Reduce<T, T>(input_data, input_dims, output_dims, input_num_dims,
+ output_num_dims, resolved_axis, num_resolved_axis,
+ temp_index, reducer, output_data);
+}
+
+// Computes the prod of elements across dimensions given in axis.
+template <typename T>
+inline bool ReduceProd(const T* input_data, const int* input_dims,
+ const int input_num_dims, T* output_data,
+ const int* output_dims, const int output_num_dims,
+ const int* axis, const int64_t num_axis_dimensions,
+ bool keep_dims, int* temp_index, int* resolved_axis) {
+ // Reset output data.
+ if (!InitTensorDataForReduce(output_dims, output_num_dims, static_cast<T>(1),
+ output_data)) {
+ return false;
+ }
+
+ // Resolve axis.
+ int num_resolved_axis = 0;
+ if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
+ &num_resolved_axis)) {
+ return false;
+ }
+
+ auto reducer = [](const T current, const T in) -> T { return in * current; };
+ return Reduce<T, T>(input_data, input_dims, output_dims, input_num_dims,
+ output_num_dims, resolved_axis, num_resolved_axis,
+ temp_index, reducer, output_data);
+}
+
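The two reductions above differ only in their initial value and reducer lambda. As an editorial illustration (not part of this change), a hypothetical ReduceMin would follow the exact same pattern:

// Computes the min of elements across dimensions given in axis (sketch only).
template <typename T>
inline bool ReduceMin(const T* input_data, const int* input_dims,
                      const int input_num_dims, T* output_data,
                      const int* output_dims, const int output_num_dims,
                      const int* axis, const int64_t num_axis_dimensions,
                      bool keep_dims, int* temp_index, int* resolved_axis) {
  // Initialize the output to the identity of "min".
  if (!InitTensorDataForReduce(output_dims, output_num_dims,
                               std::numeric_limits<T>::max(), output_data)) {
    return false;
  }

  // Resolve axis.
  int num_resolved_axis = 0;
  if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
                   &num_resolved_axis)) {
    return false;
  }

  auto reducer = [](const T current, const T in) -> T {
    return (in < current) ? in : current;
  };
  return Reduce<T, T>(input_data, input_dims, output_dims, input_num_dims,
                      output_num_dims, resolved_axis, num_resolved_axis,
                      temp_index, reducer, output_data);
}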
// Computes the mean of elements across dimensions given in axis.
// It does so in two stages: first it calculates the sum of elements along the
// axis, then divides that sum by the number of elements in the axis.
@@ -3649,38 +3786,6 @@ inline void Mean(const T* input_data, const Dims<4>& input_dims,
}
template <typename T>
-void Sub(const T* input1_data, const Dims<4>& input1_dims, const T* input2_data,
- const Dims<4>& input2_dims, T* output_data,
- const Dims<4>& output_dims) {
- NdArrayDesc<4> desc1;
- NdArrayDesc<4> desc2;
- NdArrayDescsForElementwiseBroadcast(input1_dims, input2_dims, &desc1, &desc2);
-
- // In Tensorflow, the dimensions are canonically named (batch_number, row,
- // col, channel), with extents (batches, height, width, depth), with the
- // trailing dimension changing most rapidly (channels has the smallest stride,
- // typically 1 element).
- //
- // In generated C code, we store arrays with the dimensions reversed. The
- // first dimension has smallest stride.
- //
- // We name our variables by their Tensorflow convention, but generate C code
- // nesting loops such that the innermost loop has the smallest stride for the
- // best cache behavior.
- for (int b = 0; b < ArraySize(output_dims, 3); ++b) {
- for (int y = 0; y < ArraySize(output_dims, 2); ++y) {
- for (int x = 0; x < ArraySize(output_dims, 1); ++x) {
- for (int c = 0; c < ArraySize(output_dims, 0); ++c) {
- output_data[Offset(output_dims, c, x, y, b)] =
- input1_data[SubscriptToIndex(desc1, c, x, y, b)] -
- input2_data[SubscriptToIndex(desc2, c, x, y, b)];
- }
- }
- }
- }
-}
-
-template <typename T>
void TensorFlowMinimum(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, T* output_data,
const Dims<4>& output_dims) {
diff --git a/tensorflow/contrib/lite/kernels/internal/tensor_utils.h b/tensorflow/contrib/lite/kernels/internal/tensor_utils.h
index 5160e22307..82f4503127 100644
--- a/tensorflow/contrib/lite/kernels/internal/tensor_utils.h
+++ b/tensorflow/contrib/lite/kernels/internal/tensor_utils.h
@@ -124,6 +124,10 @@ void Sub1Vector(const float* vector, int v_size, float* result);
// Fill vector with 0.f.
void ZeroVector(float* vector, int v_size);
+// Multiply all elements of a vector by a scalar.
+void VectorScalarMultiply(const int8_t* vector, int v_size, float scale,
+ float* result);
+
// Clip elements of a vector using an abs_limit value.
void ClipVector(const float* vector, int v_size, float abs_limit,
float* result);
diff --git a/tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc b/tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc
index aa0d49ae4d..372a6efec5 100644
--- a/tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc
+++ b/tensorflow/contrib/lite/kernels/internal/tensor_utils_test.cc
@@ -32,6 +32,22 @@ TEST(uKernels, ClipTest) {
{0.0, -0.5, 1.0, -1.5, 2.0, -2.0, 2.0, -2.0, 2.0, -2.0})));
}
+TEST(uKernels, VectorScalarMultiply) {
+ constexpr int kVectorSize = 29;
+ static int8_t input[kVectorSize];
+  for (int i = 0; i < kVectorSize; ++i) {
+ input[i] = static_cast<int8_t>(i - 14);
+ }
+ const float scale = 0.1f;
+ std::vector<float> output(kVectorSize, 0.0f);
+ VectorScalarMultiply(input, kVectorSize, scale, output.data());
+ EXPECT_THAT(output,
+ ElementsAreArray(ArrayFloatNear(
+ {-1.4, -1.3, -1.2, -1.1, -1.0, -0.9, -0.8, -0.7, -0.6, -0.5,
+ -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5,
+ 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4})));
+}
+
TEST(uKernels, IsZeroTest) {
constexpr int kVectorSize = 21;
static float zeros[kVectorSize] = {0.0};
diff --git a/tensorflow/contrib/lite/kernels/internal/types.h b/tensorflow/contrib/lite/kernels/internal/types.h
index 737cfb69c9..c44698b677 100644
--- a/tensorflow/contrib/lite/kernels/internal/types.h
+++ b/tensorflow/contrib/lite/kernels/internal/types.h
@@ -119,6 +119,8 @@ class RuntimeShape {
// larger shapes are separately allocated.
static constexpr int kMaxSmallSize = 4;
+ RuntimeShape& operator=(RuntimeShape const&) = delete;
+
RuntimeShape() : size_(0) {}
explicit RuntimeShape(int dimensions_count) : size_(dimensions_count) {
@@ -135,6 +137,20 @@ class RuntimeShape {
BuildFrom(init_list);
}
+ // Avoid using this constructor. We should be able to delete it when C++17
+ // rolls out.
+ RuntimeShape(RuntimeShape const& other) : size_(other.DimensionsCount()) {
+ if (size_ > kMaxSmallSize) {
+ dims_pointer_ = new int32[size_];
+ }
+ std::memcpy(DimsData(), other.DimsData(), sizeof(int32) * size_);
+ }
+
+ bool operator==(const RuntimeShape& comp) const {
+ return this->size_ == comp.size_ &&
+ std::memcmp(DimsData(), comp.DimsData(), size_ * sizeof(int32)) == 0;
+ }
+
~RuntimeShape() {
if (size_ > kMaxSmallSize) {
delete[] dims_pointer_;
@@ -191,6 +207,16 @@ class RuntimeShape {
}
}
+ // This will probably be factored out. Old code made substantial use of 4-D
+ // shapes, and so this function is used to extend smaller shapes. Note that
+ // (a) as Dims<4>-dependent code is eliminated, the reliance on this should be
+  // reduced, and (b) some kernels are strictly 4-D, but then the shapes of their
+ // inputs should already be 4-D, so this function should not be needed.
+ inline static RuntimeShape ExtendedShape(int new_shape_size,
+ const RuntimeShape& shape) {
+ return RuntimeShape(new_shape_size, shape, 1);
+ }
+
inline void BuildFrom(const std::initializer_list<int> init_list) {
BuildFrom<const std::initializer_list<int>>(init_list);
}
@@ -208,7 +234,25 @@ class RuntimeShape {
return buffer_size;
}
+ bool operator!=(const RuntimeShape& comp) const { return !((*this) == comp); }
+
private:
+  // For use only by ExtendedShape(), written to guarantee (return-value) copy
+ // elision in C++17.
+ // This creates a shape padded to the desired size with the specified value.
+ RuntimeShape(int new_shape_size, const RuntimeShape& shape, int pad_value)
+ : size_(0) {
+ TFLITE_CHECK_GE(new_shape_size, shape.DimensionsCount());
+ TFLITE_CHECK_LE(new_shape_size, kMaxSmallSize);
+ Resize(new_shape_size);
+ const int size_increase = new_shape_size - shape.DimensionsCount();
+ for (int i = 0; i < size_increase; ++i) {
+ SetDim(i, pad_value);
+ }
+ std::memcpy(DimsData() + size_increase, shape.DimsData(),
+ sizeof(int32) * shape.DimensionsCount());
+ }
+
int32 size_;
union {
int32 dims_[kMaxSmallSize];
@@ -234,7 +278,9 @@ inline tflite::Dims<4> ToRuntimeDims(const tflite::RuntimeShape& array_shape) {
// Gets next index to iterate through a multidimensional array.
inline bool NextIndex(const int num_dims, const int* dims, int* current) {
- TFLITE_DCHECK_GT(num_dims, 0);
+ if (num_dims == 0) {
+ return false;
+ }
TFLITE_DCHECK(dims != nullptr);
TFLITE_DCHECK(current != nullptr);
int carry = 1;
@@ -261,7 +307,9 @@ inline bool NextIndex(const int num_dims, const int* dims, int* current) {
inline size_t ReducedOutputOffset(const int num_dims, const int* dims,
const int* index, const int num_axis,
const int* axis) {
- TFLITE_DCHECK_GT(num_dims, 0);
+ if (num_dims == 0) {
+ return 0;
+ }
TFLITE_DCHECK(dims != nullptr);
TFLITE_DCHECK(index != nullptr);
size_t offset = 0;
@@ -364,6 +412,7 @@ inline int RequiredBufferSizeForDims(const Dims<4>& dims) {
// arrays.
inline int MatchingFlatSize(const RuntimeShape& shape,
const RuntimeShape& check_shape_0) {
+ TFLITE_DCHECK_EQ(shape.DimensionsCount(), check_shape_0.DimensionsCount());
const int dims_count = shape.DimensionsCount();
for (int i = 0; i < dims_count; ++i) {
TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i));
@@ -374,6 +423,7 @@ inline int MatchingFlatSize(const RuntimeShape& shape,
inline int MatchingFlatSize(const RuntimeShape& shape,
const RuntimeShape& check_shape_0,
const RuntimeShape& check_shape_1) {
+ TFLITE_DCHECK_EQ(shape.DimensionsCount(), check_shape_0.DimensionsCount());
const int dims_count = shape.DimensionsCount();
for (int i = 0; i < dims_count; ++i) {
TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i));
@@ -385,6 +435,7 @@ inline int MatchingFlatSize(const RuntimeShape& shape,
const RuntimeShape& check_shape_0,
const RuntimeShape& check_shape_1,
const RuntimeShape& check_shape_2) {
+ TFLITE_DCHECK_EQ(shape.DimensionsCount(), check_shape_0.DimensionsCount());
const int dims_count = shape.DimensionsCount();
for (int i = 0; i < dims_count; ++i) {
TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i));
@@ -397,6 +448,7 @@ inline int MatchingFlatSize(const RuntimeShape& shape,
const RuntimeShape& check_shape_1,
const RuntimeShape& check_shape_2,
const RuntimeShape& check_shape_3) {
+ TFLITE_DCHECK_EQ(shape.DimensionsCount(), check_shape_0.DimensionsCount());
const int dims_count = shape.DimensionsCount();
for (int i = 0; i < dims_count; ++i) {
TFLITE_DCHECK_EQ(shape.Dims(i), check_shape_0.Dims(i));
@@ -601,14 +653,74 @@ struct PoolParams {
int stride_width;
int filter_height;
int filter_width;
- // uint8, etc, inference params.
+ // uint8, etc, activation params.
int32 quantized_activation_min;
int32 quantized_activation_max;
- // float inference params.
+ // float activation params.
float float_activation_min;
float float_activation_max;
};
+enum class BroadcastableOpCategory : uint8 {
+ kNone,
+ kNonBroadcast, // Matching input shapes.
+ kFirstInputBroadcastsFast, // Fivefold nested loops.
+ kSecondInputBroadcastsFast, // Fivefold nested loops.
+ kGenericBroadcast, // Fall-back.
+};
+
+// For Add, Sub, Mul ops.
+struct ArithmeticParams {
+ // Shape dependent / common to data / op types.
+ BroadcastableOpCategory broadcast_category;
+ // uint8 inference params.
+ int32 input1_offset;
+ int32 input2_offset;
+ int32 output_offset;
+ int32 output_multiplier;
+ int output_shift;
+ // Add / Sub, not Mul, uint8 inference params.
+ int left_shift;
+ int32 input1_multiplier;
+ int input1_shift;
+ int32 input2_multiplier;
+ int input2_shift;
+ // uint8, etc, activation params.
+ int32 quantized_activation_min;
+ int32 quantized_activation_max;
+ // float activation params.
+ float float_activation_min;
+ float float_activation_max;
+
+ // Processed output dimensions.
+ // Let input "a" be the one that broadcasts in the faster-changing dimension.
+ // Then, after coalescing, for shapes {a0, a1, a2, a3, a4} and
+ // {b0, b1, b2, b3, b4},
+ // broadcast_shape[4] = b0 = a0.
+ // broadcast_shape[3] = b1; a1 = 1.
+ // broadcast_shape[2] = b2 = a2.
+ // broadcast_shape[1] = a3; b3 = 1.
+ // broadcast_shape[0] = b4 = a4.
+ int broadcast_shape[5];
+};
+
+template <typename T>
+inline void SetActivationParams(T min, T max, ArithmeticParams* params);
+
+template <>
+inline void SetActivationParams(float min, float max,
+ ArithmeticParams* params) {
+ params->float_activation_min = min;
+ params->float_activation_max = max;
+}
+
+template <>
+inline void SetActivationParams(int32 min, int32 max,
+ ArithmeticParams* params) {
+ params->quantized_activation_min = min;
+ params->quantized_activation_max = max;
+}
+
} // namespace tflite
#endif // TENSORFLOW_CONTRIB_LITE_KERNELS_INTERNAL_TYPES_H_
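A small editorial sketch (not part of this change) of how the types.h additions above behave; it assumes the header is included as in the kernels that use it.

#include <limits>

void TypesSketch() {
  // ExtendedShape pads on the left with 1s, so the flat size is preserved and
  // the extra leading loops in the 4-D kernels execute exactly once.
  tflite::RuntimeShape small({3, 2});
  tflite::RuntimeShape ext = tflite::RuntimeShape::ExtendedShape(4, small);
  // ext holds {1, 1, 3, 2}; ext.FlatSize() == small.FlatSize() == 6.

  // SetActivationParams dispatches on the element type: the float
  // specialization fills float_activation_{min,max}, while the int32 one
  // fills quantized_activation_{min,max}.
  tflite::ArithmeticParams params;
  tflite::SetActivationParams(std::numeric_limits<float>::lowest(),
                              std::numeric_limits<float>::max(), &params);
}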
diff --git a/tensorflow/contrib/lite/kernels/lsh_projection.cc b/tensorflow/contrib/lite/kernels/lsh_projection.cc
index 25d2dc2cdd..69523b02cc 100644
--- a/tensorflow/contrib/lite/kernels/lsh_projection.cc
+++ b/tensorflow/contrib/lite/kernels/lsh_projection.cc
@@ -50,7 +50,6 @@ limitations under the License.
// Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
// A flattened tensor represents projected bit vectors.
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
diff --git a/tensorflow/contrib/lite/kernels/lstm.cc b/tensorflow/contrib/lite/kernels/lstm.cc
index 3577ae6caa..ba251c451e 100644
--- a/tensorflow/contrib/lite/kernels/lstm.cc
+++ b/tensorflow/contrib/lite/kernels/lstm.cc
@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
@@ -97,7 +96,7 @@ constexpr int kCellStateTensor = 1;
constexpr int kOutputTensor = 2;
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
- auto* op_data = new OpData;
+ auto* op_data = new OpData();
op_data->kernel_type = kTfLiteLSTMFullKernel;
context->AddTensors(context, /*tensors_to_add=*/7,
&op_data->scratch_tensor_index);
@@ -306,7 +305,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const int n_output = recurrent_to_output_weights->dims->data[1];
// Check that input tensor dimensions matches with each other.
- CheckInputTensorDimensions(context, node, n_input, n_output, n_cell);
+ TF_LITE_ENSURE_OK(context, CheckInputTensorDimensions(context, node, n_input,
+ n_output, n_cell));
// Get the pointer to output, activation_state and cell_state tensors.
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
@@ -846,7 +846,7 @@ enum OutputTensor {
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
- auto* op_data = new OpData;
+ auto* op_data = new OpData();
op_data->kernel_type = kTfLiteLSTMBasicKernel;
// `scratch_tensor_index` is unused in this kernel.
op_data->scratch_tensor_index = -1;
diff --git a/tensorflow/contrib/lite/kernels/lstm_test.cc b/tensorflow/contrib/lite/kernels/lstm_test.cc
index 0b7c56133e..0266f5fe57 100644
--- a/tensorflow/contrib/lite/kernels/lstm_test.cc
+++ b/tensorflow/contrib/lite/kernels/lstm_test.cc
@@ -13,6 +13,9 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Unit test for TFLite LSTM op.
+//
+// TODO(alanchiao): add unit test with invalid input dimensions for this and its
+// variants.
#include <memory>
#include <vector>
diff --git a/tensorflow/contrib/lite/kernels/pack.cc b/tensorflow/contrib/lite/kernels/pack.cc
new file mode 100644
index 0000000000..bb3416f6a6
--- /dev/null
+++ b/tensorflow/contrib/lite/kernels/pack.cc
@@ -0,0 +1,131 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/lite/builtin_op_data.h"
+#include "tensorflow/contrib/lite/context.h"
+#include "tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h"
+#include "tensorflow/contrib/lite/kernels/internal/tensor.h"
+#include "tensorflow/contrib/lite/kernels/kernel_util.h"
+
+namespace tflite {
+namespace ops {
+namespace builtin {
+namespace pack {
+namespace {
+
+constexpr int kOutputTensor = 0;
+
+// Op data for pack op.
+struct OpData {
+ int values_count;
+ int axis;
+};
+
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+ auto* data = new OpData;
+ data->axis = 0;
+ return data;
+}
+
+void Free(TfLiteContext* context, void* buffer) {
+ delete reinterpret_cast<OpData*>(buffer);
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+ const OpData* data = reinterpret_cast<OpData*>(node->builtin_data);
+
+ TF_LITE_ENSURE_EQ(context, NumInputs(node), data->values_count);
+ TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+ const TfLiteTensor* input0 = GetInput(context, node, 0);
+ TF_LITE_ENSURE(context, NumDimensions(input0) < 4);
+ TF_LITE_ENSURE(context, NumDimensions(input0) >= data->axis);
+ // TODO(renjieliu): Support negative axis.
+ TF_LITE_ENSURE(context, data->axis >= 0);
+ if (input0->type != kTfLiteInt32 && input0->type != kTfLiteFloat32) {
+ context->ReportError(context,
+ "Currently pack only supports int32 and float32.");
+ return kTfLiteError;
+ }
+ // Make sure all inputs have the same shape and type.
+ for (int i = 1; i < data->values_count; ++i) {
+ const TfLiteTensor* input = GetInput(context, node, i);
+ TF_LITE_ENSURE(context, HaveSameShapes(input0, input));
+ TF_LITE_ENSURE_EQ(context, input0->type, input->type);
+ }
+
+ // Resize output. rank R will become rank R + 1
+ const int dimension_size = NumDimensions(input0) + 1;
+ const TfLiteIntArray* input_shape = input0->dims;
+ TfLiteIntArray* output_shape = TfLiteIntArrayCreate(dimension_size);
+ int i = 0;
+ for (int index = 0; index < dimension_size; ++index) {
+ if (index == data->axis) {
+ output_shape->data[index] = data->values_count;
+ } else {
+ output_shape->data[index] = input_shape->data[i++];
+ }
+ }
+
+ TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+ TF_LITE_ENSURE_EQ(context, output->type, input0->type);
+
+ return context->ResizeTensor(context, output, output_shape);
+}
+
+template <typename T>
+void PackImpl(TfLiteContext* context, TfLiteNode* node, TfLiteTensor* output,
+ int values_count, int axis) {
+ VectorOfTensors<T> all_inputs(*context, *node->inputs);
+ reference_ops::Pack<T>(RemapDim(NumDimensions(output), axis),
+ all_inputs.data(), all_inputs.dims(), values_count,
+ GetTensorData<T>(output), GetTensorDims(output));
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+ const OpData* data = reinterpret_cast<OpData*>(node->builtin_data);
+
+ TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+ switch (output->type) {
+ case kTfLiteFloat32: {
+ PackImpl<float>(context, node, output, data->values_count, data->axis);
+ break;
+ }
+ case kTfLiteInt32: {
+ PackImpl<int32_t>(context, node, output, data->values_count, data->axis);
+ break;
+ }
+ default: {
+ context->ReportError(context,
+ "Currently pack only supports int32 and float32.");
+ return kTfLiteError;
+ }
+ }
+
+ return kTfLiteOk;
+}
+
+} // namespace
+} // namespace pack
+
+TfLiteRegistration* Register_PACK() {
+ static TfLiteRegistration r = {pack::Init, pack::Free, pack::Prepare,
+ pack::Eval};
+ return &r;
+}
+
+} // namespace builtin
+} // namespace ops
+} // namespace tflite
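Editorial note (not part of this change): for the new PACK kernel to be reachable from a model, the op must also be registered with the builtin op resolver, typically along the lines of

  resolver.AddBuiltin(BuiltinOperator_PACK, tflite::ops::builtin::Register_PACK());

The corresponding registration change is not shown in this excerpt.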
diff --git a/tensorflow/contrib/lite/kernels/pack_test.cc b/tensorflow/contrib/lite/kernels/pack_test.cc
new file mode 100644
index 0000000000..485a50ad3a
--- /dev/null
+++ b/tensorflow/contrib/lite/kernels/pack_test.cc
@@ -0,0 +1,120 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/interpreter.h"
+#include "tensorflow/contrib/lite/kernels/register.h"
+#include "tensorflow/contrib/lite/kernels/test_util.h"
+#include "tensorflow/contrib/lite/model.h"
+
+namespace tflite {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+
+template <typename T>
+class PackOpModel : public SingleOpModel {
+ public:
+ PackOpModel(const TensorData& input_template, int axis, int values_count) {
+ std::vector<std::vector<int>> all_input_shapes;
+ for (int i = 0; i < values_count; ++i) {
+ all_input_shapes.push_back(input_template.shape);
+ AddInput(input_template);
+ }
+ output_ = AddOutput({input_template.type, /*shape=*/{}, input_template.min,
+ input_template.max});
+ SetBuiltinOp(BuiltinOperator_PACK, BuiltinOptions_PackOptions,
+ CreatePackOptions(builder_, values_count, axis).Union());
+ BuildInterpreter(all_input_shapes);
+ }
+
+ void SetInput(int index, std::initializer_list<T> data) {
+ PopulateTensor(index, data);
+ }
+
+ std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
+ std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
+
+ private:
+ int output_;
+};
+
+TEST(PackOpTest, FloatThreeInputs) {
+ PackOpModel<float> model({TensorType_FLOAT32, {2}}, 0, 3);
+ model.SetInput(0, {1, 4});
+ model.SetInput(1, {2, 5});
+ model.SetInput(2, {3, 6});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 2));
+ EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 4, 2, 5, 3, 6}));
+}
+
+TEST(PackOpTest, FloatThreeInputsDifferentAxis) {
+ PackOpModel<float> model({TensorType_FLOAT32, {2}}, 1, 3);
+ model.SetInput(0, {1, 4});
+ model.SetInput(1, {2, 5});
+ model.SetInput(2, {3, 6});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 3));
+ EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
+}
+
+TEST(PackOpTest, FloatMultiDimensions) {
+ PackOpModel<float> model({TensorType_FLOAT32, {2, 3}}, 1, 2);
+ model.SetInput(0, {1, 2, 3, 4, 5, 6});
+ model.SetInput(1, {7, 8, 9, 10, 11, 12});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 2, 3));
+ EXPECT_THAT(model.GetOutput(),
+ ElementsAreArray({1, 2, 3, 7, 8, 9, 4, 5, 6, 10, 11, 12}));
+}
+
+TEST(PackOpTest, IntThreeInputs) {
+ PackOpModel<int32_t> model({TensorType_INT32, {2}}, 0, 3);
+ model.SetInput(0, {1, 4});
+ model.SetInput(1, {2, 5});
+ model.SetInput(2, {3, 6});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 2));
+ EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 4, 2, 5, 3, 6}));
+}
+
+TEST(PackOpTest, IntThreeInputsDifferentAxis) {
+ PackOpModel<int32_t> model({TensorType_INT32, {2}}, 1, 3);
+ model.SetInput(0, {1, 4});
+ model.SetInput(1, {2, 5});
+ model.SetInput(2, {3, 6});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 3));
+ EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
+}
+
+TEST(PackOpTest, IntMultiDimensions) {
+ PackOpModel<int32_t> model({TensorType_INT32, {2, 3}}, 1, 2);
+ model.SetInput(0, {1, 2, 3, 4, 5, 6});
+ model.SetInput(1, {7, 8, 9, 10, 11, 12});
+ model.Invoke();
+ EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 2, 3));
+ EXPECT_THAT(model.GetOutput(),
+ ElementsAreArray({1, 2, 3, 7, 8, 9, 4, 5, 6, 10, 11, 12}));
+}
+} // namespace
+} // namespace tflite
+
+int main(int argc, char** argv) {
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/kernels/pooling.cc b/tensorflow/contrib/lite/kernels/pooling.cc
index 9b0487ae16..29a5be0683 100644
--- a/tensorflow/contrib/lite/kernels/pooling.cc
+++ b/tensorflow/contrib/lite/kernels/pooling.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
diff --git a/tensorflow/contrib/lite/kernels/pow_test.cc b/tensorflow/contrib/lite/kernels/pow_test.cc
index 474d323bc3..74b3aef5bd 100644
--- a/tensorflow/contrib/lite/kernels/pow_test.cc
+++ b/tensorflow/contrib/lite/kernels/pow_test.cc
@@ -50,22 +50,22 @@ class PowOpModel : public SingleOpModel {
};
TEST(PowOpModel, Simple) {
- PowOpModel<int32> model({TensorType_INT32, {1, 2, 2, 1}},
- {TensorType_INT32, {1, 2, 2, 1}},
- {TensorType_INT32, {}});
- model.PopulateTensor<int32>(model.input1(), {12, 2, 7, 8});
- model.PopulateTensor<int32>(model.input2(), {1, 2, 3, 1});
+ PowOpModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {}});
+ model.PopulateTensor<int32_t>(model.input1(), {12, 2, 7, 8});
+ model.PopulateTensor<int32_t>(model.input2(), {1, 2, 3, 1});
model.Invoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(12, 4, 343, 8));
}
TEST(PowOpModel, NegativeAndZeroValue) {
- PowOpModel<int32> model({TensorType_INT32, {1, 2, 2, 1}},
- {TensorType_INT32, {1, 2, 2, 1}},
- {TensorType_INT32, {}});
- model.PopulateTensor<int32>(model.input1(), {0, 2, -7, 8});
- model.PopulateTensor<int32>(model.input2(), {1, 2, 3, 0});
+ PowOpModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {}});
+ model.PopulateTensor<int32_t>(model.input1(), {0, 2, -7, 8});
+ model.PopulateTensor<int32_t>(model.input2(), {1, 2, 3, 0});
model.Invoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 4, -343, 1));
@@ -98,10 +98,10 @@ TEST(PowOpModel, NegativeFloatTest) {
}
TEST(PowOpModel, BroadcastTest) {
- PowOpModel<int32> model({TensorType_INT32, {1, 2, 2, 1}},
- {TensorType_INT32, {1}}, {TensorType_INT32, {}});
- model.PopulateTensor<int32>(model.input1(), {12, 2, 7, 8});
- model.PopulateTensor<int32>(model.input2(), {4});
+ PowOpModel<int32_t> model({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1}}, {TensorType_INT32, {}});
+ model.PopulateTensor<int32_t>(model.input1(), {12, 2, 7, 8});
+ model.PopulateTensor<int32_t>(model.input2(), {4});
model.Invoke();
EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 2, 2, 1));
EXPECT_THAT(model.GetOutput(), ElementsAre(20736, 16, 2401, 4096));
diff --git a/tensorflow/contrib/lite/kernels/reduce.cc b/tensorflow/contrib/lite/kernels/reduce.cc
index 31c331a8c6..e99f67c725 100644
--- a/tensorflow/contrib/lite/kernels/reduce.cc
+++ b/tensorflow/contrib/lite/kernels/reduce.cc
@@ -78,6 +78,10 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context, OpContext* op_context) {
size_t num_axis = NumElements(op_context->axis);
const TfLiteIntArray* input_dims = op_context->input->dims;
int input_num_dims = NumDimensions(op_context->input);
+ if (input_num_dims == 0) {
+ return context->ResizeTensor(context, op_context->output,
+ TfLiteIntArrayCreate(0));
+ }
const int* axis = GetTensorData<int>(op_context->axis);
if (op_context->params->keep_dims) {
TfLiteIntArray* output_dims = TfLiteIntArrayCreate(input_num_dims);
@@ -315,6 +319,99 @@ TfLiteStatus EvalSum(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
+template <KernelType kernel_type>
+TfLiteStatus EvalProd(TfLiteContext* context, TfLiteNode* node) {
+ OpContext op_context(context, node);
+ int64_t num_axis = NumElements(op_context.axis);
+ TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
+ TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
+ // Resize the output tensor if the output tensor is dynamic.
+ if (IsDynamicTensor(op_context.output)) {
+ TF_LITE_ENSURE_OK(context,
+ ResizeTempAxis(context, &op_context, resolved_axis));
+ TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
+ }
+
+#define TF_LITE_PROD(kernel_type, data_type) \
+ kernel_type::ReduceProd<>( \
+ GetTensorData<data_type>(op_context.input), \
+ op_context.input->dims->data, op_context.input->dims->size, \
+ GetTensorData<data_type>(op_context.output), \
+ op_context.output->dims->data, op_context.output->dims->size, \
+ GetTensorData<int>(op_context.axis), num_axis, \
+ op_context.params->keep_dims, GetTensorData<int>(temp_index), \
+ GetTensorData<int>(resolved_axis))
+
+ if (kernel_type == kReference) {
+ switch (op_context.input->type) {
+ case kTfLiteFloat32:
+ TF_LITE_ENSURE(context, TF_LITE_PROD(reference_ops, float));
+ break;
+ case kTfLiteInt32:
+ TF_LITE_ENSURE(context, TF_LITE_PROD(reference_ops, int));
+ break;
+ case kTfLiteInt64:
+ TF_LITE_ENSURE(context, TF_LITE_PROD(reference_ops, int64_t));
+ break;
+ case kTfLiteUInt8:
+ // TODO(wangtz): uint8 reduce_prod is not yet supported.
+ default:
+ return kTfLiteError;
+ }
+ }
+#undef TF_LITE_PROD
+ return kTfLiteOk;
+}
+
+template <KernelType kernel_type>
+TfLiteStatus EvalMax(TfLiteContext* context, TfLiteNode* node) {
+ OpContext op_context(context, node);
+ int64_t num_axis = NumElements(op_context.axis);
+ TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
+ TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
+ // Resize the output tensor if the output tensor is dynamic.
+ if (IsDynamicTensor(op_context.output)) {
+ TF_LITE_ENSURE_OK(context,
+ ResizeTempAxis(context, &op_context, resolved_axis));
+ TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
+ }
+
+#define TF_LITE_MAX(kernel_type, data_type) \
+ kernel_type::ReduceMax<>( \
+ GetTensorData<data_type>(op_context.input), \
+ op_context.input->dims->data, op_context.input->dims->size, \
+ GetTensorData<data_type>(op_context.output), \
+ op_context.output->dims->data, op_context.output->dims->size, \
+ GetTensorData<int>(op_context.axis), num_axis, \
+ op_context.params->keep_dims, GetTensorData<int>(temp_index), \
+ GetTensorData<int>(resolved_axis))
+
+ if (kernel_type == kReference) {
+ switch (op_context.input->type) {
+ case kTfLiteFloat32:
+ TF_LITE_ENSURE(context, TF_LITE_MAX(reference_ops, float));
+ break;
+ case kTfLiteInt32:
+ TF_LITE_ENSURE(context, TF_LITE_MAX(reference_ops, int));
+ break;
+ case kTfLiteInt64:
+ TF_LITE_ENSURE(context, TF_LITE_MAX(reference_ops, int64_t));
+ break;
+ case kTfLiteUInt8:
+ TF_LITE_ENSURE_EQ(context, op_context.input->params.scale,
+ op_context.output->params.scale);
+ TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point,
+ op_context.output->params.zero_point);
+ TF_LITE_ENSURE(context, TF_LITE_MAX(reference_ops, uint8_t));
+ break;
+ default:
+ return kTfLiteError;
+ }
+ }
+#undef TF_LITE_MAX
+ return kTfLiteOk;
+}
+
} // namespace reduce
TfLiteRegistration* Register_MEAN_REF() {
@@ -331,9 +428,27 @@ TfLiteRegistration* Register_SUM_REF() {
return &r;
}
+TfLiteRegistration* Register_REDUCE_PROD_REF() {
+ static TfLiteRegistration r = {reduce::Init, reduce::Free,
+ reduce::PrepareSimple,
+ reduce::EvalProd<reduce::kReference>};
+ return &r;
+}
+
+TfLiteRegistration* Register_REDUCE_MAX_REF() {
+ static TfLiteRegistration r = {reduce::Init, reduce::Free,
+ reduce::PrepareSimple,
+ reduce::EvalMax<reduce::kReference>};
+ return &r;
+}
+
// TODO(kanlig): add optimized implementation of Mean.
TfLiteRegistration* Register_MEAN() { return Register_MEAN_REF(); }
TfLiteRegistration* Register_SUM() { return Register_SUM_REF(); }
+TfLiteRegistration* Register_REDUCE_PROD() {
+ return Register_REDUCE_PROD_REF();
+}
+TfLiteRegistration* Register_REDUCE_MAX() { return Register_REDUCE_MAX_REF(); }
} // namespace builtin
} // namespace ops
diff --git a/tensorflow/contrib/lite/kernels/reduce_test.cc b/tensorflow/contrib/lite/kernels/reduce_test.cc
index 9e946822c6..5d432d34ef 100644
--- a/tensorflow/contrib/lite/kernels/reduce_test.cc
+++ b/tensorflow/contrib/lite/kernels/reduce_test.cc
@@ -22,13 +22,14 @@ namespace tflite {
namespace {
using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
class BaseOpModel : public SingleOpModel {
public:
- void SetAxis(std::initializer_list<int> data) { PopulateTensor(axis_, data); }
+ void SetAxis(const std::vector<int>& data) { PopulateTensor(axis_, data); }
template <class T>
- void SetInput(std::initializer_list<T> data) {
+ void SetInput(std::vector<T> data) {
PopulateTensor(input_, data);
}
@@ -110,14 +111,72 @@ class SumOpDynamicModel : public BaseOpModel {
}
};
+// Model for the test case where axis is a const tensor.
+class ProdOpConstModel : public BaseOpModel {
+ public:
+ ProdOpConstModel(const TensorData& input, const TensorData& output,
+ std::initializer_list<int> axis_shape,
+ std::initializer_list<int> axis, bool keep_dims) {
+ input_ = AddInput(input);
+ axis_ = AddConstInput(TensorType_INT32, axis, axis_shape);
+ output_ = AddOutput(output);
+ SetBuiltinOp(BuiltinOperator_REDUCE_PROD, BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder_, keep_dims).Union());
+ BuildInterpreter({GetShape(input_)});
+ }
+};
+
+// Model for the test case where axis is a dynamic tensor.
+class ProdOpDynamicModel : public BaseOpModel {
+ public:
+ ProdOpDynamicModel(const TensorData& input, const TensorData& output,
+ const TensorData& axis, bool keep_dims) {
+ input_ = AddInput(input);
+ axis_ = AddInput(axis);
+ output_ = AddOutput(output);
+ SetBuiltinOp(BuiltinOperator_REDUCE_PROD, BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder_, keep_dims).Union());
+ BuildInterpreter({GetShape(input_)});
+ }
+};
+
+// Model for the test case where axis is a const tensor.
+class MaxOpConstModel : public BaseOpModel {
+ public:
+ MaxOpConstModel(const TensorData& input, const TensorData& output,
+ std::initializer_list<int> axis_shape,
+ std::initializer_list<int> axis, bool keep_dims) {
+ input_ = AddInput(input);
+ axis_ = AddConstInput(TensorType_INT32, axis, axis_shape);
+ output_ = AddOutput(output);
+ SetBuiltinOp(BuiltinOperator_REDUCE_MAX, BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder_, keep_dims).Union());
+ BuildInterpreter({GetShape(input_)});
+ }
+};
+
+// Model for the test case where axis is a dynamic tensor.
+class MaxOpDynamicModel : public BaseOpModel {
+ public:
+ MaxOpDynamicModel(const TensorData& input, const TensorData& output,
+ const TensorData& axis, bool keep_dims) {
+ input_ = AddInput(input);
+ axis_ = AddInput(axis);
+ output_ = AddOutput(output);
+ SetBuiltinOp(BuiltinOperator_REDUCE_MAX, BuiltinOptions_ReducerOptions,
+ CreateReducerOptions(builder_, keep_dims).Union());
+ BuildInterpreter({GetShape(input_)});
+ }
+};
+
// for quantized Add, the error shouldn't exceed step
float GetTolerance(int min, int max) { return (max - min) / 255.0; }
// Tests for reduce_mean
TEST(ConstFloatMeanOpTest, NotKeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
{4}, {1, 0, -3, -3}, false);
m.SetInput(data);
@@ -127,9 +186,9 @@ TEST(ConstFloatMeanOpTest, NotKeepDims) {
}
TEST(ConstFloatMeanOpTest, KeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
{2}, {0, 2}, true);
m.SetInput(data);
@@ -139,14 +198,24 @@ TEST(ConstFloatMeanOpTest, KeepDims) {
ElementsAreArray(ArrayFloatNear({10.5, 12.5, 14.5})));
}
+TEST(ConstFloatMeanOpTest, Scalar) {
+ std::vector<float> data = {3.27};
+ MeanOpConstModel m({TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}}, {},
+ {0}, true);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), IsEmpty());
+ EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({3.27})));
+}
+
TEST(DynamicFloatMeanOpTest, NotKeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
false);
- std::initializer_list<int> axis = {1, 0, -3, -3};
+ std::vector<int> axis = {1, 0, -3, -3};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
@@ -155,13 +224,13 @@ TEST(DynamicFloatMeanOpTest, NotKeepDims) {
}
TEST(DynamicFloatMeanOpTest, KeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}},
true);
- std::initializer_list<int> axis = {0, 2};
+ std::vector<int> axis = {0, 2};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
@@ -171,10 +240,10 @@ TEST(DynamicFloatMeanOpTest, KeepDims) {
}
TEST(DynamicFloatMeanOpTest, Scale) {
- std::initializer_list<float> data = {9.527};
+ std::vector<float> data = {9.527};
MeanOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
{TensorType_INT32, {1}}, true);
- std::initializer_list<int> axis = {0};
+ std::vector<int> axis = {0};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
@@ -185,7 +254,7 @@ TEST(DynamicFloatMeanOpTest, Scale) {
TEST(ConstUint8MeanOpTest, NotKeepDims) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
- std::initializer_list<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+ std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
MeanOpConstModel m({TensorType_UINT8, {1, 3, 2}, -1.0, 1.0},
{TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {1}, false);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
@@ -197,7 +266,7 @@ TEST(ConstUint8MeanOpTest, NotKeepDims) {
TEST(ConstUint8MeanOpTest, KeepDims) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
- std::initializer_list<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+ std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
MeanOpConstModel m({TensorType_UINT8, {3, 2}, -1.0, 1.0},
{TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {1}, true);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
@@ -210,11 +279,11 @@ TEST(ConstUint8MeanOpTest, KeepDims) {
TEST(DynamicUint8MeanOpTest, NotKeepDims) {
float kQuantizedTolerance = GetTolerance(-5.0, 2.0);
- std::initializer_list<float> data = {1.3, -4.8, -3.6, 0.24};
+ std::vector<float> data = {1.3, -4.8, -3.6, 0.24};
MeanOpDynamicModel m({TensorType_UINT8, {2, 2}, -5.0, 2.0},
{TensorType_UINT8, {2}, -5.0, 2.0},
{TensorType_INT32, {1}}, false);
- std::initializer_list<int> axis = {1};
+ std::vector<int> axis = {1};
m.SetAxis(axis);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
@@ -226,11 +295,11 @@ TEST(DynamicUint8MeanOpTest, NotKeepDims) {
TEST(DynamicUint8MeanOpTest, KeepDims) {
float kQuantizedTolerance = GetTolerance(-10.0, 12.0);
- std::initializer_list<float> data = {11.14, -0.14, 7.423, 0.879};
+ std::vector<float> data = {11.14, -0.14, 7.423, 0.879};
MeanOpDynamicModel m({TensorType_UINT8, {2, 2}, -10.0, 12.0},
{TensorType_UINT8, {2}, -10.0, 12.0},
{TensorType_INT32, {1}}, true);
- std::initializer_list<int> axis = {0};
+ std::vector<int> axis = {0};
m.SetAxis(axis);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
@@ -243,9 +312,9 @@ TEST(DynamicUint8MeanOpTest, KeepDims) {
// Tests for reduce_sum
TEST(ConstFloatSumOpTest, NotKeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
SumOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
{4}, {1, 0, -3, -3}, false);
m.SetInput(data);
@@ -256,9 +325,9 @@ TEST(ConstFloatSumOpTest, NotKeepDims) {
}
TEST(ConstFloatSumOpTest, KeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
SumOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
{2}, {0, 2}, true);
m.SetInput(data);
@@ -269,13 +338,13 @@ TEST(ConstFloatSumOpTest, KeepDims) {
}
TEST(DynamicFloatSumOpTest, NotKeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
SumOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
false);
- std::initializer_list<int> axis = {1, 0, -3, -3};
+ std::vector<int> axis = {1, 0, -3, -3};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
@@ -284,13 +353,23 @@ TEST(DynamicFloatSumOpTest, NotKeepDims) {
ElementsAreArray(ArrayFloatNear({144, 156})));
}
+TEST(ConstFloatSumOpTest, Scalar) {
+ std::vector<float> data = {17.};
+ SumOpConstModel m({TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}}, {}, {0},
+ false);
+ m.SetInput(data);
+ m.Invoke();
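+  // Reducing a rank-0 (scalar) input leaves the value unchanged; the output
+  // shape stays empty.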
+ EXPECT_THAT(m.GetOutputShape(), IsEmpty());
+ EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({17.})));
+}
+
TEST(DynamicFloatSumOpTest, KeepDims) {
- std::initializer_list<float> data = {
- 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
SumOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}}, true);
- std::initializer_list<int> axis = {0, 2};
+ std::vector<int> axis = {0, 2};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
@@ -300,10 +379,10 @@ TEST(DynamicFloatSumOpTest, KeepDims) {
}
TEST(DynamicFloatSumOpTest, Scale) {
- std::initializer_list<float> data = {9.527};
+ std::vector<float> data = {9.527};
SumOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
{TensorType_INT32, {1}}, true);
- std::initializer_list<int> axis = {0};
+ std::vector<int> axis = {0};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
@@ -313,7 +392,7 @@ TEST(DynamicFloatSumOpTest, Scale) {
TEST(ConstUint8SumOpTest, NotKeepDims) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
- std::initializer_list<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+ std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
SumOpConstModel m({TensorType_UINT8, {1, 3, 2}, -1.0, 1.0},
{TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {1}, false);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
@@ -326,7 +405,7 @@ TEST(ConstUint8SumOpTest, NotKeepDims) {
TEST(ConstUint8SumOpTest, KeepDims) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
- std::initializer_list<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+ std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
SumOpConstModel m({TensorType_UINT8, {3, 2}, -1.0, 1.0},
{TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {1}, true);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
@@ -339,11 +418,11 @@ TEST(ConstUint8SumOpTest, KeepDims) {
TEST(DynamicUint8SumOpTest, NotKeepDims) {
float kQuantizedTolerance = GetTolerance(-5.0, 2.0);
- std::initializer_list<float> data = {1.3, -4.8, -3.6, 0.24};
+ std::vector<float> data = {1.3, -4.8, -3.6, 0.24};
SumOpDynamicModel m({TensorType_UINT8, {2, 2}, -5.0, 2.0},
{TensorType_UINT8, {2}, -5.0, 2.0},
{TensorType_INT32, {1}}, false);
- std::initializer_list<int> axis = {1};
+ std::vector<int> axis = {1};
m.SetAxis(axis);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
@@ -355,11 +434,11 @@ TEST(DynamicUint8SumOpTest, NotKeepDims) {
TEST(DynamicUint8SumOpTest, KeepDims) {
float kQuantizedTolerance = GetTolerance(-10.0, 12.0);
- std::initializer_list<float> data = {11.14, -0.14, 7.423, 0.879};
+ std::vector<float> data = {11.14, -0.14, 7.423, 0.879};
SumOpDynamicModel m({TensorType_UINT8, {2, 2}, -10.0, 12.0},
{TensorType_UINT8, {2}, -10.0, 12.0},
{TensorType_INT32, {1}}, true);
- std::initializer_list<int> axis = {0};
+ std::vector<int> axis = {0};
m.SetAxis(axis);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
@@ -369,6 +448,223 @@ TEST(DynamicUint8SumOpTest, KeepDims) {
ElementsAreArray(ArrayFloatNear({6.47059, 10.698}, kQuantizedTolerance)));
}
+// Tests for reduce_prod
+
+TEST(ConstFloatProdOpTest, NotKeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ ProdOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
+ {4}, {1, 0, -3, -3}, false);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
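+  // Axes {1, 0, -3, -3} collapse to {0, 1}, so only the last dimension
+  // survives: the outputs are the products of the odd entries (1*3*...*23)
+  // and of the even entries (2*4*...*24).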
+ EXPECT_THAT(
+ m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear({3.162341376e+11, 1.9619905536e+12})));
+}
+
+TEST(ConstFloatProdOpTest, KeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ ProdOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
+ {2}, {0, 2}, true);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+ EXPECT_THAT(m.GetOutput<float>(),
+ ElementsAreArray(
+ ArrayFloatNear({7.74592e+06, 1.197504e+08, 6.6889152e+08})));
+}
+
+TEST(DynamicFloatProdOpTest, NotKeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ ProdOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
+ {TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
+ false);
+ std::vector<int> axis = {1, 0, -3, -3};
+ m.SetAxis(axis);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+ EXPECT_THAT(
+ m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear({3.16234143225e+11, 1.9619905536e+12})));
+}
+
+TEST(DynamicFloatProdOpTest, KeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ ProdOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
+ {TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}},
+ true);
+ std::vector<int> axis = {0, 2};
+ m.SetAxis(axis);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+ EXPECT_THAT(m.GetOutput<float>(),
+ ElementsAreArray(
+ ArrayFloatNear({7.74592e+06, 1.197504e+08, 6.6889152e+08})));
+}
+
+TEST(DynamicFloatProdOpTest, Scale) {
+ std::vector<float> data = {9.527};
+ ProdOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
+ {TensorType_INT32, {1}}, true);
+ std::vector<int> axis = {0};
+ m.SetAxis(axis);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1}));
+ EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({9.527})));
+}
+
+// Tests for reduce_max
+
+TEST(ConstFloatMaxOpTest, NotKeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ MaxOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
+ {4}, {1, 0, -3, -3}, false);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+ EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({23, 24})));
+}
+
+TEST(ConstFloatMaxOpTest, KeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ MaxOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
+ {2}, {0, 2}, true);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+ EXPECT_THAT(m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear({20, 22, 24})));
+}
+
+TEST(DynamicFloatMaxOpTest, NotKeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ MaxOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
+ {TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
+ false);
+ std::vector<int> axis = {1, 0, -3, -3};
+ m.SetAxis(axis);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+ EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({23, 24})));
+}
+
+TEST(DynamicFloatMaxOpTest, KeepDims) {
+ std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
+ 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
+ 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
+ MaxOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
+ {TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}}, true);
+ std::vector<int> axis = {0, 2};
+ m.SetAxis(axis);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
+ EXPECT_THAT(m.GetOutput<float>(),
+ ElementsAreArray(ArrayFloatNear({20, 22, 24})));
+}
+
+TEST(DynamicFloatMaxOpTest, Scale) {
+ std::vector<float> data = {9.527};
+ MaxOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
+ {TensorType_INT32, {1}}, true);
+ std::vector<int> axis = {0};
+ m.SetAxis(axis);
+ m.SetInput(data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1}));
+ EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({9.527})));
+}
+
+TEST(ConstUint8MaxOpTest, NotKeepDims) {
+ float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
+ std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+ MaxOpConstModel m({TensorType_UINT8, {1, 3, 2}, -1.0, 1.0},
+ {TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {1}, false);
+ m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+ m.Invoke();
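+  // Expected values are the maxima over the reduced axis (0.5 and 0.6),
+  // shifted slightly by the uint8 quantization round trip over [-1, 1].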
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
+ EXPECT_THAT(m.GetDequantizedOutput(),
+ ElementsAreArray(
+ ArrayFloatNear({0.501961, 0.603922}, kQuantizedTolerance)));
+}
+
+TEST(ConstUint8MaxOpTest, KeepDims) {
+ float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
+ std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
+ MaxOpConstModel m({TensorType_UINT8, {3, 2}, -1.0, 1.0},
+ {TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {1}, true);
+ m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1}));
+ EXPECT_THAT(m.GetDequantizedOutput(),
+ ElementsAreArray(
+ ArrayFloatNear({0.4, 0.4, 0.603922}, kQuantizedTolerance)));
+}
+
+TEST(DynamicUint8MaxOpTest, NotKeepDims) {
+ float kQuantizedTolerance = GetTolerance(-5.0, 2.0);
+ std::vector<float> data = {1.3, -4.8, -3.6, 0.24};
+ MaxOpDynamicModel m({TensorType_UINT8, {2, 2}, -5.0, 2.0},
+ {TensorType_UINT8, {2}, -5.0, 2.0},
+ {TensorType_INT32, {1}}, false);
+ std::vector<int> axis = {1};
+ m.SetAxis(axis);
+ m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
+ EXPECT_THAT(m.GetDequantizedOutput(),
+ ElementsAreArray(
+ ArrayFloatNear({1.2902, 0.247059}, kQuantizedTolerance)));
+}
+
+TEST(DynamicUint8MaxOpTest, KeepDims) {
+ float kQuantizedTolerance = GetTolerance(-10.0, 12.0);
+ std::vector<float> data = {11.14, -0.14, 7.423, 0.879};
+ MaxOpDynamicModel m({TensorType_UINT8, {2, 2}, -10.0, 12.0},
+ {TensorType_UINT8, {2}, -10.0, 12.0},
+ {TensorType_INT32, {1}}, true);
+ std::vector<int> axis = {0};
+ m.SetAxis(axis);
+ m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
+ EXPECT_THAT(m.GetDequantizedOutput(),
+ ElementsAreArray(
+ ArrayFloatNear({11.1294, 0.862745}, kQuantizedTolerance)));
+}
+
+TEST(DynamicUint8MaxOpTest, Scalar) {
+ float kQuantizedTolerance = GetTolerance(-10.0, 12.0);
+ std::vector<float> data = {11.14};
+ MaxOpDynamicModel m({TensorType_UINT8, {}, -10.0, 12.0},
+ {TensorType_UINT8, {}, -10.0, 12.0},
+ {TensorType_INT32, {1}}, true);
+  std::vector<int> axis = {0};
+  m.SetAxis(axis);
+  m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
+ m.Invoke();
+ EXPECT_THAT(m.GetOutputShape(), IsEmpty());
+ EXPECT_THAT(m.GetDequantizedOutput(),
+ ElementsAreArray(ArrayFloatNear({11.1294}, kQuantizedTolerance)));
+}
+
} // namespace
} // namespace tflite
diff --git a/tensorflow/contrib/lite/kernels/register.cc b/tensorflow/contrib/lite/kernels/register.cc
index 1994e85ce3..0b70bed308 100644
--- a/tensorflow/contrib/lite/kernels/register.cc
+++ b/tensorflow/contrib/lite/kernels/register.cc
@@ -91,6 +91,8 @@ TfLiteRegistration* Register_FLOOR();
TfLiteRegistration* Register_TILE();
TfLiteRegistration* Register_NEG();
TfLiteRegistration* Register_SUM();
+TfLiteRegistration* Register_REDUCE_PROD();
+TfLiteRegistration* Register_REDUCE_MAX();
TfLiteRegistration* Register_SELECT();
TfLiteRegistration* Register_SLICE();
TfLiteRegistration* Register_SIN();
@@ -104,6 +106,7 @@ TfLiteRegistration* Register_RSQRT();
TfLiteRegistration* Register_SHAPE();
TfLiteRegistration* Register_POW();
TfLiteRegistration* Register_FAKE_QUANT();
+TfLiteRegistration* Register_PACK();
BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_RELU, Register_RELU());
@@ -182,6 +185,8 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_TRANSPOSE_CONV, Register_TRANSPOSE_CONV());
AddBuiltin(BuiltinOperator_TILE, Register_TILE());
AddBuiltin(BuiltinOperator_SUM, Register_SUM());
+ AddBuiltin(BuiltinOperator_REDUCE_PROD, Register_REDUCE_PROD());
+ AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX());
AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS());
AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE());
AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL());
@@ -190,7 +195,8 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_RSQRT, Register_RSQRT());
AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE());
AddBuiltin(BuiltinOperator_POW, Register_POW());
- AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT());
+ AddBuiltin(BuiltinOperator_FAKE_QUANT, Register_FAKE_QUANT(), 1, 2);
+ AddBuiltin(BuiltinOperator_PACK, Register_PACK());
// TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
// custom ops aren't always included by default.
diff --git a/tensorflow/contrib/lite/kernels/reshape.cc b/tensorflow/contrib/lite/kernels/reshape.cc
index 3287040695..99ecc16093 100644
--- a/tensorflow/contrib/lite/kernels/reshape.cc
+++ b/tensorflow/contrib/lite/kernels/reshape.cc
@@ -25,16 +25,11 @@ namespace builtin {
namespace reshape {
constexpr int kInputTensor = 0;
+constexpr int kShapeTensor = 1;
constexpr int kOutputTensor = 0;
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
- auto* params = reinterpret_cast<TfLiteReshapeParams*>(node->builtin_data);
-
- // TODO(ahentz): we are often given a tensor with the shape but we only pay
- // attention to what the shape specified in 'params'.
- TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2);
- TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-
+TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node,
+ TfLiteIntArray* output_shape) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
@@ -47,32 +42,76 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
num_input_elements *= SizeOfDimension(input, i);
}
- TfLiteIntArray* output_size = TfLiteIntArrayCreate(params->num_dimensions);
int num_output_elements = 1;
int stretch_dim = -1;
- for (int i = 0; i < params->num_dimensions; ++i) {
- int value = params->shape[i];
+ for (int i = 0; i < output_shape->size; ++i) {
+ int value = output_shape->data[i];
if (value == -1) {
TF_LITE_ENSURE_EQ(context, stretch_dim, -1);
stretch_dim = i;
} else {
num_output_elements *= value;
- output_size->data[i] = value;
}
}
if (stretch_dim != -1) {
- output_size->data[stretch_dim] = num_input_elements / num_output_elements;
- num_output_elements *= output_size->data[stretch_dim];
+ output_shape->data[stretch_dim] = num_input_elements / num_output_elements;
+ num_output_elements *= output_shape->data[stretch_dim];
}
TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements);
- return context->ResizeTensor(context, output, output_size);
+ return context->ResizeTensor(context, output, output_shape);
+}
+
+TfLiteStatus ResizeOutputWithShapeTensor(TfLiteContext* context,
+ TfLiteNode* node) {
+ const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
+
+ TfLiteIntArray* output_shape = TfLiteIntArrayCreate(shape->dims->data[0]);
+ for (int i = 0; i < output_shape->size; ++i) {
+ output_shape->data[i] = shape->data.i32[i];
+ }
+ return ResizeOutput(context, node, output_shape);
+}
+
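+// The output shape can come either from the optional second input (a 1-D
+// int32 shape tensor) or from the `shape` attribute in TfLiteReshapeParams;
+// the shape tensor takes precedence when it is present and valid.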
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+ auto* params = reinterpret_cast<TfLiteReshapeParams*>(node->builtin_data);
+
+ TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2);
+ TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+ // Attempt to use shape tensor if it exists.
+ if (NumInputs(node) == 2) {
+ const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
+ // Check if the shape tensor is valid.
+ if (shape->dims->size == 1 && shape->type == kTfLiteInt32) {
+      // Set the output tensor as dynamic if the shape isn't constant.
+ if (!IsConstantTensor(shape)) {
+ TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+ SetTensorToDynamic(output);
+ return kTfLiteOk;
+ }
+ // Shape is constant. Resize now.
+ return ResizeOutputWithShapeTensor(context, node);
+ }
+ }
+  // If the shape tensor was usable, the function has already returned above.
+  // Otherwise, fall back to the shape parameter in `TfLiteReshapeParams`.
+
+ TfLiteIntArray* output_shape = TfLiteIntArrayCreate(params->num_dimensions);
+ for (int i = 0; i < params->num_dimensions; ++i) {
+ output_shape->data[i] = params->shape[i];
+ }
+ return ResizeOutput(context, node, output_shape);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+ if (IsDynamicTensor(output)) {
+ TF_LITE_ENSURE_OK(context, ResizeOutputWithShapeTensor(context, node));
+ }
+
memcpy(output->data.raw, input->data.raw, input->bytes);
return kTfLiteOk;
diff --git a/tensorflow/contrib/lite/kernels/sparse_to_dense.cc b/tensorflow/contrib/lite/kernels/sparse_to_dense.cc
index 404c32ad9c..7be5e66c16 100644
--- a/tensorflow/contrib/lite/kernels/sparse_to_dense.cc
+++ b/tensorflow/contrib/lite/kernels/sparse_to_dense.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
diff --git a/tensorflow/contrib/lite/kernels/sub.cc b/tensorflow/contrib/lite/kernels/sub.cc
index 1247525d41..77a1f59689 100644
--- a/tensorflow/contrib/lite/kernels/sub.cc
+++ b/tensorflow/contrib/lite/kernels/sub.cc
@@ -78,29 +78,47 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
}
template <KernelType kernel_type>
-void EvalFloat(TfLiteContext* context, TfLiteNode* node,
- TfLiteSubParams* params, const OpData* data,
- const TfLiteTensor* input1, const TfLiteTensor* input2,
- TfLiteTensor* output) {
- float output_activation_min, output_activation_max;
- CalculateActivationRange(params->activation, &output_activation_min,
- &output_activation_max);
-#define TF_LITE_SUB(type, opname) \
- type::opname(GetTensorData<float>(input1), GetTensorDims(input1), \
- GetTensorData<float>(input2), GetTensorDims(input2), \
- output_activation_min, output_activation_max, \
- GetTensorData<float>(output), GetTensorDims(output))
- if (kernel_type == kReference) {
- if (data->requires_broadcast) {
- TF_LITE_SUB(reference_ops, BroadcastSub);
+void EvalSub(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params,
+ const OpData* data, const TfLiteTensor* input1,
+ const TfLiteTensor* input2, TfLiteTensor* output) {
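+// TF_LITE_SUB computes the activation range for the given data type, packs
+// it into ArithmeticParams, and calls the selected kernel; it is expanded
+// once per (kernel, data type, broadcast) branch below.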
+#define TF_LITE_SUB(type, opname, data_type) \
+ data_type output_activation_min, output_activation_max; \
+ CalculateActivationRange(params->activation, &output_activation_min, \
+ &output_activation_max); \
+ tflite::ArithmeticParams op_params; \
+ SetActivationParams(output_activation_min, output_activation_max, \
+ &op_params); \
+ type::opname(op_params, GetTensorShape(input1), \
+ GetTensorData<data_type>(input1), GetTensorShape(input2), \
+ GetTensorData<data_type>(input2), GetTensorShape(output), \
+ GetTensorData<data_type>(output))
+ if (output->type == kTfLiteInt32) {
+ if (kernel_type == kReference) {
+ if (data->requires_broadcast) {
+ TF_LITE_SUB(reference_ops, BroadcastSub4DSlow, int32_t);
+ } else {
+ TF_LITE_SUB(reference_ops, SubWithActivation, int32_t);
+ }
} else {
- TF_LITE_SUB(reference_ops, Sub);
+ if (data->requires_broadcast) {
+ TF_LITE_SUB(optimized_ops, BroadcastSub4DSlow, int32_t);
+ } else {
+ TF_LITE_SUB(optimized_ops, SubWithActivation, int32_t);
+ }
}
- } else {
- if (data->requires_broadcast) {
- TF_LITE_SUB(optimized_ops, BroadcastSub);
+ } else if (output->type == kTfLiteFloat32) {
+ if (kernel_type == kReference) {
+ if (data->requires_broadcast) {
+ TF_LITE_SUB(reference_ops, BroadcastSub4DSlow, float);
+ } else {
+ TF_LITE_SUB(reference_ops, SubWithActivation, float);
+ }
} else {
- TF_LITE_SUB(optimized_ops, Sub);
+ if (data->requires_broadcast) {
+ TF_LITE_SUB(optimized_ops, BroadcastSub4DSlow, float);
+ } else {
+ TF_LITE_SUB(optimized_ops, SubWithActivation, float);
+ }
}
}
#undef TF_LITE_SUB
@@ -128,36 +146,43 @@ void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
int input1_shift;
QuantizeMultiplierSmallerThanOneExp(real_input1_multiplier,
&input1_multiplier, &input1_shift);
- input1_shift *= -1;
int32 input2_multiplier;
int input2_shift;
QuantizeMultiplierSmallerThanOneExp(real_input2_multiplier,
&input2_multiplier, &input2_shift);
- input2_shift *= -1;
int32 output_multiplier;
int output_shift;
QuantizeMultiplierSmallerThanOneExp(real_output_multiplier,
&output_multiplier, &output_shift);
- output_shift *= -1;
int32 output_activation_min, output_activation_max;
CalculateActivationRangeUint8(params->activation, output,
&output_activation_min, &output_activation_max);
-#define TF_LITE_SUB(type, opname) \
- type::opname(left_shift, GetTensorData<uint8_t>(input1), \
- GetTensorDims(input1), input1_offset, input1_multiplier, \
- input1_shift, GetTensorData<uint8_t>(input2), \
- GetTensorDims(input2), input2_offset, input2_multiplier, \
- input2_shift, output_offset, output_multiplier, output_shift, \
- output_activation_min, output_activation_max, \
- GetTensorData<uint8_t>(output), GetTensorDims(output));
+#define TF_LITE_SUB(type, opname) \
+ tflite::ArithmeticParams op_params; \
+ op_params.left_shift = left_shift; \
+ op_params.input1_offset = input1_offset; \
+ op_params.input1_multiplier = input1_multiplier; \
+ op_params.input1_shift = input1_shift; \
+ op_params.input2_offset = input2_offset; \
+ op_params.input2_multiplier = input2_multiplier; \
+ op_params.input2_shift = input2_shift; \
+ op_params.output_offset = output_offset; \
+ op_params.output_multiplier = output_multiplier; \
+ op_params.output_shift = output_shift; \
+ SetActivationParams(output_activation_min, output_activation_max, \
+ &op_params); \
+ type::opname(op_params, GetTensorShape(input1), \
+ GetTensorData<uint8_t>(input1), GetTensorShape(input2), \
+ GetTensorData<uint8_t>(input2), GetTensorShape(output), \
+ GetTensorData<uint8_t>(output))
// The quantized version of Sub doesn't support activations, so we
// always use BroadcastSub.
if (kernel_type == kReference) {
- TF_LITE_SUB(reference_ops, BroadcastSub);
+ TF_LITE_SUB(reference_ops, BroadcastSub4DSlow);
} else {
- TF_LITE_SUB(optimized_ops, BroadcastSub);
+ TF_LITE_SUB(optimized_ops, BroadcastSub4DSlow);
}
#undef TF_LITE_SUB
}
@@ -171,14 +196,15 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
- if (output->type == kTfLiteFloat32) {
- EvalFloat<kernel_type>(context, node, params, data, input1, input2, output);
+ if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
+ EvalSub<kernel_type>(context, node, params, data, input1, input2, output);
} else if (output->type == kTfLiteUInt8) {
EvalQuantized<kernel_type>(context, node, params, data, input1, input2,
output);
} else {
context->ReportError(
- context, "output type %d is not supported, requires float|uint8 types.",
+ context,
+ "output type %d is not supported, requires float|uint8|int32 types.",
output->type);
return kTfLiteError;
}
diff --git a/tensorflow/contrib/lite/kernels/sub_test.cc b/tensorflow/contrib/lite/kernels/sub_test.cc
index ff07aeec49..5978c574d3 100644
--- a/tensorflow/contrib/lite/kernels/sub_test.cc
+++ b/tensorflow/contrib/lite/kernels/sub_test.cc
@@ -52,6 +52,13 @@ class FloatSubOpModel : public BaseSubOpModel {
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
+class IntegerSubOpModel : public BaseSubOpModel {
+ public:
+ using BaseSubOpModel::BaseSubOpModel;
+
+ std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
+};
+
class QuantizedSubOpModel : public BaseSubOpModel {
public:
using BaseSubOpModel::BaseSubOpModel;
@@ -125,6 +132,57 @@ TEST(FloatSubOpModel, WithBroadcast) {
}
}
+TEST(IntegerSubOpModel, NoActivation) {
+ IntegerSubOpModel m({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
+ ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-21, 0, 4, 3}));
+}
+
+TEST(IntegerSubOpModel, ActivationRELU_N1_TO_1) {
+ IntegerSubOpModel m({TensorType_INT32, {1, 2, 2, 1}},
+ {TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}},
+ ActivationFunctionType_RELU_N1_TO_1);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1, 0, 1, 1}));
+}
+
+TEST(IntegerSubOpModel, VariousInputShapes) {
+ std::vector<std::initializer_list<int>> test_shapes = {
+ {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+ for (int i = 0; i < test_shapes.size(); ++i) {
+ IntegerSubOpModel m({TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, {}}, ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
+ m.PopulateTensor<int32_t>(m.input2(), {1, 2, 3, 5, 11, 1});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(), ElementsAreArray({-21, 0, 4, 3, 0, 19}))
+ << "With shape number " << i;
+ }
+}
+
+TEST(IntegerSubOpModel, WithBroadcast) {
+ std::vector<std::initializer_list<int>> test_shapes = {
+ {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+ for (int i = 0; i < test_shapes.size(); ++i) {
+ IntegerSubOpModel m({TensorType_INT32, test_shapes[i]},
+ {TensorType_INT32, {}}, // always a scalar
+ {TensorType_INT32, {}}, ActivationFunctionType_NONE);
+ m.PopulateTensor<int32_t>(m.input1(), {-20, 2, 7, 8, 11, 20});
+ m.PopulateTensor<int32_t>(m.input2(), {1});
+ m.Invoke();
+ EXPECT_THAT(m.GetOutput(),
+ ElementsAreArray(ArrayFloatNear({-21, 1, 6, 7, 10, 19})))
+ << "With shape number " << i;
+ }
+}
+
TEST(QuantizedSubOpModel, QuantizedTestsNoActivation) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
std::vector<std::initializer_list<float>> inputs1 = {
diff --git a/tensorflow/contrib/lite/kernels/svdf.cc b/tensorflow/contrib/lite/kernels/svdf.cc
index 22eebdd4ce..6d4912ce3a 100644
--- a/tensorflow/contrib/lite/kernels/svdf.cc
+++ b/tensorflow/contrib/lite/kernels/svdf.cc
@@ -16,7 +16,6 @@ limitations under the License.
// SVDF op that compresses a fully connected op via low-rank matrix
// factorization. See https://research.google.com/pubs/archive/43813.pdf for
// details.
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
@@ -105,7 +104,7 @@ constexpr int kStateTensor = 0;
constexpr int kOutputTensor = 1;
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
- auto* op_data = new OpData;
+ auto* op_data = new OpData();
op_data->float_weights_time_initialized = false;
context->AddTensors(context, /*tensors_to_add=*/4,
&op_data->scratch_tensor_index);
diff --git a/tensorflow/contrib/lite/kernels/transpose_conv.cc b/tensorflow/contrib/lite/kernels/transpose_conv.cc
index 7182374a6f..a9baa5c698 100644
--- a/tensorflow/contrib/lite/kernels/transpose_conv.cc
+++ b/tensorflow/contrib/lite/kernels/transpose_conv.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
@@ -22,7 +21,6 @@ limitations under the License.
#include "tensorflow/contrib/lite/builtin_op_data.h"
#include "tensorflow/contrib/lite/context.h"
-#include "tensorflow/contrib/lite/kernels/eigen_support.h"
#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/contrib/lite/kernels/internal/tensor.h"
#include "tensorflow/contrib/lite/kernels/kernel_util.h"
@@ -39,35 +37,9 @@ constexpr int kWeightsTensor = 1;
constexpr int kDataInputTensor = 2;
constexpr int kOutputTensor = 0;
-const int kTensorNotAllocated = -1;
-
-struct OpData {
- // IDs are the arbitrary identifiers used by TF Lite to identify and access
- // memory buffers.
- int im2col_id = kTensorNotAllocated;
-
- // im2col is the only temporary currently tracked, therefore always index 0.
- // If more temporaries are added, they should be properly tracked.
- int32_t im2col_index = 0;
-};
-
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
- // This is a builtin op, so we don't use the contents in 'buffer', if any.
- // Instead, we allocate a new object to use as scratch space for im2col, and
- // to carry information from Prepare() to Eval().
- auto* data = new OpData;
- eigen_support::IncrementUsageCounter(context);
- return data;
-}
-
-void Free(TfLiteContext* context, void* buffer) {
- eigen_support::DecrementUsageCounter(context);
- delete reinterpret_cast<OpData*>(buffer);
-}
-
-TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
- const TfLiteTensor* output_shape,
- TfLiteTensor* output) {
+TfLiteStatus ResizeOutputShape(TfLiteContext* context,
+ const TfLiteTensor* output_shape,
+ TfLiteTensor* output) {
// Currently only support int32 for output shape.
if (output_shape->type != kTfLiteInt32) {
context->ReportError(context, "Output shape is %d, not int32.",
@@ -83,60 +55,15 @@ TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
return context->ResizeTensor(context, output, output_shape_array);
}
-// Allocate temporary im2col tensor.
-static TfLiteStatus AllocateIm2colTensor(TfLiteContext* context,
- TfLiteNode* node) {
- OpData* data = reinterpret_cast<OpData*>(node->user_data);
- if (data->im2col_id == kTensorNotAllocated) {
- context->AddTensors(context, 1, &data->im2col_id);
- }
-
- TfLiteIntArrayFree(node->temporaries);
- node->temporaries = TfLiteIntArrayCreate(1);
- node->temporaries->data[data->im2col_index] = data->im2col_id;
-
- return kTfLiteOk;
-}
-
-TfLiteStatus ResizeIm2ColTensor(TfLiteContext* context,
- const TfLiteTensor* output_shape,
- const TfLiteTensor* weights,
- const TfLiteTensor* input,
- TfLiteTensor* im2col) {
- if (output_shape->type != kTfLiteInt32) {
- context->ReportError(context, "im2col shape is %d, not int32.",
- output_shape->type);
- return kTfLiteError;
- }
- TF_LITE_ENSURE_EQ(context, NumElements(output_shape), 4);
- TfLiteIntArray* im2col_shape_array = TfLiteIntArrayCreate(4);
- im2col_shape_array->data[0] = output_shape->data.i32[0];
- im2col_shape_array->data[1] = output_shape->data.i32[1];
- im2col_shape_array->data[2] = output_shape->data.i32[2];
- const int input_depth = SizeOfDimension(input, 3);
- const int filter_width = SizeOfDimension(weights, 1);
- const int filter_height = SizeOfDimension(weights, 2);
- im2col_shape_array->data[3] = input_depth * filter_height * filter_width;
-
- im2col->type = input->type;
- im2col->allocation_type = kTfLiteArenaRw;
- return context->ResizeTensor(context, im2col, im2col_shape_array);
-}
-
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
- TF_LITE_ENSURE_STATUS(AllocateIm2colTensor(context, node));
-
const TfLiteTensor* output_shape =
GetInput(context, node, kOutputShapeTensor);
const TfLiteTensor* weights = GetInput(context, node, kWeightsTensor);
const TfLiteTensor* input = GetInput(context, node, kDataInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
- OpData* user_data = reinterpret_cast<OpData*>(node->user_data);
- TfLiteTensor* im2col =
- &context->tensors[node->temporaries->data[user_data->im2col_index]];
TF_LITE_ENSURE_EQ(context, NumDimensions(output_shape), 1);
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
@@ -153,15 +80,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, SizeOfDimension(input, 3),
SizeOfDimension(weights, 3));
- if (IsConstantTensor(output_shape)) {
- TF_LITE_ENSURE_STATUS(ResizeOutputTensor(context, output_shape, output));
- TF_LITE_ENSURE_STATUS(
- ResizeIm2ColTensor(context, output_shape, weights, input, im2col));
- } else {
- // Defer resizing until Eval().
+ if (!IsConstantTensor(output_shape)) {
SetTensorToDynamic(output);
+ return kTfLiteOk;
}
- return kTfLiteOk;
+ return ResizeOutputShape(context, output_shape, output);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
@@ -170,19 +93,13 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* weights = GetInput(context, node, kWeightsTensor);
const TfLiteTensor* input = GetInput(context, node, kDataInputTensor);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
- OpData* user_data = reinterpret_cast<OpData*>(node->user_data);
- TfLiteTensor* im2col =
- &context->tensors[node->temporaries->data[user_data->im2col_index]];
+
const auto* params =
reinterpret_cast<TfLiteTransposeConvParams*>(node->builtin_data);
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
- ResizeOutputTensor(context, output_shape, output));
- }
- if (IsDynamicTensor(im2col)) {
- TF_LITE_ENSURE_OK(context, ResizeIm2ColTensor(context, output_shape,
- weights, input, im2col));
+ ResizeOutputShape(context, output_shape, output));
}
// Get height and width of the output image.
@@ -201,12 +118,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
// Currently only support float32.
switch (input->type) {
case kTfLiteFloat32:
- optimized_ops::TransposeConv(
+ reference_ops::TransposeConv(
GetTensorData<float>(input), GetTensorDims(input),
GetTensorData<float>(weights), GetTensorDims(weights), stride_width,
stride_height, padding_size.width, padding_size.height,
GetTensorData<float>(output), GetTensorDims(output),
- GetTensorData<float>(im2col), GetTensorDims(im2col));
+ // Last two args specify im2col which reference_ops ignores.
+ // (Note this does not lead to a performance regression, as the
+ // previous optimized version was just a copy of the reference code.)
+ // TODO(b/110208176): Allocate im2col tensors and switch to
+ // optimized_ops.
+ GetTensorData<float>(output), GetTensorDims(output));
break;
default:
context->ReportError(context, "Type %d, not currently supported.",
@@ -219,8 +141,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
} // namespace transpose_conv
TfLiteRegistration* Register_TRANSPOSE_CONV() {
- static TfLiteRegistration r = {transpose_conv::Init, transpose_conv::Free,
- transpose_conv::Prepare, transpose_conv::Eval};
+ static TfLiteRegistration r = {nullptr, nullptr, transpose_conv::Prepare,
+ transpose_conv::Eval};
return &r;
}
diff --git a/tensorflow/contrib/lite/kernels/transpose_conv_test.cc b/tensorflow/contrib/lite/kernels/transpose_conv_test.cc
index c741df19de..55df897180 100644
--- a/tensorflow/contrib/lite/kernels/transpose_conv_test.cc
+++ b/tensorflow/contrib/lite/kernels/transpose_conv_test.cc
@@ -14,7 +14,6 @@ limitations under the License.
==============================================================================*/
#include <cstdarg>
#include <gtest/gtest.h>
-#include "absl/memory/memory.h"
#include "tensorflow/contrib/lite/interpreter.h"
#include "tensorflow/contrib/lite/kernels/register.h"
#include "tensorflow/contrib/lite/kernels/test_util.h"
@@ -25,49 +24,9 @@ namespace {
using ::testing::ElementsAreArray;
-class ConstTransposeConvOpModel : public SingleOpModel {
- // Just to be extra confusing, transpose_conv has an _input_ named
- // "output_shape". This input sets the shape of the output tensor of the op.
- // In this version of the test class, "output_shape" is a constant that must
- // be specified in the constructor.
- public:
- ConstTransposeConvOpModel(TfLiteRegistration* registration,
- std::initializer_list<int> input_shape,
- std::initializer_list<int> filter_shape,
- std::initializer_list<int> output_shape_data,
- Padding padding, int stride_w, int stride_h) {
- output_shape_ = AddConstInput(TensorType_INT32, output_shape_data,
- {static_cast<int>(output_shape_data.size())});
- filter_ = AddInput(TensorType_FLOAT32);
- input_ = AddInput(TensorType_FLOAT32);
- output_ = AddOutput(TensorType_FLOAT32);
- SetBuiltinOp(
- BuiltinOperator_TRANSPOSE_CONV, BuiltinOptions_TransposeConvOptions,
- CreateTransposeConvOptions(builder_, padding, stride_w, stride_h)
- .Union());
- resolver_ = absl::make_unique<SingleOpResolver>(
- BuiltinOperator_TRANSPOSE_CONV, registration);
- BuildInterpreter({{4}, filter_shape, input_shape});
- }
-
- int output_shape() { return output_shape_; }
- int filter() { return filter_; }
- int input() { return input_; }
-
- std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
- std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
-
- private:
- int output_shape_;
- int filter_;
- int input_;
- int output_;
-};
-
class TransposeConvOpModel : public SingleOpModel {
public:
- TransposeConvOpModel(TfLiteRegistration* registration,
- std::initializer_list<int> input_shape,
+ TransposeConvOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> filter_shape, Padding padding,
int stride_w, int stride_h) {
output_shape_ = AddInput(TensorType_INT32);
@@ -78,8 +37,6 @@ class TransposeConvOpModel : public SingleOpModel {
BuiltinOperator_TRANSPOSE_CONV, BuiltinOptions_TransposeConvOptions,
CreateTransposeConvOptions(builder_, padding, stride_w, stride_h)
.Union());
- resolver_ = absl::make_unique<SingleOpResolver>(
- BuiltinOperator_TRANSPOSE_CONV, registration);
BuildInterpreter({{4}, filter_shape, input_shape});
}
@@ -97,15 +54,6 @@ class TransposeConvOpModel : public SingleOpModel {
int output_;
};
-const auto kKernelMap = new std::map<string, TfLiteRegistration*>({});
-
-class TransposeConvOpTest : public SingleOpTest {
- protected:
- const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
- return *kKernelMap;
- }
-};
-
// Test case:
// output = tf.nn.conv2d_backprop_input(
// tf.constant([ 1, 4, 4, 1 ]),
@@ -113,9 +61,8 @@ class TransposeConvOpTest : public SingleOpTest {
// tf.constant(np.arange(1, 17), shape=[ 1, 4, 4, 1 ], dtype=tf.float32),
// [1, 1, 1, 1 ],
// "SAME")
-TEST_P(TransposeConvOpTest, SimpleTest) {
- TransposeConvOpModel m(GetRegistration(), {1, 4, 4, 1}, {1, 3, 3, 1},
- Padding_SAME, 1, 1);
+TEST(TransposeConvOpModelTest, SimpleTest) {
+ TransposeConvOpModel m({1, 4, 4, 1}, {1, 3, 3, 1}, Padding_SAME, 1, 1);
m.PopulateTensor<int>(m.output_shape(), {1, 4, 4, 1});
m.PopulateTensor<float>(m.filter(), {1, 2, 3, 4, 5, 6, 7, 8, 9});
m.PopulateTensor<float>(
@@ -128,21 +75,6 @@ TEST_P(TransposeConvOpTest, SimpleTest) {
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
-// Test case: Same as above, but with a const "output_shape"
-TEST_P(TransposeConvOpTest, ConstSimpleTest) {
- ConstTransposeConvOpModel m(GetRegistration(), {1, 4, 4, 1}, {1, 4, 4, 1},
- {1, 3, 3, 1}, Padding_SAME, 1, 1);
- m.PopulateTensor<float>(m.filter(), {1, 2, 3, 4, 5, 6, 7, 8, 9});
- m.PopulateTensor<float>(
- m.input(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
- m.Invoke();
-
- EXPECT_THAT(m.GetOutput(),
- ElementsAreArray({29, 62, 83, 75, 99, 192, 237, 198, 207, 372,
- 417, 330, 263, 446, 485, 365}));
- EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
-}
-
// Test case:
// filter = tf.constant(np.arange(1, 19),
// shape=[ 3, 3, 1, 2 ],
@@ -155,9 +87,8 @@ TEST_P(TransposeConvOpTest, ConstSimpleTest) {
// "SAME")
// And filter value is derived by:
// filter = tf.reshape(tf.transpose(filter, perm=[3, 0, 1, 2]), shape=[18, 1])
-TEST_P(TransposeConvOpTest, TwoFiltersTest) {
- TransposeConvOpModel m(GetRegistration(), {1, 4, 4, 2}, {1, 3, 3, 2},
- Padding_SAME, 1, 1);
+TEST(TransposeConvOpModelTest, TwoFiltersTest) {
+ TransposeConvOpModel m({1, 4, 4, 2}, {1, 3, 3, 2}, Padding_SAME, 1, 1);
m.PopulateTensor<int>(m.output_shape(), {1, 4, 4, 1});
m.PopulateTensor<float>(m.filter(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18});
@@ -185,9 +116,8 @@ TEST_P(TransposeConvOpTest, TwoFiltersTest) {
// "VALID")
// And filter value is derived by:
// filter = tf.reshape(tf.transpose(filter, perm=[3, 0, 1, 2]), shape=[1, 18])
-TEST_P(TransposeConvOpTest, PaddingValidTest) {
- TransposeConvOpModel m(GetRegistration(), {1, 4, 4, 2}, {1, 3, 3, 2},
- Padding_VALID, 1, 1);
+TEST(TransposeConvOpModelTest, PaddingValidTest) {
+ TransposeConvOpModel m({1, 4, 4, 2}, {1, 3, 3, 2}, Padding_VALID, 1, 1);
m.PopulateTensor<int>(m.output_shape(), {1, 6, 6, 1});
m.PopulateTensor<float>(m.filter(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18});
@@ -216,9 +146,8 @@ TEST_P(TransposeConvOpTest, PaddingValidTest) {
// tf.constant(np.arange(1, 5), shape=[ 1, 2, 2, 1 ], dtype=tf.float32),
// [1, 2, 2, 1 ],
// "VALID")
-TEST_P(TransposeConvOpTest, StrideValidTest) {
- TransposeConvOpModel m(GetRegistration(), {1, 2, 2, 1}, {1, 3, 3, 1},
- Padding_VALID, 2, 2);
+TEST(TransposeConvOpModelTest, StrideValidTest) {
+ TransposeConvOpModel m({1, 2, 2, 1}, {1, 3, 3, 1}, Padding_VALID, 2, 2);
m.PopulateTensor<int>(m.output_shape(), {1, 5, 5, 1});
m.PopulateTensor<float>(m.filter(), {1, 2, 3, 4, 5, 6, 7, 8, 9});
m.PopulateTensor<float>(m.input(), {1, 2, 3, 4});
@@ -241,9 +170,8 @@ TEST_P(TransposeConvOpTest, StrideValidTest) {
// tf.constant(np.arange(1, 5), shape=[ 1, 2, 2, 1 ], dtype=tf.float32),
// [1, 2, 2, 1 ],
// "VALID")
-TEST_P(TransposeConvOpTest, MultiChannelTest) {
- TransposeConvOpModel m(GetRegistration(), {1, 2, 2, 1}, {2, 3, 3, 1},
- Padding_VALID, 2, 2);
+TEST(TransposeConvOpModelTest, MultiChannelTest) {
+ TransposeConvOpModel m({1, 2, 2, 1}, {2, 3, 3, 1}, Padding_VALID, 2, 2);
m.PopulateTensor<int>(m.output_shape(), {1, 5, 5, 2});
m.PopulateTensor<float>(m.filter(), {1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6,
8, 10, 12, 14, 16, 18});
@@ -259,24 +187,6 @@ TEST_P(TransposeConvOpTest, MultiChannelTest) {
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 5, 5, 2}));
}
-// Test case: Same as above, but with a const "output_shape"
-TEST_P(TransposeConvOpTest, ConstMultiChannelTest) {
- ConstTransposeConvOpModel m(GetRegistration(), {1, 2, 2, 1}, {2, 3, 3, 1},
- {1, 5, 5, 2}, Padding_VALID, 2, 2);
- m.PopulateTensor<float>(m.filter(), {1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6,
- 8, 10, 12, 14, 16, 18});
- m.PopulateTensor<float>(m.input(), {1, 2, 3, 4});
- m.Invoke();
-
- EXPECT_THAT(
- m.GetOutput(),
- ElementsAreArray({1, 2, 3, 4, 7, 10, 6, 8, 10, 12, 7, 8, 9,
- 10, 25, 28, 18, 20, 22, 24, 16, 20, 24, 28, 62, 72,
- 42, 48, 54, 60, 21, 24, 27, 30, 61, 68, 36, 40, 44,
- 48, 39, 42, 45, 48, 103, 110, 60, 64, 68, 72}));
- EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 5, 5, 2}));
-}
-
// Test case:
// filter = tf.constant(np.random.randint(1, 10, size=9),
// shape=[ 3, 3, 1, 1 ],
@@ -289,9 +199,8 @@ TEST_P(TransposeConvOpTest, ConstMultiChannelTest) {
// "SAME")
// And filter value is derived by:
// filter = tf.reshape(tf.transpose(filter, perm=[3, 0, 1, 2]), shape=[-1])
-TEST_P(TransposeConvOpTest, AccuracyTest) {
- TransposeConvOpModel m(GetRegistration(), {1, 1, 2, 1}, {1, 3, 3, 1},
- Padding_SAME, 3, 3);
+TEST(TransposeConvOpModelTest, AccuracyTest) {
+ TransposeConvOpModel m({1, 1, 2, 1}, {1, 3, 3, 1}, Padding_SAME, 3, 3);
m.PopulateTensor<int>(m.output_shape(), {1, 3, 4, 1});
m.PopulateTensor<float>(m.filter(), {9, 5, 6, 9, 8, 5, 3, 1, 4});
m.PopulateTensor<float>(m.input(), {323, 521});
@@ -303,10 +212,6 @@ TEST_P(TransposeConvOpTest, AccuracyTest) {
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 4, 1}));
}
-INSTANTIATE_TEST_CASE_P(
- TransposeConvOpTest, TransposeConvOpTest,
- ::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
-
} // namespace
} // namespace tflite
diff --git a/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc b/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
index 32daf2bb02..0acd705950 100644
--- a/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
+++ b/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
@@ -274,7 +273,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const int n_output = recurrent_to_output_weights->dims->data[1];
// Check that input tensor dimensions matches with each other.
- CheckInputTensorDimensions(context, node, n_input, n_output, n_cell);
+ TF_LITE_ENSURE_OK(context, CheckInputTensorDimensions(context, node, n_input,
+ n_output, n_cell));
// Get the pointer to output, output_state and cell_state buffer tensors.
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
diff --git a/tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc b/tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc
index 164a0cbd08..0d6d29a171 100644
--- a/tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc
+++ b/tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#include <unistd.h>
#include <cassert>
#include <cmath>
#include <cstdio>
diff --git a/tensorflow/contrib/lite/model.cc b/tensorflow/contrib/lite/model.cc
index 93b3df98f3..c6869feb16 100644
--- a/tensorflow/contrib/lite/model.cc
+++ b/tensorflow/contrib/lite/model.cc
@@ -19,7 +19,6 @@ limitations under the License.
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
-#include <unistd.h>
#include "tensorflow/contrib/lite/allocation.h"
#include "tensorflow/contrib/lite/builtin_op_data.h"
@@ -616,6 +615,8 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
break;
}
case BuiltinOperator_MEAN:
+ case BuiltinOperator_REDUCE_MAX:
+ case BuiltinOperator_REDUCE_PROD:
case BuiltinOperator_SUM: {
auto* params = MallocPOD<TfLiteReducerParams>();
if (auto* schema_params = op->builtin_options_as_ReducerOptions()) {
@@ -704,6 +705,15 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
*builtin_data = static_cast<void*>(params);
break;
}
+ case BuiltinOperator_PACK: {
+ TfLitePackParams* params = MallocPOD<TfLitePackParams>();
+ if (auto* pack_params = op->builtin_options_as_PackOptions()) {
+ params->values_count = pack_params->values_count();
+ params->axis = pack_params->axis();
+ }
+ *builtin_data = reinterpret_cast<void*>(params);
+ break;
+ }
case BuiltinOperator_DELEGATE: {
// TODO(ycling): Revisit when supporting saving delegated models.
error_reporter->Report("DELEGATE op shouldn't exist in model.");
@@ -715,6 +725,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
params->min = schema_params->min();
params->max = schema_params->max();
params->num_bits = schema_params->num_bits();
+ params->narrow_range = schema_params->narrow_range();
}
*builtin_data = static_cast<void*>(params);
break;
@@ -761,6 +772,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
case BuiltinOperator_TOPK_V2:
case BuiltinOperator_TRANSPOSE:
case BuiltinOperator_POW:
+ case BuiltinOperator_LOGICAL_OR:
break;
}
return kTfLiteOk;
diff --git a/tensorflow/contrib/lite/model_test.cc b/tensorflow/contrib/lite/model_test.cc
index 15bae21a41..edfdec9315 100644
--- a/tensorflow/contrib/lite/model_test.cc
+++ b/tensorflow/contrib/lite/model_test.cc
@@ -19,7 +19,6 @@ limitations under the License.
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
-#include <unistd.h>
#include "tensorflow/contrib/lite/model.h"
diff --git a/tensorflow/contrib/lite/models/smartreply/BUILD b/tensorflow/contrib/lite/models/smartreply/BUILD
index 8b5fa240ac..9d88c396ba 100644
--- a/tensorflow/contrib/lite/models/smartreply/BUILD
+++ b/tensorflow/contrib/lite/models/smartreply/BUILD
@@ -47,6 +47,7 @@ cc_test(
name = "extract_feature_op_test",
size = "small",
srcs = ["ops/extract_feature_test.cc"],
+ tags = ["no_oss"],
deps = [
":custom_ops",
"//tensorflow/contrib/lite:framework",
@@ -61,6 +62,7 @@ cc_test(
name = "normalize_op_test",
size = "small",
srcs = ["ops/normalize_test.cc"],
+ tags = ["no_oss"],
deps = [
":custom_ops",
"//tensorflow/contrib/lite:framework",
@@ -75,6 +77,7 @@ cc_test(
name = "predict_op_test",
size = "small",
srcs = ["ops/predict_test.cc"],
+ tags = ["no_oss"],
deps = [
":custom_ops",
"//tensorflow/contrib/lite:framework",
diff --git a/tensorflow/contrib/lite/nnapi_delegate.cc b/tensorflow/contrib/lite/nnapi_delegate.cc
index cc668485a4..551e8ed320 100644
--- a/tensorflow/contrib/lite/nnapi_delegate.cc
+++ b/tensorflow/contrib/lite/nnapi_delegate.cc
@@ -560,6 +560,14 @@ TfLiteStatus AddOpsAndParams(
nnapi_version = 11; // require NNAPI 1.1
nn_op_type = ANEURALNETWORKS_TRANSPOSE;
break;
+ case tflite::BuiltinOperator_L2_NORMALIZATION:
+ nn_op_type = ANEURALNETWORKS_L2_NORMALIZATION;
+ if (reinterpret_cast<TfLiteL2NormParams*>(node.builtin_data)
+ ->activation != kTfLiteActNone) {
+ FATAL(
+ "NNAPI does not support L2Normalization with fused activations");
+ }
+ break;
case tflite::BuiltinOperator_CONCAT_EMBEDDINGS:
case tflite::BuiltinOperator_LSH_PROJECTION:
case tflite::BuiltinOperator_HASHTABLE_LOOKUP:
@@ -568,7 +576,6 @@ TfLiteStatus AddOpsAndParams(
case tflite::BuiltinOperator_EMBEDDING_LOOKUP_SPARSE:
case tflite::BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM:
case tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM:
- case tflite::BuiltinOperator_L2_NORMALIZATION:
case tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION:
case tflite::BuiltinOperator_PADV2:
case tflite::BuiltinOperator_RESIZE_BILINEAR:
@@ -607,11 +614,15 @@ TfLiteStatus AddOpsAndParams(
case tflite::BuiltinOperator_EQUAL:
case tflite::BuiltinOperator_NOT_EQUAL:
case tflite::BuiltinOperator_SUM:
+ case tflite::BuiltinOperator_REDUCE_MAX:
+ case tflite::BuiltinOperator_REDUCE_PROD:
case tflite::BuiltinOperator_SQRT:
case tflite::BuiltinOperator_RSQRT:
case tflite::BuiltinOperator_SHAPE:
case tflite::BuiltinOperator_POW:
case tflite::BuiltinOperator_FAKE_QUANT:
+ case tflite::BuiltinOperator_PACK:
+ case tflite::BuiltinOperator_LOGICAL_OR:
logError("Op code %d is currently not delegated to NNAPI", builtin);
return kTfLiteError;
break;
diff --git a/tensorflow/contrib/lite/profiling/BUILD b/tensorflow/contrib/lite/profiling/BUILD
index a162b87b8f..1172722f7a 100644
--- a/tensorflow/contrib/lite/profiling/BUILD
+++ b/tensorflow/contrib/lite/profiling/BUILD
@@ -58,6 +58,7 @@ cc_test(
name = "profile_summarizer_test",
srcs = ["profile_summarizer_test.cc"],
copts = common_copts,
+ tags = ["no_oss"],
deps = [
":profile_summarizer",
"//tensorflow/contrib/lite:framework",
diff --git a/tensorflow/contrib/lite/profiling/profile_summarizer.cc b/tensorflow/contrib/lite/profiling/profile_summarizer.cc
index c37a096588..720bd717b9 100644
--- a/tensorflow/contrib/lite/profiling/profile_summarizer.cc
+++ b/tensorflow/contrib/lite/profiling/profile_summarizer.cc
@@ -23,8 +23,6 @@ namespace tflite {
namespace profiling {
namespace {
-using Detail = tensorflow::StatsCalculator::Detail;
-
struct OperatorDetails {
std::string name;
std::vector<std::string> inputs;
@@ -83,7 +81,7 @@ OperatorDetails GetOperatorDetails(const tflite::Interpreter& interpreter,
OperatorDetails details;
details.name = op_name;
if (profiling_string) {
- details.name += ":" + string(profiling_string);
+ details.name += ":" + std::string(profiling_string);
}
details.inputs = GetTensorNames(interpreter, inputs);
details.outputs = GetTensorNames(interpreter, outputs);
@@ -125,28 +123,17 @@ void ProfileSummarizer::ProcessProfiles(
int64_t base_start_us = events[0]->begin_timestamp_us;
int node_num = 0;
int64_t curr_total_us = 0;
- std::map<std::string, Detail> details;
for (auto event : events) {
auto op_details = GetOperatorDetails(interpreter, event->event_metadata);
auto node_name = ToString(op_details.outputs);
- auto result = details.emplace(node_name, Detail());
- Detail* detail = &(result.first->second);
- detail->start_us.UpdateStat(event->begin_timestamp_us - base_start_us);
+ int64_t start_us = event->begin_timestamp_us - base_start_us;
int64_t node_exec_time =
event->end_timestamp_us - event->begin_timestamp_us;
- detail->rel_end_us.UpdateStat(node_exec_time);
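+    // Per-node aggregation now happens inside StatsCalculator instead of in
+    // a locally maintained Detail map.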
+ stats_calculator_->AddNodeStats(node_name, op_details.name, node_num,
+ start_us, node_exec_time, 0 /*memory */);
curr_total_us += node_exec_time;
++node_num;
-
- if (result.second) {
- detail->name = node_name;
- detail->type = op_details.name;
- detail->run_order = node_num;
- detail->times_called = 0;
- }
- ++detail->times_called;
}
- stats_calculator_->UpdateDetails(details);
stats_calculator_->UpdateRunTotalUs(curr_total_us);
}
} // namespace profiling
diff --git a/tensorflow/contrib/lite/python/BUILD b/tensorflow/contrib/lite/python/BUILD
index 8c9608db04..860aff9e7e 100644
--- a/tensorflow/contrib/lite/python/BUILD
+++ b/tensorflow/contrib/lite/python/BUILD
@@ -20,6 +20,7 @@ py_library(
deps = [
"//tensorflow/contrib/lite/python/interpreter_wrapper:tensorflow_wrap_interpreter_wrapper",
"//tensorflow/python:util",
+ "//third_party/py/numpy",
],
)
@@ -71,7 +72,10 @@ py_test(
srcs = ["lite_test.py"],
data = [":interpreter_test_data"],
srcs_version = "PY2AND3",
- tags = ["no_windows"],
+ tags = [
+ "no_oss",
+ "no_windows",
+ ],
deps = [
":lite",
],
@@ -163,7 +167,10 @@ py_test(
name = "convert_saved_model_test",
srcs = ["convert_saved_model_test.py"],
srcs_version = "PY2AND3",
- tags = ["no_windows"],
+ tags = [
+ "no_oss",
+ "no_windows",
+ ],
visibility = ["//visibility:public"],
deps = [
":convert_saved_model",
diff --git a/tensorflow/contrib/lite/python/convert.py b/tensorflow/contrib/lite/python/convert.py
index 0ea2630f71..ec49738fb5 100644
--- a/tensorflow/contrib/lite/python/convert.py
+++ b/tensorflow/contrib/lite/python/convert.py
@@ -115,6 +115,7 @@ def build_toco_convert_protos(input_tensors,
inference_type=lite_constants.FLOAT,
inference_input_type=None,
input_format=lite_constants.TENSORFLOW_GRAPHDEF,
+ input_shapes=None,
output_format=lite_constants.TFLITE,
quantized_input_stats=None,
default_ranges_stats=None,
@@ -141,6 +142,8 @@ def build_toco_convert_protos(input_tensors,
Must be `{FLOAT, QUANTIZED_UINT8}`. (default `inference_type`)
input_format: Type of data to read. Currently must be
`{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
+ input_shapes: Shapes of the input arrays. Must be a list of the same length
+ as `input_tensors`, or None. (default None)
output_format: Output file format. Currently must be `{TFLITE,
GRAPHVIZ_DOT}`. (default TFLITE)
quantized_input_stats: List of tuples of integers representing the mean and
@@ -209,7 +212,11 @@ def build_toco_convert_protos(input_tensors,
if inference_type == lite_constants.QUANTIZED_UINT8:
input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
input_array.name = tensor_name(input_tensor)
- input_array.shape.dims.extend(map(int, input_tensor.get_shape()))
+ if input_shapes is None:
+ shape = input_tensor.get_shape()
+ else:
+ shape = input_shapes[idx]
+ input_array.shape.dims.extend(map(int, shape))
for output_tensor in output_tensors:
model.output_arrays.append(tensor_name(output_tensor))
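As a rough illustration of the new `input_shapes` argument, a graph whose batch
dimension is undefined at construction time can have its shape overridden
explicitly. The placeholder names and shapes below are hypothetical, and the
call is only a sketch of the Python API touched by this hunk:

import tensorflow as tf
from tensorflow.contrib.lite.python import convert

# Hypothetical graph with an undefined batch dimension.
inp = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name="input")
out = tf.identity(inp, name="output")

# input_shapes overrides the shape read from the tensor itself, so the
# undefined dimension no longer breaks the int() conversion above.
model_flags, toco_flags = convert.build_toco_convert_protos(
    input_tensors=[inp], output_tensors=[out],
    input_shapes=[[1, 224, 224, 3]])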
diff --git a/tensorflow/contrib/lite/python/interpreter.py b/tensorflow/contrib/lite/python/interpreter.py
index e1981ceae2..3243bddac8 100644
--- a/tensorflow/contrib/lite/python/interpreter.py
+++ b/tensorflow/contrib/lite/python/interpreter.py
@@ -18,6 +18,7 @@ from __future__ import division
from __future__ import print_function
import sys
+import numpy as np
from tensorflow.python.util.lazy_loader import LazyLoader
# Lazy load since some of the performance benchmark skylark rules
@@ -162,6 +163,9 @@ class Interpreter(object):
ValueError: If the interpreter could not resize the input tensor.
"""
self._ensure_safe()
+ # `ResizeInputTensor` now only accepts an int32 numpy array as the
+ # `tensor_size` parameter.
+ tensor_size = np.array(tensor_size, dtype=np.int32)
self._interpreter.ResizeInputTensor(input_index, tensor_size)
def get_output_details(self):
@@ -204,7 +208,7 @@ class Interpreter(object):
for i in range(10):
input().fill(3.)
interpreter.invoke()
- print("inference %s" % output)
+ print("inference %s" % output())
Notice how this function avoids making a numpy array directly. This is
because it is important to not hold actual numpy views to the data longer
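With the int32 conversion above, `resize_tensor_input` now accepts a plain
Python tuple or list. A minimal usage sketch (the model path is hypothetical):

from tensorflow.contrib.lite.python import interpreter as interpreter_wrapper

interpreter = interpreter_wrapper.Interpreter(model_path="/tmp/add.tflite")
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]['index']

# A tuple works here; it is converted to an int32 numpy array internally
# before being handed to ResizeInputTensor.
interpreter.resize_tensor_input(input_index, (1, 4))
interpreter.allocate_tensors()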
diff --git a/tensorflow/contrib/lite/python/interpreter_test.py b/tensorflow/contrib/lite/python/interpreter_test.py
index 95fa4b8584..e77d52ca99 100644
--- a/tensorflow/contrib/lite/python/interpreter_test.py
+++ b/tensorflow/contrib/lite/python/interpreter_test.py
@@ -83,7 +83,7 @@ class InterpreterTest(test_util.TensorFlowTestCase):
test_input = np.array([[1, 2, 3, 4]], dtype=np.uint8)
expected_output = np.array([[4, 3, 2, 1]], dtype=np.uint8)
interpreter.resize_tensor_input(input_details[0]['index'],
- np.array(test_input.shape, dtype=np.int32))
+ test_input.shape)
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'], test_input)
interpreter.invoke()
diff --git a/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.cc b/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.cc
index c38b692dcd..9ab05f3068 100644
--- a/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.cc
+++ b/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.cc
@@ -108,7 +108,9 @@ std::unique_ptr<tflite::Interpreter> CreateInterpreter(
ImportNumpy();
std::unique_ptr<tflite::Interpreter> interpreter;
- tflite::InterpreterBuilder(*model, resolver)(&interpreter);
+ if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk) {
+ return nullptr;
+ }
return interpreter;
}
@@ -182,13 +184,37 @@ PyObject* PyTupleFromQuantizationParam(const TfLiteQuantizationParams& param) {
} // namespace
+InterpreterWrapper* InterpreterWrapper::CreateInterpreterWrapper(
+ std::unique_ptr<tflite::FlatBufferModel> model,
+ std::unique_ptr<PythonErrorReporter> error_reporter,
+ std::string* error_msg) {
+ if (!model) {
+ *error_msg = error_reporter->message();
+ return nullptr;
+ }
+
+ auto resolver = absl::make_unique<tflite::ops::builtin::BuiltinOpResolver>();
+ auto interpreter = CreateInterpreter(model.get(), *resolver);
+ if (!interpreter) {
+ *error_msg = error_reporter->message();
+ return nullptr;
+ }
+
+ InterpreterWrapper* wrapper =
+ new InterpreterWrapper(std::move(model), std::move(error_reporter),
+ std::move(resolver), std::move(interpreter));
+ return wrapper;
+}
+
InterpreterWrapper::InterpreterWrapper(
std::unique_ptr<tflite::FlatBufferModel> model,
- std::unique_ptr<PythonErrorReporter> error_reporter)
+ std::unique_ptr<PythonErrorReporter> error_reporter,
+ std::unique_ptr<tflite::ops::builtin::BuiltinOpResolver> resolver,
+ std::unique_ptr<tflite::Interpreter> interpreter)
: model_(std::move(model)),
error_reporter_(std::move(error_reporter)),
- resolver_(absl::make_unique<tflite::ops::builtin::BuiltinOpResolver>()),
- interpreter_(CreateInterpreter(model_.get(), *resolver_)) {}
+ resolver_(std::move(resolver)),
+ interpreter_(std::move(interpreter)) {}
InterpreterWrapper::~InterpreterWrapper() {}
@@ -340,6 +366,8 @@ PyObject* InterpreterWrapper::SetTensor(int i, PyObject* value) {
namespace {
+// Checks whether a tensor access can succeed; returns nullptr on error,
+// and Py_None otherwise.
PyObject* CheckGetTensorArgs(Interpreter* interpreter_, int tensor_index,
TfLiteTensor** tensor, int* type_num) {
TFLITE_PY_ENSURE_VALID_INTERPRETER();
@@ -362,7 +390,7 @@ PyObject* CheckGetTensorArgs(Interpreter* interpreter_, int tensor_index,
return nullptr;
}
- return nullptr;
+ Py_RETURN_NONE;
}
} // namespace
@@ -371,10 +399,12 @@ PyObject* InterpreterWrapper::GetTensor(int i) const {
// Sanity check accessor
TfLiteTensor* tensor = nullptr;
int type_num = 0;
- if (PyObject* pynone_or_nullptr =
- CheckGetTensorArgs(interpreter_.get(), i, &tensor, &type_num)) {
- return pynone_or_nullptr;
- }
+
+ PyObject* check_result =
+ CheckGetTensorArgs(interpreter_.get(), i, &tensor, &type_num);
+ if (check_result == nullptr) return check_result;
+ Py_XDECREF(check_result);
+
std::vector<npy_intp> dims(tensor->dims->data,
tensor->dims->data + tensor->dims->size);
// Make a buffer copy but we must tell Numpy It owns that data or else
@@ -396,10 +426,11 @@ PyObject* InterpreterWrapper::tensor(PyObject* base_object, int i) {
// Sanity check accessor
TfLiteTensor* tensor = nullptr;
int type_num = 0;
- if (PyObject* pynone_or_nullptr =
- CheckGetTensorArgs(interpreter_.get(), i, &tensor, &type_num)) {
- return pynone_or_nullptr;
- }
+
+ PyObject* check_result =
+ CheckGetTensorArgs(interpreter_.get(), i, &tensor, &type_num);
+ if (check_result == nullptr) return check_result;
+ Py_XDECREF(check_result);
std::vector<npy_intp> dims(tensor->dims->data,
tensor->dims->data + tensor->dims->size);
@@ -416,11 +447,8 @@ InterpreterWrapper* InterpreterWrapper::CreateWrapperCPPFromFile(
std::unique_ptr<PythonErrorReporter> error_reporter(new PythonErrorReporter);
std::unique_ptr<tflite::FlatBufferModel> model =
tflite::FlatBufferModel::BuildFromFile(model_path, error_reporter.get());
- if (!model) {
- *error_msg = error_reporter->message();
- return nullptr;
- }
- return new InterpreterWrapper(std::move(model), std::move(error_reporter));
+ return CreateInterpreterWrapper(std::move(model), std::move(error_reporter),
+ error_msg);
}
InterpreterWrapper* InterpreterWrapper::CreateWrapperCPPFromBuffer(
@@ -434,11 +462,8 @@ InterpreterWrapper* InterpreterWrapper::CreateWrapperCPPFromBuffer(
std::unique_ptr<tflite::FlatBufferModel> model =
tflite::FlatBufferModel::BuildFromBuffer(buf, length,
error_reporter.get());
- if (!model) {
- *error_msg = error_reporter->message();
- return nullptr;
- }
- return new InterpreterWrapper(std::move(model), std::move(error_reporter));
+ return CreateInterpreterWrapper(std::move(model), std::move(error_reporter),
+ error_msg);
}
PyObject* InterpreterWrapper::ResetVariableTensorsToZero() {
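With interpreter construction now validated inside the factory, a build failure
is reported back through `error_msg` instead of yielding a wrapper around a
null interpreter. A rough Python-side sketch of the effect (the path is
hypothetical, and the exact exception type depends on how the SWIG layer
surfaces `error_msg`):

from tensorflow.contrib.lite.python import interpreter as interpreter_wrapper

try:
  # Hypothetical model that the builtin op resolver cannot handle.
  interpreter_wrapper.Interpreter(model_path="/tmp/unsupported_model.tflite")
except Exception as e:  # assumed: the C++ error message is raised from the wrapper
  print("Interpreter construction failed:", e)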
diff --git a/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h b/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h
index febfd2dc56..3e03751da4 100644
--- a/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h
+++ b/tensorflow/contrib/lite/python/interpreter_wrapper/interpreter_wrapper.h
@@ -15,13 +15,13 @@ limitations under the License.
#ifndef TENSORFLOW_CONTRIB_LITE_PYTHON_INTERPRETER_WRAPPER_INTERPRETER_WRAPPER_H_
#define TENSORFLOW_CONTRIB_LITE_PYTHON_INTERPRETER_WRAPPER_INTERPRETER_WRAPPER_H_
+// Place `<locale>` before <Python.h> to avoid build failures in macOS.
+#include <locale>
#include <memory>
#include <string>
#include <vector>
-// Place `<locale>` before <Python.h> to avoid build failures in macOS.
#include <Python.h>
-#include <locale>
// We forward declare TFLite classes here to avoid exposing them to SWIG.
namespace tflite {
@@ -69,14 +69,28 @@ class InterpreterWrapper {
PyObject* tensor(PyObject* base_object, int i);
private:
- InterpreterWrapper(std::unique_ptr<tflite::FlatBufferModel> model,
- std::unique_ptr<PythonErrorReporter> error_reporter);
+ // Helper function to construct an `InterpreterWrapper` object.
+ // It returns an `InterpreterWrapper` only if it can construct an `Interpreter`;
+ // otherwise it returns `nullptr`.
+ static InterpreterWrapper* CreateInterpreterWrapper(
+ std::unique_ptr<tflite::FlatBufferModel> model,
+ std::unique_ptr<PythonErrorReporter> error_reporter,
+ std::string* error_msg);
+
+ InterpreterWrapper(
+ std::unique_ptr<tflite::FlatBufferModel> model,
+ std::unique_ptr<PythonErrorReporter> error_reporter,
+ std::unique_ptr<tflite::ops::builtin::BuiltinOpResolver> resolver,
+ std::unique_ptr<tflite::Interpreter> interpreter);
// InterpreterWrapper is not copyable or assignable. We avoid the use of
// InterpreterWrapper() = delete here for SWIG compatibility.
InterpreterWrapper();
InterpreterWrapper(const InterpreterWrapper& rhs);
+ // The public functions that create `InterpreterWrapper` should ensure all
+ // these member variables are initialized successfully. Otherwise they should
+ // report the error and return `nullptr`.
const std::unique_ptr<tflite::FlatBufferModel> model_;
const std::unique_ptr<PythonErrorReporter> error_reporter_;
const std::unique_ptr<tflite::ops::builtin::BuiltinOpResolver> resolver_;
diff --git a/tensorflow/contrib/lite/python/lite.py b/tensorflow/contrib/lite/python/lite.py
index 29a1487c1f..2f9b9d469a 100644
--- a/tensorflow/contrib/lite/python/lite.py
+++ b/tensorflow/contrib/lite/python/lite.py
@@ -40,24 +40,23 @@ from google.protobuf import text_format as _text_format
from google.protobuf.message import DecodeError
from tensorflow.contrib.lite.python import lite_constants as constants
from tensorflow.contrib.lite.python.convert import build_toco_convert_protos # pylint: disable=unused-import
-from tensorflow.contrib.lite.python.convert import tensor_name
+from tensorflow.contrib.lite.python.convert import tensor_name as _tensor_name
from tensorflow.contrib.lite.python.convert import toco_convert
from tensorflow.contrib.lite.python.convert import toco_convert_protos # pylint: disable=unused-import
-from tensorflow.contrib.lite.python.convert_saved_model import freeze_saved_model
-from tensorflow.contrib.lite.python.convert_saved_model import get_tensors_from_tensor_names
-from tensorflow.contrib.lite.python.convert_saved_model import set_tensor_shapes
+from tensorflow.contrib.lite.python.convert_saved_model import freeze_saved_model as _freeze_saved_model
+from tensorflow.contrib.lite.python.convert_saved_model import get_tensors_from_tensor_names as _get_tensors_from_tensor_names
+from tensorflow.contrib.lite.python.convert_saved_model import set_tensor_shapes as _set_tensor_shapes
from tensorflow.contrib.lite.python.interpreter import Interpreter # pylint: disable=unused-import
from tensorflow.contrib.lite.python.op_hint import convert_op_hints_to_stubs # pylint: disable=unused-import
from tensorflow.contrib.lite.python.op_hint import OpHint # pylint: disable=unused-import
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.python import keras as _keras
from tensorflow.python.client import session as _session
-from tensorflow.python.framework import graph_util as tf_graph_util
-from tensorflow.python.framework.importer import import_graph_def
-from tensorflow.python.ops.variables import global_variables_initializer
-from tensorflow.python.saved_model import signature_constants
-from tensorflow.python.saved_model import tag_constants
-# from tensorflow.python.util.all_util import remove_undocumented
+from tensorflow.python.framework import graph_util as _tf_graph_util
+from tensorflow.python.framework.importer import import_graph_def as _import_graph_def
+from tensorflow.python.ops.variables import global_variables_initializer as _global_variables_initializer
+from tensorflow.python.saved_model import signature_constants as _signature_constants
+from tensorflow.python.saved_model import tag_constants as _tag_constants
class TocoConverter(object):
@@ -196,7 +195,7 @@ class TocoConverter(object):
input_arrays or output_arrays contains an invalid tensor name.
"""
with _session.Session() as sess:
- sess.run(global_variables_initializer())
+ sess.run(_global_variables_initializer())
# Read GraphDef from file.
graph_def = _graph_pb2.GraphDef()
@@ -218,12 +217,12 @@ class TocoConverter(object):
raise ValueError(
"Unable to parse input file '{}'.".format(graph_def_file))
sess.graph.as_default()
- import_graph_def(graph_def, name="")
+ _import_graph_def(graph_def, name="")
# Get input and output tensors.
- input_tensors = get_tensors_from_tensor_names(sess.graph, input_arrays)
- output_tensors = get_tensors_from_tensor_names(sess.graph, output_arrays)
- set_tensor_shapes(input_tensors, input_shapes)
+ input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
+ output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
+ _set_tensor_shapes(input_tensors, input_shapes)
# Check if graph is frozen.
if not _is_frozen_graph(sess):
@@ -261,12 +260,12 @@ class TocoConverter(object):
TocoConverter class.
"""
if tag_set is None:
- tag_set = set([tag_constants.SERVING])
+ tag_set = set([_tag_constants.SERVING])
if signature_key is None:
- signature_key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
+ signature_key = _signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
- result = freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
- output_arrays, tag_set, signature_key)
+ result = _freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
+ output_arrays, tag_set, signature_key)
return cls(
graph_def=result[0], input_tensors=result[1], output_tensors=result[2])
@@ -299,15 +298,15 @@ class TocoConverter(object):
# Get input and output tensors.
if input_arrays:
- input_tensors = get_tensors_from_tensor_names(sess.graph, input_arrays)
+ input_tensors = _get_tensors_from_tensor_names(sess.graph, input_arrays)
else:
input_tensors = keras_model.inputs
if output_arrays:
- output_tensors = get_tensors_from_tensor_names(sess.graph, output_arrays)
+ output_tensors = _get_tensors_from_tensor_names(sess.graph, output_arrays)
else:
output_tensors = keras_model.outputs
- set_tensor_shapes(input_tensors, input_shapes)
+ _set_tensor_shapes(input_tensors, input_shapes)
graph_def = _freeze_graph(sess, output_tensors)
return cls(graph_def, input_tensors, output_tensors)
@@ -328,12 +327,12 @@ class TocoConverter(object):
for tensor in self._input_tensors:
if not tensor.get_shape():
raise ValueError("Provide an input shape for input array '{0}'.".format(
- tensor_name(tensor)))
+ _tensor_name(tensor)))
shape = tensor.get_shape().as_list()
if None in shape[1:]:
raise ValueError(
"None is only supported in the 1st dimension. Tensor '{0}' has "
- "invalid shape '{1}'.".format(tensor_name(tensor), shape))
+ "invalid shape '{1}'.".format(_tensor_name(tensor), shape))
elif shape[0] is None:
self._set_batch_size(batch_size=1)
@@ -343,7 +342,7 @@ class TocoConverter(object):
quantized_stats = []
invalid_stats = []
for tensor in self._input_tensors:
- name = tensor_name(tensor)
+ name = _tensor_name(tensor)
if name in self.quantized_input_stats:
quantized_stats.append(self.quantized_input_stats[name])
else:
@@ -381,7 +380,7 @@ class TocoConverter(object):
Returns:
List of strings.
"""
- return [tensor_name(tensor) for tensor in self._input_tensors]
+ return [_tensor_name(tensor) for tensor in self._input_tensors]
def _set_batch_size(self, batch_size):
"""Sets the first dimension of the input tensor to `batch_size`.
@@ -428,11 +427,9 @@ def _freeze_graph(sess, output_tensors):
Frozen GraphDef.
"""
if not _is_frozen_graph(sess):
- sess.run(global_variables_initializer())
- output_arrays = [tensor_name(tensor) for tensor in output_tensors]
- return tf_graph_util.convert_variables_to_constants(sess, sess.graph_def,
- output_arrays)
+ sess.run(_global_variables_initializer())
+ output_arrays = [_tensor_name(tensor) for tensor in output_tensors]
+ return _tf_graph_util.convert_variables_to_constants(
+ sess, sess.graph_def, output_arrays)
else:
return sess.graph_def
-
-# remove_undocumented(__name__)
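The import renames above only keep helper symbols out of the public module
namespace; `TocoConverter` itself is used exactly as before. A minimal, hedged
sketch (the output path is illustrative, and `convert()` still needs a working
TOCO backend):

import tensorflow as tf
from tensorflow.contrib.lite.python import lite

sess = tf.Session(graph=tf.Graph())
with sess.graph.as_default():
  inp = tf.placeholder(tf.float32, shape=[1, 4], name="input")
  out = inp * 2.0

converter = lite.TocoConverter.from_session(sess, [inp], [out])
tflite_model = converter.convert()
open("/tmp/model.tflite", "wb").write(tflite_model)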
diff --git a/tensorflow/contrib/lite/python/tflite_convert.py b/tensorflow/contrib/lite/python/tflite_convert.py
index 9bd1f4f76e..d17482e601 100644
--- a/tensorflow/contrib/lite/python/tflite_convert.py
+++ b/tensorflow/contrib/lite/python/tflite_convert.py
@@ -257,7 +257,7 @@ def run_main(_):
parser.add_argument(
"--input_arrays",
type=str,
- help="Names of the output arrays, comma-separated.")
+ help="Names of the input arrays, comma-separated.")
parser.add_argument(
"--input_shapes",
type=str,
diff --git a/tensorflow/contrib/lite/schema/BUILD b/tensorflow/contrib/lite/schema/BUILD
index f095151cae..b616e449e6 100644
--- a/tensorflow/contrib/lite/schema/BUILD
+++ b/tensorflow/contrib/lite/schema/BUILD
@@ -30,7 +30,10 @@ py_test(
size = "small",
srcs = ["upgrade_schema_test.py"],
srcs_version = "PY2AND3",
- tags = ["no_pip"],
+ tags = [
+ "no_oss",
+ "no_pip",
+ ],
deps = [
":upgrade_schema",
"//tensorflow/python:client_testlib",
@@ -64,6 +67,7 @@ cc_test(
"schema_v3.fbs",
],
tags = [
+ "no_oss",
"tflite_not_portable_android",
"tflite_not_portable_ios",
],
diff --git a/tensorflow/contrib/lite/schema/builtin_ops_header/BUILD b/tensorflow/contrib/lite/schema/builtin_ops_header/BUILD
index 0148149a6a..4a627761da 100644
--- a/tensorflow/contrib/lite/schema/builtin_ops_header/BUILD
+++ b/tensorflow/contrib/lite/schema/builtin_ops_header/BUILD
@@ -24,6 +24,7 @@ cc_binary(
cc_test(
name = "generator_test",
srcs = ["generator_test.cc"],
+ tags = ["no_oss"],
deps = [
":generator",
"@com_google_googletest//:gtest",
@@ -36,6 +37,7 @@ cc_test(
data = [
"//tensorflow/contrib/lite:builtin_ops.h",
],
+ tags = ["no_oss"],
deps = [
":generator",
"@com_google_googletest//:gtest",
diff --git a/tensorflow/contrib/lite/schema/schema.fbs b/tensorflow/contrib/lite/schema/schema.fbs
index 17ea26052d..a285bf9919 100644
--- a/tensorflow/contrib/lite/schema/schema.fbs
+++ b/tensorflow/contrib/lite/schema/schema.fbs
@@ -155,13 +155,17 @@ enum BuiltinOperator : byte {
EQUAL = 71,
NOT_EQUAL = 72,
LOG = 73,
- SUM=74,
+ SUM = 74,
SQRT = 75,
RSQRT = 76,
SHAPE = 77,
POW = 78,
ARG_MIN = 79,
FAKE_QUANT = 80,
+ REDUCE_PROD = 81,
+ REDUCE_MAX = 82,
+ PACK = 83,
+ LOGICAL_OR = 84,
}
// Options for the builtin operators.
@@ -224,6 +228,8 @@ union BuiltinOptions {
PowOptions,
ArgMinOptions,
FakeQuantOptions,
+ PackOptions,
+ LogicalOrOptions,
}
enum Padding : byte { SAME, VALID }
@@ -526,9 +532,21 @@ table PowOptions {
}
table FakeQuantOptions {
+ // Parameters supported by version 1:
min:float;
max:float;
num_bits:int;
+
+ // Parameters supported by version 2:
+ narrow_range:bool;
+}
+
+table PackOptions {
+ values_count:int;
+ axis:int;
+}
+
+table LogicalOrOptions {
}
// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
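For reference, a hedged sketch of TensorFlow ops that would exercise the newly
added schema entries once converted (the mapping itself is implemented in toco,
not in this file; the names below are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[2, 3], name="x")
a = tf.placeholder(tf.bool, shape=[2, 3], name="a")
b = tf.placeholder(tf.bool, shape=[2, 3], name="b")

packed = tf.stack([x, x], axis=0)        # PACK (PackOptions: values_count, axis)
either = tf.logical_or(a, b)             # LOGICAL_OR (LogicalOrOptions)
prod = tf.reduce_prod(x, axis=1)         # REDUCE_PROD
largest = tf.reduce_max(x, axis=1)       # REDUCE_MAX
quant = tf.fake_quant_with_min_max_args(
    x, min=-6.0, max=6.0, num_bits=8,
    narrow_range=True)                   # FakeQuantOptions.narrow_range (version 2)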
diff --git a/tensorflow/contrib/lite/schema/schema_generated.h b/tensorflow/contrib/lite/schema/schema_generated.h
index 37489ebc68..8c1d6d6a36 100755
--- a/tensorflow/contrib/lite/schema/schema_generated.h
+++ b/tensorflow/contrib/lite/schema/schema_generated.h
@@ -205,6 +205,12 @@ struct PowOptionsT;
struct FakeQuantOptions;
struct FakeQuantOptionsT;
+struct PackOptions;
+struct PackOptionsT;
+
+struct LogicalOrOptions;
+struct LogicalOrOptionsT;
+
struct OperatorCode;
struct OperatorCodeT;
@@ -351,11 +357,15 @@ enum BuiltinOperator {
BuiltinOperator_POW = 78,
BuiltinOperator_ARG_MIN = 79,
BuiltinOperator_FAKE_QUANT = 80,
+ BuiltinOperator_REDUCE_PROD = 81,
+ BuiltinOperator_REDUCE_MAX = 82,
+ BuiltinOperator_PACK = 83,
+ BuiltinOperator_LOGICAL_OR = 84,
BuiltinOperator_MIN = BuiltinOperator_ADD,
- BuiltinOperator_MAX = BuiltinOperator_FAKE_QUANT
+ BuiltinOperator_MAX = BuiltinOperator_LOGICAL_OR
};
-inline BuiltinOperator (&EnumValuesBuiltinOperator())[80] {
+inline BuiltinOperator (&EnumValuesBuiltinOperator())[84] {
static BuiltinOperator values[] = {
BuiltinOperator_ADD,
BuiltinOperator_AVERAGE_POOL_2D,
@@ -436,7 +446,11 @@ inline BuiltinOperator (&EnumValuesBuiltinOperator())[80] {
BuiltinOperator_SHAPE,
BuiltinOperator_POW,
BuiltinOperator_ARG_MIN,
- BuiltinOperator_FAKE_QUANT
+ BuiltinOperator_FAKE_QUANT,
+ BuiltinOperator_REDUCE_PROD,
+ BuiltinOperator_REDUCE_MAX,
+ BuiltinOperator_PACK,
+ BuiltinOperator_LOGICAL_OR
};
return values;
}
@@ -524,6 +538,10 @@ inline const char **EnumNamesBuiltinOperator() {
"POW",
"ARG_MIN",
"FAKE_QUANT",
+ "REDUCE_PROD",
+ "REDUCE_MAX",
+ "PACK",
+ "LOGICAL_OR",
nullptr
};
return names;
@@ -594,11 +612,13 @@ enum BuiltinOptions {
BuiltinOptions_PowOptions = 56,
BuiltinOptions_ArgMinOptions = 57,
BuiltinOptions_FakeQuantOptions = 58,
+ BuiltinOptions_PackOptions = 59,
+ BuiltinOptions_LogicalOrOptions = 60,
BuiltinOptions_MIN = BuiltinOptions_NONE,
- BuiltinOptions_MAX = BuiltinOptions_FakeQuantOptions
+ BuiltinOptions_MAX = BuiltinOptions_LogicalOrOptions
};
-inline BuiltinOptions (&EnumValuesBuiltinOptions())[59] {
+inline BuiltinOptions (&EnumValuesBuiltinOptions())[61] {
static BuiltinOptions values[] = {
BuiltinOptions_NONE,
BuiltinOptions_Conv2DOptions,
@@ -658,7 +678,9 @@ inline BuiltinOptions (&EnumValuesBuiltinOptions())[59] {
BuiltinOptions_ShapeOptions,
BuiltinOptions_PowOptions,
BuiltinOptions_ArgMinOptions,
- BuiltinOptions_FakeQuantOptions
+ BuiltinOptions_FakeQuantOptions,
+ BuiltinOptions_PackOptions,
+ BuiltinOptions_LogicalOrOptions
};
return values;
}
@@ -724,6 +746,8 @@ inline const char **EnumNamesBuiltinOptions() {
"PowOptions",
"ArgMinOptions",
"FakeQuantOptions",
+ "PackOptions",
+ "LogicalOrOptions",
nullptr
};
return names;
@@ -970,6 +994,14 @@ template<> struct BuiltinOptionsTraits<FakeQuantOptions> {
static const BuiltinOptions enum_value = BuiltinOptions_FakeQuantOptions;
};
+template<> struct BuiltinOptionsTraits<PackOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_PackOptions;
+};
+
+template<> struct BuiltinOptionsTraits<LogicalOrOptions> {
+ static const BuiltinOptions enum_value = BuiltinOptions_LogicalOrOptions;
+};
+
struct BuiltinOptionsUnion {
BuiltinOptions type;
void *value;
@@ -1465,6 +1497,22 @@ struct BuiltinOptionsUnion {
return type == BuiltinOptions_FakeQuantOptions ?
reinterpret_cast<const FakeQuantOptionsT *>(value) : nullptr;
}
+ PackOptionsT *AsPackOptions() {
+ return type == BuiltinOptions_PackOptions ?
+ reinterpret_cast<PackOptionsT *>(value) : nullptr;
+ }
+ const PackOptionsT *AsPackOptions() const {
+ return type == BuiltinOptions_PackOptions ?
+ reinterpret_cast<const PackOptionsT *>(value) : nullptr;
+ }
+ LogicalOrOptionsT *AsLogicalOrOptions() {
+ return type == BuiltinOptions_LogicalOrOptions ?
+ reinterpret_cast<LogicalOrOptionsT *>(value) : nullptr;
+ }
+ const LogicalOrOptionsT *AsLogicalOrOptions() const {
+ return type == BuiltinOptions_LogicalOrOptions ?
+ reinterpret_cast<const LogicalOrOptionsT *>(value) : nullptr;
+ }
};
bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
@@ -5213,10 +5261,12 @@ struct FakeQuantOptionsT : public flatbuffers::NativeTable {
float min;
float max;
int32_t num_bits;
+ bool narrow_range;
FakeQuantOptionsT()
: min(0.0f),
max(0.0f),
- num_bits(0) {
+ num_bits(0),
+ narrow_range(false) {
}
};
@@ -5225,7 +5275,8 @@ struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
enum {
VT_MIN = 4,
VT_MAX = 6,
- VT_NUM_BITS = 8
+ VT_NUM_BITS = 8,
+ VT_NARROW_RANGE = 10
};
float min() const {
return GetField<float>(VT_MIN, 0.0f);
@@ -5236,11 +5287,15 @@ struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
int32_t num_bits() const {
return GetField<int32_t>(VT_NUM_BITS, 0);
}
+ bool narrow_range() const {
+ return GetField<uint8_t>(VT_NARROW_RANGE, 0) != 0;
+ }
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<float>(verifier, VT_MIN) &&
VerifyField<float>(verifier, VT_MAX) &&
VerifyField<int32_t>(verifier, VT_NUM_BITS) &&
+ VerifyField<uint8_t>(verifier, VT_NARROW_RANGE) &&
verifier.EndTable();
}
FakeQuantOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
@@ -5260,6 +5315,9 @@ struct FakeQuantOptionsBuilder {
void add_num_bits(int32_t num_bits) {
fbb_.AddElement<int32_t>(FakeQuantOptions::VT_NUM_BITS, num_bits, 0);
}
+ void add_narrow_range(bool narrow_range) {
+ fbb_.AddElement<uint8_t>(FakeQuantOptions::VT_NARROW_RANGE, static_cast<uint8_t>(narrow_range), 0);
+ }
explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
@@ -5276,16 +5334,124 @@ inline flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(
flatbuffers::FlatBufferBuilder &_fbb,
float min = 0.0f,
float max = 0.0f,
- int32_t num_bits = 0) {
+ int32_t num_bits = 0,
+ bool narrow_range = false) {
FakeQuantOptionsBuilder builder_(_fbb);
builder_.add_num_bits(num_bits);
builder_.add_max(max);
builder_.add_min(min);
+ builder_.add_narrow_range(narrow_range);
return builder_.Finish();
}
flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+struct PackOptionsT : public flatbuffers::NativeTable {
+ typedef PackOptions TableType;
+ int32_t values_count;
+ int32_t axis;
+ PackOptionsT()
+ : values_count(0),
+ axis(0) {
+ }
+};
+
+struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef PackOptionsT NativeTableType;
+ enum {
+ VT_VALUES_COUNT = 4,
+ VT_AXIS = 6
+ };
+ int32_t values_count() const {
+ return GetField<int32_t>(VT_VALUES_COUNT, 0);
+ }
+ int32_t axis() const {
+ return GetField<int32_t>(VT_AXIS, 0);
+ }
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ VerifyField<int32_t>(verifier, VT_VALUES_COUNT) &&
+ VerifyField<int32_t>(verifier, VT_AXIS) &&
+ verifier.EndTable();
+ }
+ PackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<PackOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct PackOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ void add_values_count(int32_t values_count) {
+ fbb_.AddElement<int32_t>(PackOptions::VT_VALUES_COUNT, values_count, 0);
+ }
+ void add_axis(int32_t axis) {
+ fbb_.AddElement<int32_t>(PackOptions::VT_AXIS, axis, 0);
+ }
+ explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ PackOptionsBuilder &operator=(const PackOptionsBuilder &);
+ flatbuffers::Offset<PackOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<PackOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<PackOptions> CreatePackOptions(
+ flatbuffers::FlatBufferBuilder &_fbb,
+ int32_t values_count = 0,
+ int32_t axis = 0) {
+ PackOptionsBuilder builder_(_fbb);
+ builder_.add_axis(axis);
+ builder_.add_values_count(values_count);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<PackOptions> CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct LogicalOrOptionsT : public flatbuffers::NativeTable {
+ typedef LogicalOrOptions TableType;
+ LogicalOrOptionsT() {
+ }
+};
+
+struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+ typedef LogicalOrOptionsT NativeTableType;
+ bool Verify(flatbuffers::Verifier &verifier) const {
+ return VerifyTableStart(verifier) &&
+ verifier.EndTable();
+ }
+ LogicalOrOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ void UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+ static flatbuffers::Offset<LogicalOrOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct LogicalOrOptionsBuilder {
+ flatbuffers::FlatBufferBuilder &fbb_;
+ flatbuffers::uoffset_t start_;
+ explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+ : fbb_(_fbb) {
+ start_ = fbb_.StartTable();
+ }
+ LogicalOrOptionsBuilder &operator=(const LogicalOrOptionsBuilder &);
+ flatbuffers::Offset<LogicalOrOptions> Finish() {
+ const auto end = fbb_.EndTable(start_);
+ auto o = flatbuffers::Offset<LogicalOrOptions>(end);
+ return o;
+ }
+};
+
+inline flatbuffers::Offset<LogicalOrOptions> CreateLogicalOrOptions(
+ flatbuffers::FlatBufferBuilder &_fbb) {
+ LogicalOrOptionsBuilder builder_(_fbb);
+ return builder_.Finish();
+}
+
+flatbuffers::Offset<LogicalOrOptions> CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
struct OperatorCodeT : public flatbuffers::NativeTable {
typedef OperatorCode TableType;
BuiltinOperator builtin_code;
@@ -5593,6 +5759,12 @@ struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const FakeQuantOptions *builtin_options_as_FakeQuantOptions() const {
return builtin_options_type() == BuiltinOptions_FakeQuantOptions ? static_cast<const FakeQuantOptions *>(builtin_options()) : nullptr;
}
+ const PackOptions *builtin_options_as_PackOptions() const {
+ return builtin_options_type() == BuiltinOptions_PackOptions ? static_cast<const PackOptions *>(builtin_options()) : nullptr;
+ }
+ const LogicalOrOptions *builtin_options_as_LogicalOrOptions() const {
+ return builtin_options_type() == BuiltinOptions_LogicalOrOptions ? static_cast<const LogicalOrOptions *>(builtin_options()) : nullptr;
+ }
const flatbuffers::Vector<uint8_t> *custom_options() const {
return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
}
@@ -5856,6 +6028,14 @@ template<> inline const FakeQuantOptions *Operator::builtin_options_as<FakeQuant
return builtin_options_as_FakeQuantOptions();
}
+template<> inline const PackOptions *Operator::builtin_options_as<PackOptions>() const {
+ return builtin_options_as_PackOptions();
+}
+
+template<> inline const LogicalOrOptions *Operator::builtin_options_as<LogicalOrOptions>() const {
+ return builtin_options_as_LogicalOrOptions();
+}
+
struct OperatorBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
@@ -7896,6 +8076,7 @@ inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o, const flatbuffers:
{ auto _e = min(); _o->min = _e; };
{ auto _e = max(); _o->max = _e; };
{ auto _e = num_bits(); _o->num_bits = _e; };
+ { auto _e = narrow_range(); _o->narrow_range = _e; };
}
inline flatbuffers::Offset<FakeQuantOptions> FakeQuantOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
@@ -7909,11 +8090,65 @@ inline flatbuffers::Offset<FakeQuantOptions> CreateFakeQuantOptions(flatbuffers:
auto _min = _o->min;
auto _max = _o->max;
auto _num_bits = _o->num_bits;
+ auto _narrow_range = _o->narrow_range;
return tflite::CreateFakeQuantOptions(
_fbb,
_min,
_max,
- _num_bits);
+ _num_bits,
+ _narrow_range);
+}
+
+inline PackOptionsT *PackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new PackOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void PackOptions::UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+ { auto _e = values_count(); _o->values_count = _e; };
+ { auto _e = axis(); _o->axis = _e; };
+}
+
+inline flatbuffers::Offset<PackOptions> PackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreatePackOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<PackOptions> CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ auto _values_count = _o->values_count;
+ auto _axis = _o->axis;
+ return tflite::CreatePackOptions(
+ _fbb,
+ _values_count,
+ _axis);
+}
+
+inline LogicalOrOptionsT *LogicalOrOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+ auto _o = new LogicalOrOptionsT();
+ UnPackTo(_o, _resolver);
+ return _o;
+}
+
+inline void LogicalOrOptions::UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+ (void)_o;
+ (void)_resolver;
+}
+
+inline flatbuffers::Offset<LogicalOrOptions> LogicalOrOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+ return CreateLogicalOrOptions(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<LogicalOrOptions> CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+ (void)_rehasher;
+ (void)_o;
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalOrOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+ return tflite::CreateLogicalOrOptions(
+ _fbb);
}
inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
@@ -8337,6 +8572,14 @@ inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *ob
auto ptr = reinterpret_cast<const FakeQuantOptions *>(obj);
return verifier.VerifyTable(ptr);
}
+ case BuiltinOptions_PackOptions: {
+ auto ptr = reinterpret_cast<const PackOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
+ case BuiltinOptions_LogicalOrOptions: {
+ auto ptr = reinterpret_cast<const LogicalOrOptions *>(obj);
+ return verifier.VerifyTable(ptr);
+ }
default: return false;
}
}
@@ -8587,6 +8830,14 @@ inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, c
auto ptr = reinterpret_cast<const FakeQuantOptions *>(obj);
return ptr->UnPack(resolver);
}
+ case BuiltinOptions_PackOptions: {
+ auto ptr = reinterpret_cast<const PackOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
+ case BuiltinOptions_LogicalOrOptions: {
+ auto ptr = reinterpret_cast<const LogicalOrOptions *>(obj);
+ return ptr->UnPack(resolver);
+ }
default: return nullptr;
}
}
@@ -8825,6 +9076,14 @@ inline flatbuffers::Offset<void> BuiltinOptionsUnion::Pack(flatbuffers::FlatBuff
auto ptr = reinterpret_cast<const FakeQuantOptionsT *>(value);
return CreateFakeQuantOptions(_fbb, ptr, _rehasher).Union();
}
+ case BuiltinOptions_PackOptions: {
+ auto ptr = reinterpret_cast<const PackOptionsT *>(value);
+ return CreatePackOptions(_fbb, ptr, _rehasher).Union();
+ }
+ case BuiltinOptions_LogicalOrOptions: {
+ auto ptr = reinterpret_cast<const LogicalOrOptionsT *>(value);
+ return CreateLogicalOrOptions(_fbb, ptr, _rehasher).Union();
+ }
default: return 0;
}
}
@@ -9063,6 +9322,14 @@ inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FL
value = new FakeQuantOptionsT(*reinterpret_cast<FakeQuantOptionsT *>(u.value));
break;
}
+ case BuiltinOptions_PackOptions: {
+ value = new PackOptionsT(*reinterpret_cast<PackOptionsT *>(u.value));
+ break;
+ }
+ case BuiltinOptions_LogicalOrOptions: {
+ value = new LogicalOrOptionsT(*reinterpret_cast<LogicalOrOptionsT *>(u.value));
+ break;
+ }
default:
break;
}
@@ -9360,6 +9627,16 @@ inline void BuiltinOptionsUnion::Reset() {
delete ptr;
break;
}
+ case BuiltinOptions_PackOptions: {
+ auto ptr = reinterpret_cast<PackOptionsT *>(value);
+ delete ptr;
+ break;
+ }
+ case BuiltinOptions_LogicalOrOptions: {
+ auto ptr = reinterpret_cast<LogicalOrOptionsT *>(value);
+ delete ptr;
+ break;
+ }
default: break;
}
value = nullptr;
diff --git a/tensorflow/contrib/lite/simple_memory_arena.cc b/tensorflow/contrib/lite/simple_memory_arena.cc
index 4eaf6f1bfe..24593d2a67 100644
--- a/tensorflow/contrib/lite/simple_memory_arena.cc
+++ b/tensorflow/contrib/lite/simple_memory_arena.cc
@@ -34,7 +34,7 @@ namespace tflite {
TfLiteStatus SimpleMemoryArena::Allocate(TfLiteContext* context,
size_t alignment, size_t size,
ArenaAlloc* new_alloc) {
- TF_LITE_ENSURE(context, alignment < arena_alignment_);
+ TF_LITE_ENSURE(context, alignment <= arena_alignment_);
if (size == 0) {
new_alloc->offset = 0;
diff --git a/tensorflow/contrib/lite/testdata/add.bin b/tensorflow/contrib/lite/testdata/add.bin
new file mode 100644
index 0000000000..aef0fe3d82
--- /dev/null
+++ b/tensorflow/contrib/lite/testdata/add.bin
Binary files differ
diff --git a/tensorflow/contrib/lite/testing/BUILD b/tensorflow/contrib/lite/testing/BUILD
index 789bc695f8..6c7f494e9b 100644
--- a/tensorflow/contrib/lite/testing/BUILD
+++ b/tensorflow/contrib/lite/testing/BUILD
@@ -140,6 +140,7 @@ cc_test(
cc_library(
name = "join",
hdrs = ["join.h"],
+ deps = ["//tensorflow/contrib/lite:string"],
)
cc_test(
@@ -268,6 +269,7 @@ cc_library(
":join",
":split",
":tf_driver",
+ "//tensorflow/contrib/lite:string",
"//tensorflow/core:framework",
],
)
@@ -333,7 +335,6 @@ tf_cc_test(
],
tags = [
"no_cuda_on_cpu_tap",
- "no_oss",
"tflite_not_portable",
],
deps = [
diff --git a/tensorflow/contrib/lite/testing/generate_examples.py b/tensorflow/contrib/lite/testing/generate_examples.py
index 1093bd2cbe..41ece94237 100644
--- a/tensorflow/contrib/lite/testing/generate_examples.py
+++ b/tensorflow/contrib/lite/testing/generate_examples.py
@@ -772,6 +772,11 @@ def make_binary_op_tests(zip_path, binary_operator):
"input_shape_1": [[1, 3, 4, 3]],
"input_shape_2": [[3]],
"activation": [True]
+ }, {
+ "dtype": [tf.float32],
+ "input_shape_1": [[]],
+ "input_shape_2": [[]],
+ "activation": [False]
}]
def build_graph(parameters):
@@ -821,7 +826,7 @@ def make_reduce_tests(reduce_op):
"input_dtype": [tf.float32, tf.int32, tf.int64],
"input_shape": [[3, 2, 4]],
"axis": [
- None, 0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
+ 0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
[2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1], [-1, 0],
[-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]
],
@@ -831,13 +836,19 @@ def make_reduce_tests(reduce_op):
"input_dtype": [tf.float32],
"input_shape": [[1, 8, 8, 3]],
"axis": [
- None, 0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2, 3],
+ 0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2, 3],
[3, 2, 1, 0], [3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0], -1, -2,
-3, -4, [0, -2], [2, 3, -1, 0], [3, 1, 2, -3], [3, -4], [2, 2, 2],
[2, 2, 3], [-3, -3, -4], [-3, 2, 1]
],
"const_axis": [True, False],
"keepdims": [True, False],
+ }, {
+ "input_dtype": [tf.float32],
+ "input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
+ "axis": [None],
+ "const_axis": [True],
+ "keepdims": [True, False],
}]
def build_graph(parameters):
@@ -855,7 +866,7 @@ def make_reduce_tests(reduce_op):
if isinstance(parameters["axis"], list):
shape = [len(parameters["axis"])]
else:
- shape = [0] # shape for None or integers.
+ shape = [] # shape for None or integers.
axis = tf.placeholder(dtype=tf.int32, name="axis", shape=shape)
input_tensors = [input_tensor, axis]
@@ -866,10 +877,11 @@ def make_reduce_tests(reduce_op):
def build_inputs(parameters, sess, inputs, outputs):
values = [
create_tensor_data(parameters["input_dtype"],
- parameters["input_shape"])]
+ parameters["input_shape"],
+ min_value=-10,
+ max_value=10)]
if not parameters["const_axis"]:
- if parameters["axis"]:
- values.append(np.array(parameters["axis"]))
+ values.append(np.array(parameters["axis"]))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
@@ -879,22 +891,30 @@ def make_reduce_tests(reduce_op):
def make_mean_tests(zip_path):
"""Make a set of tests to do mean."""
-
return make_reduce_tests(tf.reduce_mean)(zip_path)
def make_sum_tests(zip_path):
"""Make a set of tests to do sum."""
-
return make_reduce_tests(tf.reduce_sum)(zip_path)
+def make_reduce_prod_tests(zip_path):
+ """Make a set of tests to do prod."""
+ return make_reduce_tests(tf.reduce_prod)(zip_path)
+
+
+def make_reduce_max_tests(zip_path):
+ """Make a set of tests to do max."""
+ return make_reduce_tests(tf.reduce_max)(zip_path)
+
+
def make_exp_tests(zip_path):
"""Make a set of tests to do exp."""
test_parameters = [{
"input_dtype": [tf.float32],
- "input_shape": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
+ "input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
@@ -953,8 +973,8 @@ def make_maximum_tests(zip_path):
test_parameters = [{
"input_dtype": [tf.float32],
- "input_shape_1": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
- "input_shape_2": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
+ "input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
+ "input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
@@ -988,8 +1008,8 @@ def make_minimum_tests(zip_path):
test_parameters = [{
"input_dtype": [tf.float32],
- "input_shape_1": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
- "input_shape_2": [[3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
+ "input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
+ "input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
}]
def build_graph(parameters):
@@ -1587,19 +1607,34 @@ def make_reshape_tests(zip_path):
"dtype": [tf.float32, tf.int32],
"input_shape": [[3, 4, 5, 7], [4, 105], [21, 5, 2, 2], [420]],
"output_shape": [[15, 28], [420], [1, -1, 5, 7], [-1]],
+ "constant_shape": [True, False],
}]
def build_graph(parameters):
input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
shape=parameters["input_shape"])
- out = tf.reshape(input_tensor, shape=parameters["output_shape"])
- return [input_tensor], [out]
+
+ # Get shape as either a placeholder or constants.
+ if parameters["constant_shape"]:
+ output_shape = parameters["output_shape"]
+ input_tensors = [input_tensor]
+ else:
+ # The shape of the shape tensor.
+ shape_tensor_shape = [len(parameters["output_shape"])]
+ output_shape = tf.placeholder(
+ dtype=tf.int32, name="output_shape", shape=shape_tensor_shape)
+ input_tensors = [input_tensor, output_shape]
+ out = tf.reshape(input_tensor, shape=output_shape)
+ return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
- input_values = create_tensor_data(parameters["dtype"],
- parameters["input_shape"])
- return [input_values], sess.run(
- outputs, feed_dict=dict(zip(inputs, [input_values])))
+ values = [
+ create_tensor_data(parameters["dtype"], parameters["input_shape"])
+ ]
+ if not parameters["constant_shape"]:
+ values.append(np.array(parameters["output_shape"]))
+
+ return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
@@ -2229,7 +2264,7 @@ def make_arg_min_max_tests(zip_path):
test_parameters = [{
"input_dtype": [tf.float32, tf.int32],
- "input_shape": [[1, 1, 1, 3], [2, 3, 4, 5], [2, 3, 3], [5, 5], [10]],
+ "input_shape": [[], [1, 1, 1, 3], [2, 3, 4, 5], [2, 3, 3], [5, 5], [10]],
"output_type": [tf.int32, tf.int64],
"axis_is_last_dim": [True, False],
"is_arg_max": [True],
@@ -2265,7 +2300,8 @@ def make_equal_tests(zip_path):
test_parameters = [{
"input_dtype": [tf.float32, tf.int32, tf.int64],
- "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
+ "input_shape_pair": [([], []),
+ ([1, 1, 1, 3], [1, 1, 1, 3]),
([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
([5, 5], [1]), ([10], [2, 4, 10])],
}]
@@ -2522,7 +2558,7 @@ def _make_elementwise_tests(op):
"""Actual function that generates examples."""
test_parameters = [{
"input_dtype": [tf.float32],
- "input_shape": [[1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
+ "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
}]
def build_graph(parameters):
@@ -2844,6 +2880,44 @@ def make_sparse_to_dense_tests(zip_path):
make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
+def make_pack_tests(zip_path):
+ """Make a set of tests to do stack."""
+
+ test_parameters = [{
+ "base_shape": [[3, 4, 3], [3, 4], [5]],
+ "num_tensors": [1, 2, 3, 4, 5, 6],
+ "axis": [0, 1, 2, 3],
+ "additional_shape": [1, 2, 3],
+ }]
+
+ def get_shape(parameters):
+ """Return a tweaked version of 'base_shape'."""
+ axis = parameters["axis"]
+ shape = parameters["base_shape"][:]
+ if axis < len(shape):
+ shape[axis] += parameters["additional_shape"]
+ return shape
+
+ def build_graph(parameters):
+ all_tensors = []
+ for n in range(0, parameters["num_tensors"]):
+ input_tensor = tf.placeholder(
+ dtype=tf.float32, name=("input%d" % n), shape=get_shape(parameters))
+ all_tensors.append(input_tensor)
+ out = tf.stack(all_tensors, parameters["axis"])
+ return all_tensors, [out]
+
+ def build_inputs(parameters, sess, inputs, outputs):
+ all_values = []
+ for _ in range(0, parameters["num_tensors"]):
+ input_values = create_tensor_data(np.float32, get_shape(parameters))
+ all_values.append(input_values)
+ return all_values, sess.run(
+ outputs, feed_dict=dict(zip(inputs, all_values)))
+
+ make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
+
+
# Toco binary path provided by the generate rule.
bin_path = None
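The empty `[]` shapes added to several test parameter sets above generate 0-d
(scalar) inputs. A minimal sketch of what such a generated graph boils down to
(the op is illustrative):

import numpy as np
import tensorflow as tf

# shape=[] declares a 0-d (scalar) tensor, matching the new "input_shape": [[]] cases.
inp = tf.placeholder(dtype=tf.float32, name="input", shape=[])
out = tf.exp(inp)

with tf.Session() as sess:
  print(sess.run(out, feed_dict={inp: np.float32(1.5)}))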
diff --git a/tensorflow/contrib/lite/testing/generate_testspec.cc b/tensorflow/contrib/lite/testing/generate_testspec.cc
index c1092e4d25..f29c188e6c 100644
--- a/tensorflow/contrib/lite/testing/generate_testspec.cc
+++ b/tensorflow/contrib/lite/testing/generate_testspec.cc
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+#include <iostream>
+
#include "tensorflow/contrib/lite/testing/generate_testspec.h"
#include "tensorflow/contrib/lite/testing/join.h"
#include "tensorflow/contrib/lite/testing/split.h"
@@ -88,13 +90,13 @@ bool GenerateTestSpecFromTensorflowModel(
TfDriver runner(input_layer, input_layer_type, input_layer_shape,
output_layer);
if (!runner.IsValid()) {
- cerr << runner.GetErrorMessage() << endl;
+ std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
runner.LoadModel(tensorflow_model_path);
if (!runner.IsValid()) {
- cerr << runner.GetErrorMessage() << endl;
+ std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
@@ -118,14 +120,14 @@ bool GenerateTestSpecFromTensorflowModel(
for (int j = 0; j < input_values.size(); j++) {
runner.SetInput(j, input_values[j]);
if (!runner.IsValid()) {
- cerr << runner.GetErrorMessage() << endl;
+ std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
}
runner.Invoke();
if (!runner.IsValid()) {
- cerr << runner.GetErrorMessage() << endl;
+ std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
@@ -137,7 +139,7 @@ bool GenerateTestSpecFromTensorflowModel(
for (int j = 0; j < output_layer.size(); j++) {
stream << " output: \"" << runner.ReadOutput(j) << "\"\n";
if (!runner.IsValid()) {
- cerr << runner.GetErrorMessage() << endl;
+ std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
}
diff --git a/tensorflow/contrib/lite/testing/generate_testspec.h b/tensorflow/contrib/lite/testing/generate_testspec.h
index bfaf5e7ec8..b3d0db31c0 100644
--- a/tensorflow/contrib/lite/testing/generate_testspec.h
+++ b/tensorflow/contrib/lite/testing/generate_testspec.h
@@ -19,6 +19,8 @@ limitations under the License.
#include <iostream>
#include <vector>
+#include "tensorflow/contrib/lite/string.h"
+
namespace tflite {
namespace testing {
diff --git a/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc b/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
index 58f6bb5382..770092e12c 100644
--- a/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
+++ b/tensorflow/contrib/lite/testing/generated_examples_zip_test.cc
@@ -53,9 +53,6 @@ tensorflow::Env* env = tensorflow::Env::Default();
// Key is a substring of the test name and value is a bug number.
// TODO(ahentz): make sure we clean this list up frequently.
std::map<string, string> kBrokenTests = {
- {R"(^\/div.*int32)", "68808744"},
- {R"(^\/sub.*int32)", "68808744"},
-
// Pad and PadV2 only supports 4D tensors.
{R"(^\/pad.*,input_shape=\[.,.\],paddings=\[\[.,.\],\[.,.\]\])",
"70527055"},
diff --git a/tensorflow/contrib/lite/testing/join.h b/tensorflow/contrib/lite/testing/join.h
index 1edee01cf9..4be19ad756 100644
--- a/tensorflow/contrib/lite/testing/join.h
+++ b/tensorflow/contrib/lite/testing/join.h
@@ -17,7 +17,8 @@ limitations under the License.
#include <cstdlib>
#include <sstream>
-#include <string>
+
+#include "tensorflow/contrib/lite/string.h"
namespace tflite {
namespace testing {
diff --git a/tensorflow/contrib/lite/testing/test_runner.h b/tensorflow/contrib/lite/testing/test_runner.h
index 96ab6be54e..fac7d01aab 100644
--- a/tensorflow/contrib/lite/testing/test_runner.h
+++ b/tensorflow/contrib/lite/testing/test_runner.h
@@ -90,7 +90,7 @@ class TestRunner {
// Invalidate the test runner, preventing it from executing any further.
void Invalidate(const string& error_message) {
- cerr << error_message << std::endl;
+ std::cerr << error_message << std::endl;
error_message_ = error_message;
}
bool IsValid() const { return error_message_.empty(); }
diff --git a/tensorflow/contrib/lite/testing/tf_driver.cc b/tensorflow/contrib/lite/testing/tf_driver.cc
index 3b27f6f3da..d6a6ff8f56 100644
--- a/tensorflow/contrib/lite/testing/tf_driver.cc
+++ b/tensorflow/contrib/lite/testing/tf_driver.cc
@@ -28,8 +28,8 @@ namespace {
tensorflow::Tensor CreateTensor(const tensorflow::DataType type,
const std::vector<int64_t>& dim) {
- tensorflow::TensorShape shape{gtl::ArraySlice<int64>{
- reinterpret_cast<const int64*>(dim.data()), dim.size()}};
+ tensorflow::TensorShape shape{tensorflow::gtl::ArraySlice<tensorflow::int64>{
+ reinterpret_cast<const tensorflow::int64*>(dim.data()), dim.size()}};
return {type, shape};
}
diff --git a/tensorflow/contrib/lite/testing/tflite_diff_flags.h b/tensorflow/contrib/lite/testing/tflite_diff_flags.h
index 7a57e8d3fb..695c2a3de6 100644
--- a/tensorflow/contrib/lite/testing/tflite_diff_flags.h
+++ b/tensorflow/contrib/lite/testing/tflite_diff_flags.h
@@ -15,6 +15,8 @@ limitations under the License.
#ifndef TENSORFLOW_CONTRIB_LITE_TESTING_TFLITE_DIFF_FLAGS_H_
#define TENSORFLOW_CONTRIB_LITE_TESTING_TFLITE_DIFF_FLAGS_H_
+#include <cstring>
+
#include "tensorflow/contrib/lite/testing/split.h"
#include "tensorflow/contrib/lite/testing/tflite_diff_util.h"
#include "tensorflow/core/util/command_line_flags.h"
diff --git a/tensorflow/contrib/lite/toco/BUILD b/tensorflow/contrib/lite/toco/BUILD
index 209dce56cb..c88079717d 100644
--- a/tensorflow/contrib/lite/toco/BUILD
+++ b/tensorflow/contrib/lite/toco/BUILD
@@ -93,6 +93,7 @@ cc_library(
":runtime",
":toco_port",
"//tensorflow/core:lib",
+ "@com_google_absl//absl/types:optional",
],
)
@@ -176,7 +177,7 @@ cc_library(
"graph_transformations/convert_reorder_axes.cc",
"graph_transformations/convert_squeeze_to_reshape.cc",
"graph_transformations/convert_trivial_addn_to_add.cc",
- "graph_transformations/convert_trivial_stack_to_reshape.cc",
+ "graph_transformations/convert_trivial_pack_to_reshape.cc",
"graph_transformations/convert_trivial_tile_to_concat.cc",
"graph_transformations/convert_trivial_transpose_to_reshape.cc",
"graph_transformations/create_im2col_arrays.cc",
@@ -212,7 +213,7 @@ cc_library(
"graph_transformations/quantization_util.h",
"graph_transformations/quantize.cc",
"graph_transformations/quantize_weights.cc",
- "graph_transformations/read_fake_quant_min_max.cc",
+ "graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc",
"graph_transformations/remove_final_dequantize_op.cc",
"graph_transformations/remove_tensorflow_assert.cc",
"graph_transformations/remove_tensorflow_identity.cc",
@@ -236,19 +237,21 @@ cc_library(
"graph_transformations/resolve_constant_fake_quant.cc",
"graph_transformations/resolve_constant_fill.cc",
"graph_transformations/resolve_constant_gather.cc",
+ "graph_transformations/resolve_constant_pack.cc",
"graph_transformations/resolve_constant_random_uniform.cc",
"graph_transformations/resolve_constant_range.cc",
"graph_transformations/resolve_constant_reshape.cc",
"graph_transformations/resolve_constant_shape_or_rank.cc",
"graph_transformations/resolve_constant_slice.cc",
- "graph_transformations/resolve_constant_stack.cc",
"graph_transformations/resolve_constant_strided_slice.cc",
"graph_transformations/resolve_constant_transpose.cc",
"graph_transformations/resolve_constant_unary.cc",
- "graph_transformations/resolve_mean_attributes.cc",
+ "graph_transformations/resolve_fake_quant_args_from_vars.cc",
+ "graph_transformations/resolve_gather_attributes.cc",
"graph_transformations/resolve_multiply_by_zero.cc",
"graph_transformations/resolve_pad_attributes.cc",
"graph_transformations/resolve_padv2_attributes.cc",
+ "graph_transformations/resolve_reduce_attributes.cc",
"graph_transformations/resolve_reorder_axes.cc",
"graph_transformations/resolve_reshape_attributes.cc",
"graph_transformations/resolve_slice_attributes.cc",
@@ -336,6 +339,7 @@ cc_library(
tf_cc_test(
name = "import_tensorflow_test",
srcs = ["import_tensorflow_test.cc"],
+ tags = ["no_oss"],
deps = [
":toco_tooling",
"//tensorflow/core:framework",
@@ -375,6 +379,7 @@ cc_library(
tf_cc_test(
name = "tooling_util_test",
srcs = ["tooling_util_test.cc"],
+ tags = ["no_oss"],
deps = [
":model",
":tooling_util",
@@ -409,6 +414,7 @@ tf_cc_test(
data = [
"toco_port_test.cc",
],
+ tags = ["no_oss"],
deps = [
":toco_port",
"@com_google_googletest//:gtest_main",
diff --git a/tensorflow/contrib/lite/toco/export_tensorflow.cc b/tensorflow/contrib/lite/toco/export_tensorflow.cc
index a08cdbfba6..b79bb300f0 100644
--- a/tensorflow/contrib/lite/toco/export_tensorflow.cc
+++ b/tensorflow/contrib/lite/toco/export_tensorflow.cc
@@ -215,6 +215,30 @@ void ConvertFloatTensorConst(const Model& model, const string& name,
LegacyScalarPolicy::kAvoidLegacyScalars);
}
+void ConvertBoolTensorConst(const Model& model, const string& name,
+ GraphDef* tensorflow_graph) {
+ if (HasAlreadyExportedConst(name, *tensorflow_graph)) {
+ return;
+ }
+ CHECK(model.HasArray(name));
+ const auto& array = model.GetArray(name);
+ tensorflow::NodeDef* const_op = tensorflow_graph->add_node();
+ const_op->set_op("Const");
+ const_op->set_name(name);
+ (*const_op->mutable_attr())["dtype"].set_type(DT_BOOL);
+ auto* tensor = (*const_op->mutable_attr())["value"].mutable_tensor();
+ tensor->set_dtype(DT_BOOL);
+ const auto& data = array.GetBuffer<ArrayDataType::kBool>().data;
+ for (auto index : data) {
+ tensor->add_bool_val(index);
+ }
+ const auto& array_shape = array.shape();
+ auto* shape = tensor->mutable_tensor_shape();
+ for (int i = 0; i < array_shape.dimensions_count(); i++) {
+ shape->add_dim()->set_size(array_shape.dims(i));
+ }
+}
+
void ConvertIntTensorConst(const Model& model, const string& name,
GraphDef* tensorflow_graph) {
if (HasAlreadyExportedConst(name, *tensorflow_graph)) {
@@ -621,7 +645,8 @@ void ConvertAddOperator(const Model& model, const AddOperator& src_op,
CHECK_EQ(src_op.inputs.size(), 2);
*add_op->add_input() = src_op.inputs[0];
*add_op->add_input() = src_op.inputs[1];
- (*add_op->mutable_attr())["T"].set_type(DT_FLOAT);
+ (*add_op->mutable_attr())["T"].set_type(
+ GetTensorFlowDataType(model, src_op.outputs[0]));
}
void ConvertAddNOperator(const Model& model, const AddNOperator& src_op,
@@ -633,7 +658,8 @@ void ConvertAddNOperator(const Model& model, const AddNOperator& src_op,
*add_op->add_input() = input;
}
(*add_op->mutable_attr())["N"].set_i(src_op.inputs.size());
- (*add_op->mutable_attr())["T"].set_type(DT_FLOAT);
+ (*add_op->mutable_attr())["T"].set_type(
+ GetTensorFlowDataType(model, src_op.outputs[0]));
}
void ConvertMulOperator(const Model& model, const MulOperator& src_op,
@@ -644,16 +670,18 @@ void ConvertMulOperator(const Model& model, const MulOperator& src_op,
CHECK_EQ(src_op.inputs.size(), 2);
*add_op->add_input() = src_op.inputs[0];
*add_op->add_input() = src_op.inputs[1];
- (*add_op->mutable_attr())["T"].set_type(DT_FLOAT);
+ (*add_op->mutable_attr())["T"].set_type(
+ GetTensorFlowDataType(model, src_op.outputs[0]));
}
-void ConvertReluOperator(const ReluOperator& src_op,
+void ConvertReluOperator(const Model& model, const ReluOperator& src_op,
GraphDef* tensorflow_graph) {
tensorflow::NodeDef* relu_op = tensorflow_graph->add_node();
relu_op->set_op("Relu");
relu_op->set_name(src_op.outputs[0]);
*relu_op->add_input() = src_op.inputs[0];
- (*relu_op->mutable_attr())["T"].set_type(DT_FLOAT);
+ (*relu_op->mutable_attr())["T"].set_type(
+ GetTensorFlowDataType(model, src_op.outputs[0]));
}
void ConvertRelu1Operator(const Relu1Operator& src_op,
@@ -884,6 +912,9 @@ void ConvertFakeQuantOperator(const FakeQuantOperator& src_op,
if (src_op.num_bits) {
(*fakequant_op->mutable_attr())["num_bits"].set_i(src_op.num_bits);
}
+ if (src_op.narrow_range) {
+ (*fakequant_op->mutable_attr())["narrow_range"].set_b(src_op.narrow_range);
+ }
}
void ConvertMaxPoolOperator(const MaxPoolOperator& src_op,
@@ -1107,13 +1138,27 @@ void ConvertFloorOperator(const Model& model, const FloorOperator& src_op,
void ConvertGatherOperator(const Model& model, const GatherOperator& src_op,
GraphDef* tensorflow_graph) {
tensorflow::NodeDef* gather_op = tensorflow_graph->add_node();
- gather_op->set_op("Gather");
+ gather_op->set_op("GatherV2");
gather_op->set_name(src_op.outputs[0]);
- CHECK_EQ(src_op.inputs.size(), 2);
*gather_op->add_input() = src_op.inputs[0];
*gather_op->add_input() = src_op.inputs[1];
+ if (!src_op.axis) {
+ // Dynamic axis.
+ CHECK_EQ(src_op.inputs.size(), 3);
+ *gather_op->add_input() = src_op.inputs[2];
+ } else {
+ // Constant axis.
+ CHECK_EQ(src_op.inputs.size(), 2);
+ const string gather_axis =
+ AvailableArrayName(model, gather_op->name() + "/axis");
+ CreateIntTensorConst(gather_axis, {src_op.axis.value()}, {},
+ tensorflow_graph);
+ *gather_op->add_input() = gather_axis;
+ }
+
(*gather_op->mutable_attr())["Tindices"].set_type(DT_INT32);
+ (*gather_op->mutable_attr())["Taxis"].set_type(DT_INT32);
const tensorflow::DataType params_type =
GetTensorFlowDataType(model, src_op.inputs[0]);
(*gather_op->mutable_attr())["Tparams"].set_type(params_type);
@@ -1204,17 +1249,17 @@ void ConvertRangeOperator(const Model& model, const RangeOperator& src_op,
GetTensorFlowDataType(src_op.dtype));
}
-void ConvertStackOperator(const Model& model, const StackOperator& src_op,
- GraphDef* tensorflow_graph) {
- tensorflow::NodeDef* stack_op = tensorflow_graph->add_node();
- stack_op->set_op("Stack");
- stack_op->set_name(src_op.outputs[0]);
+void ConvertPackOperator(const Model& model, const PackOperator& src_op,
+ GraphDef* tensorflow_graph) {
+ tensorflow::NodeDef* pack_op = tensorflow_graph->add_node();
+ pack_op->set_op("Pack");
+ pack_op->set_name(src_op.outputs[0]);
for (const auto& input : src_op.inputs) {
- *stack_op->add_input() = input;
+ *pack_op->add_input() = input;
}
- (*stack_op->mutable_attr())["elem_type"].set_type(
- GetTensorFlowDataType(model, src_op.outputs[0]));
- (*stack_op->mutable_attr())["axis"].set_i(src_op.axis);
+ (*pack_op->mutable_attr())["axis"].set_i(src_op.axis);
+ (*pack_op->mutable_attr())["N"].set_i(src_op.inputs.size());
+ (*pack_op->mutable_attr())["T"].set_type(GetTensorFlowDataType(src_op.dtype));
}
void ConvertFillOperator(const Model& model, const FillOperator& src_op,
@@ -1620,10 +1665,11 @@ void ConvertSliceOperator(const Model& model, const SliceOperator& src_op,
CreateSliceInput(src_op.inputs[2], src_op.size, tensorflow_graph);
}
-void ConvertMeanOperator(const Model& model, const MeanOperator& src_op,
- GraphDef* tensorflow_graph) {
+template <typename T>
+void ConvertReduceOperator(const Model& model, const T& src_op,
+ GraphDef* tensorflow_graph, const string& op_name) {
tensorflow::NodeDef* new_op = tensorflow_graph->add_node();
- new_op->set_op("Mean");
+ new_op->set_op(op_name);
new_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
*new_op->add_input() = src_op.inputs[0];
@@ -1632,6 +1678,9 @@ void ConvertMeanOperator(const Model& model, const MeanOperator& src_op,
const tensorflow::DataType params_type =
GetTensorFlowDataType(model, src_op.inputs[0]);
(*new_op->mutable_attr())["T"].set_type(params_type);
+ const tensorflow::DataType indices_type =
+ GetTensorFlowDataType(model, src_op.inputs[1]);
+ (*new_op->mutable_attr())["Tidx"].set_type(indices_type);
if (src_op.keep_dims) {
(*new_op->mutable_attr())["keep_dims"].set_b(true);
@@ -1688,43 +1737,43 @@ void ConvertSubOperator(const Model& model, const SubOperator& src_op,
void ConvertTensorFlowMinimumOperator(const Model& model,
const TensorFlowMinimumOperator& src_op,
GraphDef* tensorflow_graph) {
- tensorflow::NodeDef* sub_op = tensorflow_graph->add_node();
- sub_op->set_op("Minimum");
- sub_op->set_name(src_op.outputs[0]);
+ tensorflow::NodeDef* min_op = tensorflow_graph->add_node();
+ min_op->set_op("Minimum");
+ min_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
- *sub_op->add_input() = src_op.inputs[0];
- *sub_op->add_input() = src_op.inputs[1];
+ *min_op->add_input() = src_op.inputs[0];
+ *min_op->add_input() = src_op.inputs[1];
const tensorflow::DataType data_type =
GetTensorFlowDataType(model, src_op.inputs[0]);
- (*sub_op->mutable_attr())["T"].set_type(data_type);
+ (*min_op->mutable_attr())["T"].set_type(data_type);
}
void ConvertTensorFlowMaximumOperator(const Model& model,
const TensorFlowMaximumOperator& src_op,
GraphDef* tensorflow_graph) {
- tensorflow::NodeDef* sub_op = tensorflow_graph->add_node();
- sub_op->set_op("Maximum");
- sub_op->set_name(src_op.outputs[0]);
+ tensorflow::NodeDef* max_op = tensorflow_graph->add_node();
+ max_op->set_op("Maximum");
+ max_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
- *sub_op->add_input() = src_op.inputs[0];
- *sub_op->add_input() = src_op.inputs[1];
+ *max_op->add_input() = src_op.inputs[0];
+ *max_op->add_input() = src_op.inputs[1];
const tensorflow::DataType data_type =
GetTensorFlowDataType(model, src_op.inputs[0]);
- (*sub_op->mutable_attr())["T"].set_type(data_type);
+ (*max_op->mutable_attr())["T"].set_type(data_type);
}
void ConvertSelectOperator(const Model& model, const SelectOperator& src_op,
GraphDef* tensorflow_graph) {
- tensorflow::NodeDef* sub_op = tensorflow_graph->add_node();
- sub_op->set_op("Select");
- sub_op->set_name(src_op.outputs[0]);
+ tensorflow::NodeDef* select_op = tensorflow_graph->add_node();
+ select_op->set_op("Select");
+ select_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 3);
- *sub_op->add_input() = src_op.inputs[0];
- *sub_op->add_input() = src_op.inputs[1];
- *sub_op->add_input() = src_op.inputs[2];
+ *select_op->add_input() = src_op.inputs[0];
+ *select_op->add_input() = src_op.inputs[1];
+ *select_op->add_input() = src_op.inputs[2];
const tensorflow::DataType data_type =
GetTensorFlowDataType(model, src_op.inputs[1]);
- (*sub_op->mutable_attr())["T"].set_type(data_type);
+ (*select_op->mutable_attr())["T"].set_type(data_type);
}
void ConvertTileOperator(const Model& model,
@@ -1747,11 +1796,14 @@ void ConvertTileOperator(const Model& model,
void ConvertTopKV2Operator(const Model& model, const TopKV2Operator& src_op,
GraphDef* tensorflow_graph) {
tensorflow::NodeDef* topk_op = tensorflow_graph->add_node();
- topk_op->set_op("TOPKV2");
+ topk_op->set_op("TopKV2");
topk_op->set_name(src_op.outputs[0]);
CHECK_EQ(src_op.inputs.size(), 2);
*topk_op->add_input() = src_op.inputs[0];
*topk_op->add_input() = src_op.inputs[1];
+ const tensorflow::DataType data_type =
+ GetTensorFlowDataType(model, src_op.inputs[0]);
+ (*topk_op->mutable_attr())["T"].set_type(data_type);
(*topk_op->mutable_attr())["sorted"].set_b(true);
}
@@ -1822,6 +1874,43 @@ void ConvertPowOperator(const Model& model, const PowOperator& src_op,
(*pow_op->mutable_attr())["T"].set_type(data_type);
}
+void ConvertAnyOperator(const Model& model, const AnyOperator& src_op,
+ GraphDef* tensorflow_graph) {
+ tensorflow::NodeDef* any_op = tensorflow_graph->add_node();
+ any_op->set_op("Any");
+ any_op->set_name(src_op.outputs[0]);
+ CHECK_EQ(src_op.inputs.size(), 2);
+ for (int i = 0; i < 2; ++i) {
+ *any_op->add_input() = src_op.inputs[i];
+ }
+ const tensorflow::DataType data_type =
+ GetTensorFlowDataType(model, src_op.inputs[1]);
+ (*any_op->mutable_attr())["Tidx"].set_type(data_type);
+ (*any_op->mutable_attr())["keep_dims"].set_b(src_op.keep_dims);
+}
+
+void ConvertLogicalAndOperator(const Model& model,
+ const LogicalAndOperator& src_op,
+ GraphDef* tensorflow_graph) {
+ tensorflow::NodeDef* logical_op = tensorflow_graph->add_node();
+ logical_op->set_op("LogicalAnd");
+ logical_op->set_name(src_op.outputs[0]);
+ CHECK_EQ(src_op.inputs.size(), 2);
+ for (int i = 0; i < 2; ++i) {
+ *logical_op->add_input() = src_op.inputs[i];
+ }
+}
+
+void ConvertLogicalNotOperator(const Model& model,
+ const LogicalNotOperator& src_op,
+ GraphDef* tensorflow_graph) {
+ tensorflow::NodeDef* logical_op = tensorflow_graph->add_node();
+ logical_op->set_op("LogicalNot");
+ logical_op->set_name(src_op.outputs[0]);
+ CHECK_EQ(src_op.inputs.size(), 1);
+ *logical_op->add_input() = src_op.inputs[0];
+}
+
void ConvertOperator(const Model& model, const Operator& src_op,
GraphDef* tensorflow_graph) {
if (src_op.fused_activation_function != FusedActivationFunctionType::kNone) {
@@ -1858,7 +1947,7 @@ void ConvertOperator(const Model& model, const Operator& src_op,
ConvertMulOperator(model, static_cast<const MulOperator&>(src_op),
tensorflow_graph);
} else if (src_op.type == OperatorType::kRelu) {
- ConvertReluOperator(static_cast<const ReluOperator&>(src_op),
+ ConvertReluOperator(model, static_cast<const ReluOperator&>(src_op),
tensorflow_graph);
} else if (src_op.type == OperatorType::kRelu1) {
ConvertRelu1Operator(static_cast<const Relu1Operator&>(src_op),
@@ -1958,8 +2047,24 @@ void ConvertOperator(const Model& model, const Operator& src_op,
model, static_cast<const StridedSliceOperator&>(src_op),
tensorflow_graph);
} else if (src_op.type == OperatorType::kMean) {
- ConvertMeanOperator(model, static_cast<const MeanOperator&>(src_op),
- tensorflow_graph);
+ ConvertReduceOperator(model, static_cast<const MeanOperator&>(src_op),
+ tensorflow_graph, "Mean");
+ } else if (src_op.type == OperatorType::kSum) {
+ ConvertReduceOperator(model,
+ static_cast<const TensorFlowSumOperator&>(src_op),
+ tensorflow_graph, "Sum");
+ } else if (src_op.type == OperatorType::kReduceProd) {
+ ConvertReduceOperator(model,
+ static_cast<const TensorFlowProdOperator&>(src_op),
+ tensorflow_graph, "Prod");
+ } else if (src_op.type == OperatorType::kReduceMin) {
+ ConvertReduceOperator(model,
+ static_cast<const TensorFlowMinOperator&>(src_op),
+ tensorflow_graph, "Min");
+ } else if (src_op.type == OperatorType::kReduceMax) {
+ ConvertReduceOperator(model,
+ static_cast<const TensorFlowMaxOperator&>(src_op),
+ tensorflow_graph, "Max");
} else if (src_op.type == OperatorType::kSub) {
ConvertSubOperator(model, static_cast<const SubOperator&>(src_op),
tensorflow_graph);
@@ -1999,9 +2104,9 @@ void ConvertOperator(const Model& model, const Operator& src_op,
} else if (src_op.type == OperatorType::kRange) {
ConvertRangeOperator(model, static_cast<const RangeOperator&>(src_op),
tensorflow_graph);
- } else if (src_op.type == OperatorType::kStack) {
- ConvertStackOperator(model, static_cast<const StackOperator&>(src_op),
- tensorflow_graph);
+ } else if (src_op.type == OperatorType::kPack) {
+ ConvertPackOperator(model, static_cast<const PackOperator&>(src_op),
+ tensorflow_graph);
} else if (src_op.type == OperatorType::kFill) {
ConvertFillOperator(model, static_cast<const FillOperator&>(src_op),
tensorflow_graph);
@@ -2042,6 +2147,17 @@ void ConvertOperator(const Model& model, const Operator& src_op,
} else if (src_op.type == OperatorType::kPow) {
ConvertPowOperator(model, static_cast<const PowOperator&>(src_op), "Pow",
tensorflow_graph);
+ } else if (src_op.type == OperatorType::kAny) {
+ ConvertAnyOperator(model, static_cast<const AnyOperator&>(src_op),
+ tensorflow_graph);
+ } else if (src_op.type == OperatorType::kLogicalAnd) {
+ ConvertLogicalAndOperator(model,
+ static_cast<const LogicalAndOperator&>(src_op),
+ tensorflow_graph);
+ } else if (src_op.type == OperatorType::kLogicalNot) {
+ ConvertLogicalNotOperator(model,
+ static_cast<const LogicalNotOperator&>(src_op),
+ tensorflow_graph);
} else {
LOG(FATAL) << "Unhandled operator type " << OperatorTypeName(src_op.type);
}
@@ -2120,6 +2236,9 @@ void ExportTensorFlowGraphDefImplementation(const Model& model,
const auto& array = *array_pair.second;
if (array.buffer) {
switch (array.data_type) {
+ case ArrayDataType::kBool:
+ ConvertBoolTensorConst(model, array_name, tensorflow_graph);
+ break;
case ArrayDataType::kFloat:
ConvertFloatTensorConst(model, array_name, tensorflow_graph);
break;
diff --git a/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md b/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md
index 18b7848db8..4bf47aa3c4 100644
--- a/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md
+++ b/tensorflow/contrib/lite/toco/g3doc/cmdline_examples.md
@@ -36,7 +36,7 @@ There are two approaches to running TOCO via command line.
* `tflite_convert`: Starting from TensorFlow 1.9, the command-line tool
`tflite_convert` will be installed as part of the Python package. All of the
examples below use `tflite_convert` for simplicity.
- * Example: `tflite --output_file=...`
+ * Example: `tflite_convert --output_file=...`
* `bazel`: In order to run the latest version of TOCO, [clone the TensorFlow
repository](https://www.tensorflow.org/install/install_sources#clone_the_tensorflow_repository)
and use `bazel`. This is the recommended approach for converting models that
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/convert_expanddims_to_reshape.cc b/tensorflow/contrib/lite/toco/graph_transformations/convert_expanddims_to_reshape.cc
index 56f48d47de..310a88484c 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/convert_expanddims_to_reshape.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/convert_expanddims_to_reshape.cc
@@ -40,11 +40,6 @@ bool ConvertExpandDimsToReshape::Run(Model* model, std::size_t op_index) {
// Yield until input dims have been resolved.
return false;
}
- if (input_array.shape().dimensions_count() == 0) {
- // Input array cannot be 0-D.
- // (Unsure if this is TF behavior, but was required to get a test to pass.)
- return false;
- }
const auto& axis_array = model->GetArray(expand_op->inputs[1]);
if (!axis_array.has_shape()) {
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/convert_trivial_stack_to_reshape.cc b/tensorflow/contrib/lite/toco/graph_transformations/convert_trivial_pack_to_reshape.cc
index 0615b5e6c6..75113a2a8c 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/convert_trivial_stack_to_reshape.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/convert_trivial_pack_to_reshape.cc
@@ -25,19 +25,19 @@ limitations under the License.
namespace toco {
-bool ConvertTrivialStackToReshape::Run(Model* model, std::size_t op_index) {
- auto stack_it = model->operators.begin() + op_index;
- if (stack_it->get()->type != OperatorType::kStack) {
+bool ConvertTrivialPackToReshape::Run(Model* model, std::size_t op_index) {
+ auto pack_it = model->operators.begin() + op_index;
+ if (pack_it->get()->type != OperatorType::kPack) {
return false;
}
- auto* stack_op = static_cast<StackOperator*>(stack_it->get());
- if (stack_op->inputs.size() > 1) {
+ auto* pack_op = static_cast<PackOperator*>(pack_it->get());
+ if (pack_op->inputs.size() > 1) {
// Not trivial.
return false;
}
- CHECK_EQ(stack_op->outputs.size(), 1);
+ CHECK_EQ(pack_op->outputs.size(), 1);
- const auto& input_array = model->GetArray(stack_op->inputs[0]);
+ const auto& input_array = model->GetArray(pack_op->inputs[0]);
if (!input_array.has_shape()) {
// Yield until input dims have been resolved.
return false;
@@ -48,16 +48,16 @@ bool ConvertTrivialStackToReshape::Run(Model* model, std::size_t op_index) {
return false;
}
- AddMessageF("Converting trivial %s to a reshape", LogName(*stack_op));
+ AddMessageF("Converting trivial %s to a reshape", LogName(*pack_op));
// Note that we could convert to ExpandDims but toco prefers reshapes.
auto* reshape_op = new TensorFlowReshapeOperator;
- reshape_op->inputs = {stack_op->inputs[0]};
- reshape_op->outputs = stack_op->outputs;
+ reshape_op->inputs = {pack_op->inputs[0]};
+ reshape_op->outputs = pack_op->outputs;
// Create shape param.
string shape_array_name =
- AvailableArrayName(*model, stack_op->outputs[0] + "_shape");
+ AvailableArrayName(*model, pack_op->outputs[0] + "_shape");
Array& shape_array = model->GetOrCreateArray(shape_array_name);
*(shape_array.mutable_shape()->mutable_dims()) = {
1 + input_array.shape().dimensions_count()};
@@ -70,10 +70,10 @@ bool ConvertTrivialStackToReshape::Run(Model* model, std::size_t op_index) {
}
// Replace the operator in the graph.
- const auto reshape_it = model->operators.emplace(stack_it, reshape_op);
- stack_it = reshape_it + 1;
- CHECK_EQ(stack_it->get(), stack_op);
- model->operators.erase(stack_it);
+ const auto reshape_it = model->operators.emplace(pack_it, reshape_op);
+ pack_it = reshape_it + 1;
+ CHECK_EQ(pack_it->get(), pack_op);
+ model->operators.erase(pack_it);
return true;
}
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc b/tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc
index 2c7ffe4884..1688586733 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/dequantize.cc
@@ -159,6 +159,7 @@ bool DequantizeArray(const string& array_name,
new_array.GetOrCreateMinMax() = array->GetMinMax();
fakequant_op->minmax.reset(new MinMax);
*fakequant_op->minmax = array->GetMinMax();
+ fakequant_op->narrow_range = array->narrow_range;
if (must_insert_fakequant_before) {
for (const auto& op : model->operators) {
for (string& output : op->outputs) {
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h b/tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h
index 8cd1298bca..b7634e28c6 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h
+++ b/tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h
@@ -116,7 +116,7 @@ DECLARE_GRAPH_TRANSFORMATION(ConvertExpandDimsToReshape)
DECLARE_GRAPH_TRANSFORMATION(ConvertPureConvToDepthwise)
DECLARE_GRAPH_TRANSFORMATION(ConvertSqueezeToReshape)
DECLARE_GRAPH_TRANSFORMATION(ConvertTrivialAddNToAdd)
-DECLARE_GRAPH_TRANSFORMATION(ConvertTrivialStackToReshape)
+DECLARE_GRAPH_TRANSFORMATION(ConvertTrivialPackToReshape)
DECLARE_GRAPH_TRANSFORMATION(ConvertTrivialTileToConcat)
DECLARE_GRAPH_TRANSFORMATION(ConvertTrivialTransposeToReshape)
DECLARE_GRAPH_TRANSFORMATION(ConvertReorderAxes)
@@ -159,7 +159,7 @@ DECLARE_GRAPH_TRANSFORMATION(ResolveConstantBinaryOperator)
DECLARE_GRAPH_TRANSFORMATION(ResolveConstantUnaryOperator)
DECLARE_GRAPH_TRANSFORMATION(CreateIm2colArrays)
DECLARE_GRAPH_TRANSFORMATION(DropIm2colArrays)
-DECLARE_GRAPH_TRANSFORMATION(ReadFakeQuantMinMax)
+DECLARE_GRAPH_TRANSFORMATION(ReadArrayMinmaxAndNarrowRangeFromFakeQuant)
DECLARE_GRAPH_TRANSFORMATION(ReorderElementwiseUnary)
DECLARE_GRAPH_TRANSFORMATION(ReorderReshapeTranspose)
DECLARE_GRAPH_TRANSFORMATION(ResolveReorderAxes)
@@ -180,13 +180,13 @@ DECLARE_GRAPH_TRANSFORMATION(ResolvePadAttributes)
DECLARE_GRAPH_TRANSFORMATION(ResolvePadV2Attributes)
DECLARE_GRAPH_TRANSFORMATION(ResolveStridedSliceAttributes)
DECLARE_GRAPH_TRANSFORMATION(ResolveSliceAttributes)
-DECLARE_GRAPH_TRANSFORMATION(ResolveMeanAttributes)
+DECLARE_GRAPH_TRANSFORMATION(ResolveReduceAttributes)
DECLARE_GRAPH_TRANSFORMATION(ResolveTransposeAttributes)
+DECLARE_GRAPH_TRANSFORMATION(ResolveConstantPack)
DECLARE_GRAPH_TRANSFORMATION(ResolveConstantRandomUniform)
DECLARE_GRAPH_TRANSFORMATION(ResolveConstantRange)
DECLARE_GRAPH_TRANSFORMATION(ResolveConstantShapeOrRank)
DECLARE_GRAPH_TRANSFORMATION(ResolveConstantSlice)
-DECLARE_GRAPH_TRANSFORMATION(ResolveConstantStack)
DECLARE_GRAPH_TRANSFORMATION(ResolveConstantStridedSlice)
DECLARE_GRAPH_TRANSFORMATION(ResolveConstantFill)
DECLARE_GRAPH_TRANSFORMATION(ResolveConstantGather)
@@ -194,6 +194,8 @@ DECLARE_GRAPH_TRANSFORMATION(ResolveMultiplyByZero)
DECLARE_GRAPH_TRANSFORMATION(Dequantize)
DECLARE_GRAPH_TRANSFORMATION(UnpartitionEmbeddingLookup)
DECLARE_GRAPH_TRANSFORMATION(ShuffleFCWeights)
+DECLARE_GRAPH_TRANSFORMATION(ResolveFakeQuantArgsFromVars)
+DECLARE_GRAPH_TRANSFORMATION(ResolveGatherAttributes)
class PropagateDefaultMinMax : public GraphTransformation {
public:
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc b/tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc
index 45d9f73a1e..f684de08ab 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/make_initial_dequantize_operator.cc
@@ -85,15 +85,8 @@ bool AddDequantizeOperatorToInput(const string& input_name, const Operator* op,
dequantized_input_minmax = input_minmax;
auto& input_qparams = input_array.GetOrCreateQuantizationParams();
input_array.data_type = input_array.final_data_type;
- if (input_array.data_type == ArrayDataType::kUint8) {
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint8>(input_minmax,
- &input_qparams);
- } else if (input_array.data_type == ArrayDataType::kInt16) {
- GetQuantizationParamsFromMinMax<ArrayDataType::kInt16>(input_minmax,
- &input_qparams);
- } else {
- LOG(FATAL) << "unhandled data type";
- }
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ input_array, input_array.data_type, &input_qparams);
transformation->AddMessageF(
"Created %s"
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc
index 670bcf64e7..9848d55c83 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_array_data_types.cc
@@ -62,6 +62,9 @@ bool PropagateArrayDataTypes::Run(Model* model, std::size_t op_index) {
case OperatorType::kGreaterEqual:
case OperatorType::kEqual:
case OperatorType::kNotEqual:
+ case OperatorType::kAny:
+ case OperatorType::kLogicalAnd:
+ case OperatorType::kLogicalNot:
// These operators unconditionally produce bool outputs
SetDataTypeForAllOutputs(model, op, ArrayDataType::kBool);
break;
@@ -190,6 +193,14 @@ bool PropagateArrayDataTypes::Run(Model* model, std::size_t op_index) {
SetDataTypeForAllOutputs(model, op, data_type);
break;
}
+ case OperatorType::kPack: {
+ const ArrayDataType data_type = model->GetArray(op->inputs[0]).data_type;
+ for (const auto& input : op->inputs) {
+ CHECK(data_type == model->GetArray(input).data_type);
+ }
+ SetDataTypeForAllOutputs(model, op, data_type);
+ break;
+ }
default: {
// These operators produce outputs with the same type as their 1st input
CHECK_GT(op->inputs.size(), 0);
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc
index 53fc87da7b..3ad6b0ec6f 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fake_quant_num_bits.cc
@@ -66,24 +66,10 @@ bool ChangeArrayDataType(GraphTransformation* transformation, Array* array,
"Rescaling min/max from %g,%g (%s) to %g,%g (%s)", array_minmax.min,
array_minmax.max, ArrayDataTypeName(array->data_type), min, max,
ArrayDataTypeName(new_data_type));
-
array_minmax.min = min;
array_minmax.max = max;
- switch (new_data_type) {
- case ArrayDataType::kUint8:
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint8>(
- array_minmax, array->quantization_params.get());
- break;
- case ArrayDataType::kInt16:
- GetQuantizationParamsFromMinMax<ArrayDataType::kInt16>(
- array_minmax, array->quantization_params.get());
- break;
- default:
- CHECK(false) << "Unsupported quantized data type: "
- << ArrayDataTypeName(new_data_type);
- return false;
- }
-
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ *array, new_data_type, array->quantization_params.get());
// Directly change the type as the array was already quantized.
array->data_type = new_data_type;
changed = true;
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
index 4f95c57451..62ed5c46e9 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/propagate_fixed_sizes.cc
@@ -437,6 +437,7 @@ void ProcessTensorFlowReshapeOperator(Model* model,
product_non_wildcard_dims *= shape_data[i];
}
}
+
const int input_flat_size = RequiredBufferSizeForShape(input_shape);
if (has_wildcard) {
CHECK_GE(input_flat_size, product_non_wildcard_dims)
@@ -445,6 +446,12 @@ void ProcessTensorFlowReshapeOperator(Model* model,
<< op->outputs[0] << "\". Are your input shapes correct?";
shape_data[wildcard_index] = input_flat_size / product_non_wildcard_dims;
}
+
+ if (shape_data.size() == 1 && shape_data[0] == 0) {
+ // We have reshaped a scalar, so preserve as a scalar.
+ shape_data.clear();
+ }
+
auto& output_shape = *output_array.mutable_shape();
*output_shape.mutable_dims() = shape_data;
CHECK_EQ(input_flat_size, RequiredBufferSizeForShape(output_shape))
@@ -522,12 +529,14 @@ void ProcessAddNOperator(Model* model, Operator* op) {
bool KeepDims(const Operator& op) {
switch (op.type) {
- case OperatorType::kMin: // Reduction Min
+ case OperatorType::kReduceMin: // Reduction Min
return static_cast<const TensorFlowMinOperator&>(op).keep_dims;
- case OperatorType::kMax: // Reduction Max
+ case OperatorType::kReduceMax: // Reduction Max
return static_cast<const TensorFlowMaxOperator&>(op).keep_dims;
case OperatorType::kSum:
return static_cast<const TensorFlowSumOperator&>(op).keep_dims;
+ case OperatorType::kReduceProd:
+ return static_cast<const TensorFlowProdOperator&>(op).keep_dims;
case OperatorType::kMean:
return static_cast<const MeanOperator&>(op).keep_dims;
default:
@@ -1034,17 +1043,28 @@ void ProcessGatherOperator(Model* model, GatherOperator* op) {
return;
}
+ // Yield until the axis has been resolved.
+ if (!op->axis) {
+ return;
+ }
+ int axis = op->axis.value();
+
const auto& input_shape = input_array.shape();
const auto& indices_shape = indices_array.shape();
QCHECK_GE(input_shape.dimensions_count(), 1);
op->input_rank = input_shape.dimensions_count();
+ QCHECK_LT(axis, op->input_rank);
- // Copy the input dimensions to the output except for dimension 0,
+ // Copy the input dimensions to the output except for the axis dimension,
// where the dimension of indices_shape is used.
- // TODO(mgubin): if axis != 0 this is not true, change when it's supported.
auto output_dims = output_array.mutable_shape()->mutable_dims();
- output_dims->push_back(indices_shape.dims(0));
- for (int dim = 1; dim < input_shape.dimensions_count(); dim++) {
+ for (int dim = 0; dim < axis; ++dim) {
+ output_dims->push_back(input_shape.dims(dim));
+ }
+ for (int dim = 0; dim < indices_shape.dimensions_count(); ++dim) {
+ output_dims->push_back(indices_shape.dims(dim));
+ }
+ for (int dim = axis + 1; dim < input_shape.dimensions_count(); ++dim) {
output_dims->push_back(input_shape.dims(dim));
}
}
@@ -1190,7 +1210,7 @@ void ProcessShapeOperator(Model* model, TensorFlowShapeOperator* op) {
output_shape->ReplaceDims({input_array.shape().dimensions_count()});
}
-void ProcessStackOperator(Model* model, StackOperator* op) {
+void ProcessPackOperator(Model* model, PackOperator* op) {
CHECK_GE(op->inputs.size(), 1);
CHECK_EQ(op->outputs.size(), 1);
auto& output_array = model->GetArray(op->outputs[0]);
@@ -1199,7 +1219,7 @@ void ProcessStackOperator(Model* model, StackOperator* op) {
return;
}
- std::unique_ptr<Shape> stacked_shape;
+ std::unique_ptr<Shape> packed_shape;
for (const auto& input : op->inputs) {
const auto& input_array = model->GetArray(input);
if (!input_array.has_shape()) {
@@ -1208,23 +1228,23 @@ void ProcessStackOperator(Model* model, StackOperator* op) {
}
Shape shape = input_array.shape();
- if (!stacked_shape) {
- stacked_shape.reset(new Shape(shape));
+ if (!packed_shape) {
+ packed_shape.reset(new Shape(shape));
} else {
- CHECK(*stacked_shape == shape) << "All input arrays to Stack operators "
- "must have the same shape. Input \""
- << input << "\" is different.";
+ CHECK(*packed_shape == shape) << "All input arrays to Pack operators "
+ "must have the same shape. Input \""
+ << input << "\" is different.";
}
}
int axis = op->axis;
if (axis < 0) {
// Handle negative axis
- axis += stacked_shape->dims().size() + 1;
+ axis += packed_shape->dims().size() + 1;
}
- stacked_shape->mutable_dims()->insert(
- stacked_shape->mutable_dims()->begin() + axis, op->inputs.size());
- output_array.copy_shape(*stacked_shape);
+ packed_shape->mutable_dims()->insert(
+ packed_shape->mutable_dims()->begin() + axis, op->inputs.size());
+ output_array.copy_shape(*packed_shape);
}
void ProcessStridedSliceOperator(Model* model, StridedSliceOperator* op) {
@@ -1499,6 +1519,65 @@ void ProcessTileOperator(Model* model, TensorFlowTileOperator* op) {
}
}
+void ProcessAnyOperator(Model* model, AnyOperator* op) {
+ CHECK_EQ(op->inputs.size(), 2);
+ CHECK_EQ(op->outputs.size(), 1);
+
+ auto& output_array = model->GetArray(op->outputs[0]);
+ if (output_array.has_shape()) {
+ // We have already run.
+ return;
+ }
+
+ const auto& input_array = model->GetArray(op->inputs[0]);
+ if (!input_array.has_shape()) {
+ // Yield until input dims have been resolved.
+ return;
+ }
+ const auto& input_shape = input_array.shape();
+
+ auto& reduction_indices_array = model->GetArray(op->inputs[1]);
+ if (!reduction_indices_array.has_shape()) {
+ // Yield until the reduction indices shape has been resolved.
+ return;
+ }
+ if (!reduction_indices_array.buffer) {
+ // Yield until the reduction indices are constant.
+ return;
+ }
+ CHECK(reduction_indices_array.data_type == ArrayDataType::kInt32)
+ << "Any reduction input must be int32";
+
+ int input_rank = input_shape.dimensions_count();
+ std::set<int32> true_indices;
+ const auto& reduction_indices =
+ reduction_indices_array.GetBuffer<ArrayDataType::kInt32>().data;
+ for (int i = 0; i < reduction_indices.size(); ++i) {
+ const int32 reduction_index = reduction_indices[i];
+ if (reduction_index < -input_rank || reduction_index >= input_rank) {
+ CHECK(false) << "Invalid reduction dimension " << reduction_index
+ << " for input with " << input_rank << " dimensions";
+ }
+ int32 wrapped_index = reduction_index;
+ if (wrapped_index < 0) {
+ wrapped_index += input_rank;
+ }
+ true_indices.insert(wrapped_index);
+ }
+
+ auto* mutable_dims = output_array.mutable_shape()->mutable_dims();
+ mutable_dims->clear();
+ for (int i = 0; i < input_rank; ++i) {
+ if (true_indices.count(i) > 0) {
+ if (op->keep_dims) {
+ mutable_dims->emplace_back(1);
+ }
+ } else {
+ mutable_dims->emplace_back(input_shape.dims(i));
+ }
+ }
+}
+
} // namespace
bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
@@ -1537,6 +1616,8 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
case OperatorType::kFloor:
case OperatorType::kExp:
case OperatorType::kSin:
+ case OperatorType::kLogicalAnd:
+ case OperatorType::kLogicalNot:
ProcessSimpleOperator(model, op, 0);
break;
case OperatorType::kGather:
@@ -1605,9 +1686,10 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
case OperatorType::kL2Pool:
ProcessL2PoolOperator(model, static_cast<L2PoolOperator*>(op));
break;
- case OperatorType::kMin: // Reduction Min
- case OperatorType::kMax: // Reduction Max
+ case OperatorType::kReduceMin: // Reduction Min
+ case OperatorType::kReduceMax: // Reduction Max
case OperatorType::kSum:
+ case OperatorType::kReduceProd:
case OperatorType::kMean:
ProcessTensorFlowReductionOperator(model, op);
break;
@@ -1656,8 +1738,8 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
case OperatorType::kShape:
ProcessShapeOperator(model, static_cast<TensorFlowShapeOperator*>(op));
break;
- case OperatorType::kStack:
- ProcessStackOperator(model, static_cast<StackOperator*>(op));
+ case OperatorType::kPack:
+ ProcessPackOperator(model, static_cast<PackOperator*>(op));
break;
case OperatorType::kReorderAxes:
ProcessReorderAxesOperator(model, static_cast<ReorderAxesOperator*>(op));
@@ -1729,6 +1811,9 @@ bool PropagateFixedSizes::Run(Model* model, std::size_t op_index) {
case OperatorType::kTile:
ProcessTileOperator(model, static_cast<TensorFlowTileOperator*>(op));
break;
+ case OperatorType::kAny:
+ ProcessAnyOperator(model, static_cast<AnyOperator*>(op));
+ break;
default:
// Unimplemented, another graph transformation should drop it.
LOG(FATAL) << "Unhandled operator type " << OperatorTypeName(op->type);
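The generalized gather shape logic above replaces the old axis-0-only rule: the output shape is the params shape with the axis dimension replaced by all of the indices dimensions. A small self-contained sketch of that rule (hypothetical names, not the toco API):

#include <iostream>
#include <vector>

// Output dims = params dims before `axis`, then all indices dims, then params
// dims after `axis` -- the rule ProcessGatherOperator applies above.
std::vector<int> GatherOutputShape(const std::vector<int>& params,
                                   const std::vector<int>& indices, int axis) {
  std::vector<int> out(params.begin(), params.begin() + axis);
  out.insert(out.end(), indices.begin(), indices.end());
  out.insert(out.end(), params.begin() + axis + 1, params.end());
  return out;
}

int main() {
  // params [4,5,6], indices [2,3], axis=1  ->  [4,2,3,6]
  for (int d : GatherOutputShape({4, 5, 6}, {2, 3}, 1)) std::cout << d << ' ';
  std::cout << '\n';
}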
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc b/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc
index d74cad9a62..44733391f5 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.cc
@@ -74,46 +74,54 @@ ArrayDataType GetQuantizedDataType(const Array& array,
}
}
-void GetQuantizationParams(ArrayDataType data_type, const MinMax& minmax,
- QuantizationParams* quantization_params) {
- switch (data_type) {
+template <ArrayDataType A>
+void ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ const Array& array, QuantizationParams* quantization_params) {
+ *quantization_params = ::tflite::ChooseQuantizationParams<DataType<A>>(
+ array.minmax->min, array.minmax->max, array.narrow_range);
+}
+
+void ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ const Array& array, ArrayDataType quantized_data_type,
+ QuantizationParams* quantization_params) {
+ switch (quantized_data_type) {
case ArrayDataType::kInt8:
- GetQuantizationParamsFromMinMax<ArrayDataType::kInt8>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kInt8>(array, quantization_params);
break;
case ArrayDataType::kUint8:
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint8>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kUint8>(array, quantization_params);
break;
case ArrayDataType::kInt16:
- GetQuantizationParamsFromMinMax<ArrayDataType::kInt16>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kInt16>(array, quantization_params);
break;
case ArrayDataType::kUint16:
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint16>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kUint16>(array, quantization_params);
break;
case ArrayDataType::kInt32:
- GetQuantizationParamsFromMinMax<ArrayDataType::kInt32>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kInt32>(array, quantization_params);
break;
case ArrayDataType::kUint32:
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint32>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kUint32>(array, quantization_params);
break;
case ArrayDataType::kInt64:
- GetQuantizationParamsFromMinMax<ArrayDataType::kInt64>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kInt64>(array, quantization_params);
break;
case ArrayDataType::kUint64:
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint64>(
- minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType<
+ ArrayDataType::kUint64>(array, quantization_params);
break;
case ArrayDataType::kFloat:
case ArrayDataType::kNone:
default:
LOG(FATAL) << "Unhandled final quantization type "
- << static_cast<int>(data_type);
+ << static_cast<int>(quantized_data_type);
}
}
@@ -121,8 +129,8 @@ namespace {
template <ArrayDataType A>
std::unique_ptr<GenericBuffer> QuantizeBuffer(
- const GenericBuffer& buffer,
- const QuantizationParams& quantization_params) {
+ const Array& array, const QuantizationParams& quantization_params) {
+ const GenericBuffer& buffer = *array.buffer;
const auto inverse_scale = 1. / quantization_params.scale;
CHECK(buffer.type == ArrayDataType::kFloat);
const auto& float_buffer =
@@ -140,8 +148,15 @@ std::unique_ptr<GenericBuffer> QuantizeBuffer(
} else {
scaled_val = quantization_params.zero_point + inverse_scale * src_val;
}
- quantized_buffer->data[i] =
- tflite::SafeCast<DataType<A>>(std::round(scaled_val));
+ auto integer_val = tflite::SafeCast<DataType<A>>(std::round(scaled_val));
+ // In addition to its effect on the choice of quantization params upstream
+ // of here, narrow_range also means nudging the min quantized value by +1,
+ // so e.g. uint8 values get constrained to [1, 255].
+ if (integer_val == std::numeric_limits<DataType<A>>::min() &&
+ array.narrow_range) {
+ integer_val++;
+ }
+ quantized_buffer->data[i] = integer_val;
}
return std::unique_ptr<GenericBuffer>(quantized_buffer);
}
@@ -155,7 +170,7 @@ void QuantizeArray(GraphTransformation* transformation, Model* model,
CHECK(!array.quantization_params);
array.GetOrCreateQuantizationParams() = quantization_params;
if (array.buffer) {
- array.buffer = QuantizeBuffer<A>(*array.buffer, quantization_params);
+ array.buffer = QuantizeBuffer<A>(array, quantization_params);
}
array.data_type = A;
array.final_data_type = A;
@@ -210,8 +225,8 @@ bool IsArrayQuantizedRangeSubset(GraphTransformation* transformation,
} else {
// Work around cases where we are asking for this prior to the Quantize
// transformation having added the quantization_params.
- GetQuantizationParams(quantized_data_type, *array.minmax,
- &quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ array, quantized_data_type, &quantization_params);
transformation->AddMessageF(
"No quantization params - infering from data type %s with minmax "
"%g,%g as zero_point=%g, scale=%g",
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h b/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h
index 79a2ce7e50..cf093c6f17 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h
+++ b/tensorflow/contrib/lite/toco/graph_transformations/quantization_util.h
@@ -38,21 +38,11 @@ bool GetQuantizedDataTypeNumericalRange(ArrayDataType data_type,
ArrayDataType GetQuantizedDataType(const Array& array,
ArrayDataType default_type);
-// Returns the quantization params for the array with the given data type and
-// minmax.
-void GetQuantizationParams(ArrayDataType data_type, const MinMax& minmax,
- QuantizationParams* quantization_params);
-
-// Returns the quantization params for the data type and minmax values.
-template <ArrayDataType A>
-void GetQuantizationParamsFromMinMax(const MinMax& minmax,
- QuantizationParams* quantization_params) {
- using Integer = DataType<A>;
- const double rmin = minmax.min;
- const double rmax = minmax.max;
- *quantization_params =
- ::tflite::ChooseQuantizationParams<Integer>(rmin, rmax);
-}
+// Chooses the quantization params for a given array and a given target
+// quantized data type (which may not be the array's current data type).
+void ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ const Array& array, ArrayDataType quantized_data_type,
+ QuantizationParams* quantization_params);
// Quantizes an array by setting its data type and (if constant) quantizing
// all values in the array.
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc b/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc
index 58885b4950..f6ce3b3ecb 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/quantize.cc
@@ -50,6 +50,7 @@ bool SupportsQuantization(const Operator& op) {
type == OperatorType::kSqueeze || type == OperatorType::kPad ||
type == OperatorType::kPadV2 || type == OperatorType::kReshape ||
type == OperatorType::kTanh || type == OperatorType::kMul ||
+ type == OperatorType::kBatchToSpaceND ||
type == OperatorType::kSpaceToBatchND ||
type == OperatorType::kSpaceToDepth ||
type == OperatorType::kStridedSlice ||
@@ -212,13 +213,15 @@ bool ChooseQuantizationForOperatorInput(
if (op.type == OperatorType::kLstmCell) {
if (input_index == LstmCellOperator::PREV_STATE_INPUT) {
*quantized_data_type = ArrayDataType::kInt16;
- GetQuantizationParams(*quantized_data_type, minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ array, *quantized_data_type, quantization_params);
return true;
}
}
*quantized_data_type = GetQuantizedDataType(array, ArrayDataType::kUint8);
- GetQuantizationParams(*quantized_data_type, minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ array, *quantized_data_type, quantization_params);
transformation->AddMessageF(
"For input array %s with min=%g, max=%g, chose to quantize as %s (f=%s) "
"with zero_point=%d, scale=%g",
@@ -358,12 +361,14 @@ bool ChooseQuantizationForOperatorOutput(
if (output_index == LstmCellOperator::STATE_OUTPUT ||
output_index == LstmCellOperator::ACTIV_TEMP) {
*quantized_data_type = ArrayDataType::kInt16;
- GetQuantizationParams(*quantized_data_type, minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ array, *quantized_data_type, quantization_params);
return true;
}
}
*quantized_data_type = GetQuantizedDataType(array, ArrayDataType::kUint8);
- GetQuantizationParams(*quantized_data_type, minmax, quantization_params);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ array, *quantized_data_type, quantization_params);
transformation->AddMessageF(
"For output array %s with min=%g, max=%g"
", chose to quantize as %s with zero_point=%d"
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc b/tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc
index 88ea0945e7..7a8515f6d1 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/quantize_weights.cc
@@ -36,10 +36,8 @@ void GetQuantizationParamsFromArray(const Array& array,
const std::vector<float>& float_vals =
array.GetBuffer<ArrayDataType::kFloat>().data;
auto minmax = std::minmax_element(float_vals.begin(), float_vals.end());
- MinMax toco_minmax;
- toco_minmax.min = *minmax.first;
- toco_minmax.max = *minmax.second;
- GetQuantizationParams(ArrayDataType::kUint8, toco_minmax, params);
+ *params = tflite::ChooseQuantizationParams<uint8>(
+ *minmax.first, *minmax.second, array.narrow_range);
}
} // namespace
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc b/tensorflow/contrib/lite/toco/graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc
new file mode 100644
index 0000000000..5b41c49bfa
--- /dev/null
+++ b/tensorflow/contrib/lite/toco/graph_transformations/read_array_minmax_and_narrow_range_from_fake_quant.cc
@@ -0,0 +1,78 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h"
+#include "tensorflow/contrib/lite/toco/model.h"
+#include "tensorflow/contrib/lite/toco/tooling_util.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace toco {
+
+namespace {
+
+bool ApplyAttrsToArray(GraphTransformation* transformation, Model* model,
+ const FakeQuantOperator& fq_op,
+ const string& array_name) {
+ bool changed = false;
+ auto& annotated_array = model->GetArray(array_name);
+ if (!annotated_array.minmax) {
+ const MinMax& minmax = *fq_op.minmax;
+ annotated_array.GetOrCreateMinMax() = minmax;
+ transformation->AddMessageF(
+ "Read min/max annotation for array %s: min=%g, max=%g", array_name,
+ minmax.min, minmax.max);
+ changed = true;
+ }
+ if (fq_op.narrow_range && !annotated_array.narrow_range) {
+ annotated_array.narrow_range = true;
+ transformation->AddMessageF("Read narrow_range annotation for array %s",
+ array_name);
+ changed = true;
+ }
+ return changed;
+}
+
+} // end namespace
+
+bool ReadArrayMinmaxAndNarrowRangeFromFakeQuant::Run(Model* model,
+ std::size_t op_index) {
+ const auto fakequant_it = model->operators.begin() + op_index;
+ auto* fakequant_base_op = fakequant_it->get();
+ if (fakequant_base_op->type != OperatorType::kFakeQuant) {
+ return false;
+ }
+ auto* fq_op = static_cast<FakeQuantOperator*>(fakequant_base_op);
+
+ if (!fq_op->minmax) {
+ // Need to be resolved first by ResolveFakeQuantArgsFromVars.
+ return false;
+ }
+
+ // At this point, this FakeQuantOperator should have a MinMax
+ // attached to it, and should only have 1 input (it should not have
+ // 2nd and 3rd input arrays giving min and max anymore).
+ CHECK(fq_op->minmax);
+ CHECK_EQ(1, fq_op->inputs.size());
+
+ return ApplyAttrsToArray(this, model, *fq_op, fq_op->inputs[0]) ||
+ ApplyAttrsToArray(this, model, *fq_op, fq_op->outputs[0]);
+}
+
+} // namespace toco
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/read_fake_quant_min_max.cc b/tensorflow/contrib/lite/toco/graph_transformations/read_fake_quant_min_max.cc
deleted file mode 100644
index bdcca5b7ca..0000000000
--- a/tensorflow/contrib/lite/toco/graph_transformations/read_fake_quant_min_max.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include <algorithm>
-#include <memory>
-#include <string>
-#include <unordered_map>
-#include <vector>
-
-#include "tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h"
-#include "tensorflow/contrib/lite/toco/model.h"
-#include "tensorflow/contrib/lite/toco/tooling_util.h"
-#include "tensorflow/core/platform/logging.h"
-
-namespace toco {
-
-namespace {
-
-bool ApplyMinMaxToArray(GraphTransformation* transformation, Model* model,
- const MinMax& minmax, const string& array_name) {
- auto& annotated_array = model->GetArray(array_name);
- if (annotated_array.minmax) {
- return false;
- }
- annotated_array.GetOrCreateMinMax() = minmax;
- transformation->AddMessageF(
- "Read min/max annotation for array %s: min=%g, max=%g", array_name,
- minmax.min, minmax.max);
- return true;
-}
-
-} // end namespace
-
-bool ReadFakeQuantMinMax::Run(Model* model, std::size_t op_index) {
- const auto fakequant_it = model->operators.begin() + op_index;
- auto* fakequant_base_op = fakequant_it->get();
- if (fakequant_base_op->type != OperatorType::kFakeQuant) {
- return false;
- }
- auto* fakequant_op = static_cast<FakeQuantOperator*>(fakequant_base_op);
-
- bool changed = false;
-
- if (!fakequant_op->minmax) {
- CHECK_EQ(fakequant_op->inputs.size(), 3);
- // We need to yield until the min and max parameters have been
- // resolved to constant arrays.
- for (int i = 1; i <= 2; i++) {
- if (!IsConstantParameterArray(*model, fakequant_op->inputs[1])) {
- return false;
- }
- }
-
- // Obtain the final min/max values
- const auto& min_array = model->GetArray(fakequant_op->inputs[1]);
- const auto& max_array = model->GetArray(fakequant_op->inputs[2]);
- CHECK_EQ(RequiredBufferSizeForShape(min_array.shape()), 1);
- CHECK_EQ(RequiredBufferSizeForShape(max_array.shape()), 1);
- fakequant_op->minmax.reset(new MinMax);
- MinMax& minmax = *fakequant_op->minmax;
- minmax.min = min_array.GetBuffer<ArrayDataType::kFloat>().data[0];
- minmax.max = max_array.GetBuffer<ArrayDataType::kFloat>().data[0];
- // We always want [min, max] to contain 0.
- if (minmax.min > 0 || minmax.max < 0) {
- LOG(ERROR) << "For " << LogName(*fakequant_op) << " the MinMax range "
- << "[" << minmax.min << ", " << minmax.max
- << "] does not contain 0. "
- << "Proceeding by tweaking it to contain 0, which will result "
- "in poor accuracy.";
- }
- minmax.min = std::min(minmax.min, 0.);
- minmax.max = std::max(minmax.max, 0.);
-
- // We won't use the input arrays that provided these min and max
- // values, anymore. Delete them unless they are used by something
- // else.
- for (int i = 1; i <= 2; i++) {
- if (CountOpsWithInput(*model, fakequant_op->inputs[i]) == 1) {
- model->EraseArray(fakequant_op->inputs[i]);
- }
- }
- fakequant_op->inputs.resize(1);
- changed = true;
- }
-
- // At this point, this FakeQuantOperator should have a MinMax
- // attached to it, and should only have 1 input (it should not have
- // 2nd and 3rd input arrays giving min and max anymore).
- CHECK(fakequant_op->minmax);
- CHECK_EQ(1, fakequant_op->inputs.size());
-
- const MinMax& minmax = *fakequant_op->minmax;
-
- // Record the MinMax info on the input and output arrays
- changed |= ApplyMinMaxToArray(this, model, minmax, fakequant_op->inputs[0]);
- changed |= ApplyMinMaxToArray(this, model, minmax, fakequant_op->outputs[0]);
-
- return changed;
-}
-
-} // namespace toco
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/remove_trivial_reshape.cc b/tensorflow/contrib/lite/toco/graph_transformations/remove_trivial_reshape.cc
index 404f27e067..5295eeccec 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/remove_trivial_reshape.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/remove_trivial_reshape.cc
@@ -59,6 +59,15 @@ bool IsReshapeTrivial(const Model& model, const Operator& op,
if (CountOpsWithInput(model, op.outputs[0]) == 1) {
const auto* next_op = GetOpWithInput(model, op.outputs[0]);
if (next_op->type == OperatorType::kReshape) {
+ if (!IsDiscardableArray(model, next_op->outputs[0])) {
+ // If the |next_op| output is used as a model output we need to preserve
+ // its shape.
+ transformation->AddMessageF(
+ "%s cannot be merged into following reshape %s as it is "
+ "non-discardable and must keep the specified shape",
+ LogName(op), LogName(*next_op));
+ return false;
+ }
transformation->AddMessageF(
"%s is trivial because its output is only consumed by another "
"Reshape op %s",
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc
index efb7bb2184..058f314b33 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_fake_quant.cc
@@ -25,6 +25,37 @@ limitations under the License.
namespace toco {
+template <ArrayDataType A>
+void GetBoundsForQuantizedDataType(double* min, double* max) {
+ using limits = std::numeric_limits<DataType<A>>;
+ *min = limits::min();
+ *max = limits::max();
+}
+
+void GetBoundsForQuantizedDataType(ArrayDataType quantized_data_type,
+ double* min, double* max) {
+ switch (quantized_data_type) {
+ case ArrayDataType::kUint8:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kUint8>(min, max);
+ case ArrayDataType::kInt8:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kInt8>(min, max);
+ case ArrayDataType::kUint16:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kUint16>(min, max);
+ case ArrayDataType::kInt16:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kInt16>(min, max);
+ case ArrayDataType::kUint32:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kUint32>(min, max);
+ case ArrayDataType::kInt32:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kInt32>(min, max);
+ case ArrayDataType::kUint64:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kUint64>(min, max);
+ case ArrayDataType::kInt64:
+ return GetBoundsForQuantizedDataType<ArrayDataType::kInt64>(min, max);
+ default:
+ LOG(FATAL) << "unhandled quantized data type";
+ }
+}
+
bool ResolveConstantFakeQuant::Run(Model* model, std::size_t op_index) {
const auto fakequant_it = model->operators.begin() + op_index;
const auto* fakequant_base_op = fakequant_it->get();
@@ -76,14 +107,21 @@ bool ResolveConstantFakeQuant::Run(Model* model, std::size_t op_index) {
const int size = input_buffer.data.size();
output_buffer.data.resize(size);
QuantizationParams qparams;
- GetQuantizationParamsFromMinMax<ArrayDataType::kUint8>(*fakequant_op->minmax,
- &qparams);
+ ChooseQuantizationParamsForArrayAndQuantizedDataType(
+ output_array, quantized_data_type, &qparams);
+ double quantized_min, quantized_max;
+ GetBoundsForQuantizedDataType(quantized_data_type, &quantized_min,
+ &quantized_max);
+ if (fakequant_op->narrow_range) {
+ quantized_min++;
+ }
+
for (int i = 0; i < size; i++) {
const double src_val = input_buffer.data[i];
const double unclamped_quantized_val =
std::round(qparams.zero_point + src_val / qparams.scale);
- const double quantized_val =
- std::min(255., std::max(0., unclamped_quantized_val));
+ const double quantized_val = std::min(
+ quantized_max, std::max(quantized_min, unclamped_quantized_val));
const double dst_val = qparams.scale * (quantized_val - qparams.zero_point);
output_buffer.data[i] = dst_val;
}
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_gather.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_gather.cc
index debe298a5a..36d7dad0ce 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_gather.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_gather.cc
@@ -69,7 +69,7 @@ bool ResolveConstantGather::Run(Model* model, std::size_t op_index) {
}
const auto* op = static_cast<const GatherOperator*>(base_op);
- CHECK_EQ(op->inputs.size(), 2);
+ CHECK_GE(op->inputs.size(), 2);
CHECK_EQ(op->outputs.size(), 1);
auto& output_array = model->GetArray(op->outputs[0]);
if (output_array.data_type == ArrayDataType::kNone) {
@@ -81,10 +81,14 @@ bool ResolveConstantGather::Run(Model* model, std::size_t op_index) {
return false;
}
- // Only handling axis=0 for now.
- if (op->axis != 0) {
+ if (!op->axis) {
+ // Yield until axis has been set by ResolveGatherAttributes.
+ return false;
+ }
+ if (op->axis.value() != 0) {
+ // Only handling axis=0 for now.
AddMessageF("%s has axis %d; only axis=0 is supported", LogName(*op),
- op->axis);
+ op->axis.value());
return false;
}
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_stack.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_pack.cc
index a4d5f1923a..e86616574d 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_stack.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_pack.cc
@@ -24,7 +24,7 @@ namespace toco {
namespace {
template <ArrayDataType Type>
-void Stack(Model* model, StackOperator const& op) {
+void Pack(Model* model, PackOperator const& op) {
auto& output_array = model->GetArray(op.outputs[0]);
CHECK(output_array.data_type == Type);
@@ -33,8 +33,8 @@ void Stack(Model* model, StackOperator const& op) {
output_array.GetMutableBuffer<Type>().data;
output_data.resize(RequiredBufferSizeForShape(output_array.shape()));
- // Stack inputs into buffer
- CHECK_EQ(op.axis, 0) << "Stacking only supported along first axis";
+ // Pack inputs into buffer
+ CHECK_EQ(op.axis, 0) << "Packing only supported along first axis";
int dst_offset = 0;
for (int i = 0; i < op.inputs.size(); i++) {
// Append array data to output for each input array
@@ -49,13 +49,13 @@ void Stack(Model* model, StackOperator const& op) {
} // namespace
-bool ResolveConstantStack::Run(Model* model, std::size_t op_index) {
+bool ResolveConstantPack::Run(Model* model, std::size_t op_index) {
auto it = model->operators.begin() + op_index;
const auto* base_op = it->get();
- if (base_op->type != OperatorType::kStack) {
+ if (base_op->type != OperatorType::kPack) {
return false;
}
- const auto* op = static_cast<const StackOperator*>(base_op);
+ const auto* op = static_cast<const PackOperator*>(base_op);
CHECK_GE(op->inputs.size(), 1);
CHECK_EQ(op->outputs.size(), 1);
@@ -82,24 +82,24 @@ bool ResolveConstantStack::Run(Model* model, std::size_t op_index) {
// Handle negative axis
axis += model->GetArray(op->inputs[0]).shape().dims().size();
}
- CHECK_EQ(axis, 0) << "Stacking only supported along 0th axis";
+ CHECK_EQ(axis, 0) << "Packing only supported along 0th axis";
CHECK(!output_array.buffer);
switch (output_array.data_type) {
case ArrayDataType::kFloat:
- Stack<ArrayDataType::kFloat>(model, *op);
+ Pack<ArrayDataType::kFloat>(model, *op);
break;
case ArrayDataType::kUint8:
- Stack<ArrayDataType::kUint8>(model, *op);
+ Pack<ArrayDataType::kUint8>(model, *op);
break;
case ArrayDataType::kInt32:
- Stack<ArrayDataType::kInt32>(model, *op);
+ Pack<ArrayDataType::kInt32>(model, *op);
break;
case ArrayDataType::kInt64:
- Stack<ArrayDataType::kInt64>(model, *op);
+ Pack<ArrayDataType::kInt64>(model, *op);
break;
default:
- LOG(FATAL) << "Unsupported data type given to Stack op with output \""
+ LOG(FATAL) << "Unsupported data type given to Pack op with output \""
<< op->outputs[0] << "\"";
break;
}
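
A quick sketch of what packing constant inputs along axis 0 amounts to: the flat buffers are concatenated and the output shape gains a leading dimension equal to the number of inputs. The helper below is illustrative only and uses plain `std::vector` buffers in place of toco arrays:

```cpp
#include <iostream>
#include <vector>

// Packs N equally-shaped inputs along a new leading axis (axis 0). For flat
// buffers this is simply concatenation; the packed shape is [N, input_shape...].
template <typename T>
std::vector<T> PackAlongAxis0(const std::vector<std::vector<T>>& inputs) {
  std::vector<T> packed;
  for (const auto& input : inputs) {
    packed.insert(packed.end(), input.begin(), input.end());
  }
  return packed;
}

int main() {
  // Two inputs of shape [3] pack into an output of shape [2, 3].
  const std::vector<std::vector<float>> inputs = {{1, 2, 3}, {4, 5, 6}};
  for (float v : PackAlongAxis0(inputs)) std::cout << v << " ";  // 1 2 3 4 5 6
  std::cout << "\n";
  return 0;
}
```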
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc
index f89ef85fdb..fe3882c28d 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_constant_unary.cc
@@ -57,8 +57,8 @@ bool ResolveConstantUnaryOperator::Run(Model* model, std::size_t op_index) {
case OperatorType::kSqrt:
case OperatorType::kSquare:
case OperatorType::kSum:
- case OperatorType::kMin: // Reduction Min
- case OperatorType::kMax: // Reduction Max
+ case OperatorType::kReduceMin: // Reduction Min
+ case OperatorType::kReduceMax: // Reduction Max
case OperatorType::kReshape:
case OperatorType::kRelu6:
case OperatorType::kRelu1:
@@ -196,7 +196,7 @@ bool ResolveConstantUnaryOperator::Run(Model* model, std::size_t op_index) {
}
output_float_data[i] = sum;
}
- } else if (unary_op->type == OperatorType::kMin) {
+ } else if (unary_op->type == OperatorType::kReduceMin) {
// At the moment only full reduction across all dimensions is supported.
// TODO(starka): Output should not be padded.
for (int i = 0; i < output_dims_count; i++) {
@@ -207,7 +207,7 @@ bool ResolveConstantUnaryOperator::Run(Model* model, std::size_t op_index) {
min = std::min(min, (*input_float_data)[i]);
}
output_float_data[0] = min;
- } else if (unary_op->type == OperatorType::kMax) {
+ } else if (unary_op->type == OperatorType::kReduceMax) {
// At the moment only full reduction across all dimensions is supported.
// TODO(starka): Output should not be padded.
for (int i = 0; i < output_dims_count; i++) {
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_fake_quant_args_from_vars.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_fake_quant_args_from_vars.cc
new file mode 100644
index 0000000000..0dda1fd0b3
--- /dev/null
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_fake_quant_args_from_vars.cc
@@ -0,0 +1,80 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <algorithm>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h"
+#include "tensorflow/contrib/lite/toco/model.h"
+#include "tensorflow/contrib/lite/toco/tooling_util.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace toco {
+
+bool ResolveFakeQuantArgsFromVars::Run(Model* model, std::size_t op_index) {
+ const auto fakequant_it = model->operators.begin() + op_index;
+ auto* fakequant_base_op = fakequant_it->get();
+ if (fakequant_base_op->type != OperatorType::kFakeQuant) {
+ return false;
+ }
+ auto* fakequant_op = static_cast<FakeQuantOperator*>(fakequant_base_op);
+
+ if (fakequant_op->minmax) {
+ // Already resolved.
+ return false;
+ }
+
+ CHECK_EQ(fakequant_op->inputs.size(), 3);
+ // We need to yield until the min and max parameters have been
+ // resolved to constant arrays.
+ for (int i = 1; i <= 2; i++) {
+ if (!IsConstantParameterArray(*model, fakequant_op->inputs[i])) {
+ return false;
+ }
+ }
+
+ // Obtain the final min/max values
+ const auto& min_array = model->GetArray(fakequant_op->inputs[1]);
+ const auto& max_array = model->GetArray(fakequant_op->inputs[2]);
+ CHECK_EQ(RequiredBufferSizeForShape(min_array.shape()), 1);
+ CHECK_EQ(RequiredBufferSizeForShape(max_array.shape()), 1);
+ fakequant_op->minmax.reset(new MinMax);
+ MinMax& minmax = *fakequant_op->minmax;
+ minmax.min = min_array.GetBuffer<ArrayDataType::kFloat>().data[0];
+ minmax.max = max_array.GetBuffer<ArrayDataType::kFloat>().data[0];
+ // We always want [min, max] to contain 0.
+ if (minmax.min > 0 || minmax.max < 0) {
+ LOG(ERROR) << "For " << LogName(*fakequant_op) << " the MinMax range "
+ << "[" << minmax.min << ", " << minmax.max
+ << "] does not contain 0. "
+ << "Proceeding by tweaking it to contain 0, which will result "
+ "in poor accuracy.";
+ }
+ minmax.min = std::min(minmax.min, 0.);
+ minmax.max = std::max(minmax.max, 0.);
+
+ // We no longer need the input arrays that provided these min and max
+ // values. Delete them unless they are used by something else.
+ for (int i = 1; i <= 2; i++) {
+ DeleteArrayIfUsedOnce(fakequant_op->inputs[i], model);
+ }
+ fakequant_op->inputs.resize(1);
+ return true;
+}
+
+} // namespace toco
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_gather_attributes.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_gather_attributes.cc
new file mode 100644
index 0000000000..ce825c91af
--- /dev/null
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_gather_attributes.cc
@@ -0,0 +1,53 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "tensorflow/contrib/lite/toco/graph_transformations/graph_transformations.h"
+#include "tensorflow/contrib/lite/toco/model.h"
+#include "tensorflow/contrib/lite/toco/tooling_util.h"
+#include "tensorflow/core/platform/logging.h"
+
+namespace toco {
+
+bool ResolveGatherAttributes::Run(Model* model, std::size_t op_index) {
+ auto* gather_op = model->operators[op_index].get();
+ if (gather_op->type != OperatorType::kGather) return false;
+ auto* op = static_cast<GatherOperator*>(gather_op);
+
+ if (op->axis) {
+ // Attributes already resolved
+ return false;
+ }
+ if (op->inputs.size() != 3) return false;
+ if (!IsConstantParameterArray(*model, op->inputs[2])) return false;
+
+ const auto& axis_array = model->GetArray(op->inputs[2]);
+ if (!axis_array.has_shape()) return false;
+ const auto& axis_data = axis_array.GetBuffer<ArrayDataType::kInt32>().data;
+ CHECK_EQ(axis_data.size(), 1)
+ << "Multidimensional gather not supported on " << LogName(*op);
+ op->axis = {axis_data[0]};
+
+ // Drop the axis array as we no longer need it.
+ DeleteArrayIfUsedOnce(op->inputs[2], model);
+ op->inputs.resize(2);
+
+ return true;
+}
+
+} // namespace toco
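
The `absl::optional<int>` axis introduced here enables a recurring pattern in these transformations: yield (return false) until another pass has resolved an attribute, while still distinguishing "unset" from a legitimate value of 0. A small illustrative sketch, assuming Abseil is available and using a hypothetical `FakeGatherOp` in place of the real operator:

```cpp
#include <iostream>
#include "absl/types/optional.h"

// Hypothetical operator type; not the real toco GatherOperator.
struct FakeGatherOp {
  absl::optional<int> axis;
};

bool TryHandle(const FakeGatherOp& op) {
  if (!op.axis) {
    // Yield; a later pass (ResolveGatherAttributes in the real code) fills it.
    return false;
  }
  return op.axis.value() == 0;  // Only axis 0 handled, as in the hunks above.
}

int main() {
  FakeGatherOp op;
  std::cout << TryHandle(op) << "\n";  // 0: yields, axis not yet resolved
  op.axis = 0;
  std::cout << TryHandle(op) << "\n";  // 1: resolved and supported
  return 0;
}
```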
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc b/tensorflow/contrib/lite/toco/graph_transformations/resolve_reduce_attributes.cc
index 013b50ac9b..7d456af2fb 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/resolve_mean_attributes.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/resolve_reduce_attributes.cc
@@ -24,11 +24,8 @@ limitations under the License.
namespace toco {
-bool ResolveMeanAttributes::Run(Model* model, std::size_t op_index) {
- auto* mean_op = model->operators[op_index].get();
- if (mean_op->type != OperatorType::kMean) return false;
- auto* op = static_cast<MeanOperator*>(mean_op);
-
+template <typename T>
+bool ResolveAttributes(Model* model, T* op) {
if (!op->axis.empty()) {
// Attributes already resolved
return false;
@@ -36,10 +33,28 @@ bool ResolveMeanAttributes::Run(Model* model, std::size_t op_index) {
if (op->inputs.size() != 2) return false;
if (!IsConstantParameterArray(*model, op->inputs[1])) return false;
- const auto& indices_array = model->GetArray(op->inputs[1]);
+ const Array& indices_array = model->GetArray(op->inputs[1]);
if (!indices_array.has_shape()) return false;
op->axis = indices_array.GetBuffer<ArrayDataType::kInt32>().data;
return true;
}
+bool ResolveReduceAttributes::Run(Model* model, std::size_t op_index) {
+ Operator* op = model->operators[op_index].get();
+ switch (op->type) {
+ case OperatorType::kMean:
+ return ResolveAttributes(model, static_cast<MeanOperator*>(op));
+ case OperatorType::kSum:
+ return ResolveAttributes(model, static_cast<TensorFlowSumOperator*>(op));
+ case OperatorType::kReduceProd:
+ return ResolveAttributes(model, static_cast<TensorFlowProdOperator*>(op));
+ case OperatorType::kReduceMin:
+ return ResolveAttributes(model, static_cast<TensorFlowMinOperator*>(op));
+ case OperatorType::kReduceMax:
+ return ResolveAttributes(model, static_cast<TensorFlowMaxOperator*>(op));
+ default:
+ return false;
+ }
+}
+
} // namespace toco
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/tests/BUILD b/tensorflow/contrib/lite/toco/graph_transformations/tests/BUILD
index 95e8433be2..e163fc9ae1 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/tests/BUILD
+++ b/tensorflow/contrib/lite/toco/graph_transformations/tests/BUILD
@@ -10,6 +10,7 @@ load(
tf_cc_test(
name = "lstm_utils_test",
srcs = ["lstm_utils_test.cc"],
+ tags = ["no_oss"],
deps = [
"//tensorflow/contrib/lite/toco:graph_transformations",
"//tensorflow/contrib/lite/toco:model",
@@ -21,6 +22,7 @@ tf_cc_test(
tf_cc_test(
name = "quantize_weights_test",
srcs = ["quantize_weights_test.cc"],
+ tags = ["no_oss"],
deps = [
"//tensorflow/contrib/lite/toco:graph_transformations",
"//tensorflow/contrib/lite/toco:model",
@@ -33,6 +35,7 @@ tf_cc_test(
tf_cc_test(
name = "resolve_constant_concatenation_test",
srcs = ["resolve_constant_concatenation_test.cc"],
+ tags = ["no_oss"],
deps = [
"//tensorflow/contrib/lite/toco:graph_transformations",
"//tensorflow/contrib/lite/toco:model",
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/unfuse_activation_functions.cc b/tensorflow/contrib/lite/toco/graph_transformations/unfuse_activation_functions.cc
index 2c7046c8c7..69bad2fa89 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/unfuse_activation_functions.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/unfuse_activation_functions.cc
@@ -64,7 +64,14 @@ bool UnfuseActivationFunctions::Run(Model* model, std::size_t op_index) {
const string& tmp_array_name =
AvailableArrayName(*model, op->outputs[0] + "_unfused");
CHECK(!model->HasArray(tmp_array_name));
- model->GetOrCreateArray(tmp_array_name);
+
+ const auto& output_array = model->GetArray(op->outputs[0]);
+ auto& tmp_array = model->GetOrCreateArray(tmp_array_name);
+ if (output_array.quantization_params) {
+ tmp_array.GetOrCreateQuantizationParams() =
+ output_array.GetQuantizationParams();
+ }
+
ac_op->inputs = {tmp_array_name};
op->outputs = {tmp_array_name};
return true;
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/unpartition_embedding_lookup.cc b/tensorflow/contrib/lite/toco/graph_transformations/unpartition_embedding_lookup.cc
index cbea39bcc0..dd9e26e68b 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/unpartition_embedding_lookup.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/unpartition_embedding_lookup.cc
@@ -187,6 +187,7 @@ bool UnpartitionEmbeddingLookup::Run(Model* model, std::size_t op_index) {
AvailableArrayName(*model, gather_ops[0]->inputs[0] + "_permuted/perm"));
gather_params_permute_op->outputs.push_back(
AvailableArrayName(*model, gather_ops[0]->inputs[0] + "_permuted"));
+ gather_params_permute_op->axis = {0};
op_it = model->operators.emplace(op_it, gather_params_permute_op) + 1;
model->GetOrCreateArray(gather_params_permute_op->outputs[0]);
const auto& partition_array = model->GetArray(gather_ops[0]->inputs[0]);
@@ -212,6 +213,7 @@ bool UnpartitionEmbeddingLookup::Run(Model* model, std::size_t op_index) {
mod_op->inputs[0]};
merged_gather_op->outputs = {stitch_op->outputs[0]};
merged_gather_op->input_rank = partition_array.shape().dimensions_count();
+ merged_gather_op->axis = {0};
model->operators.emplace(op_it, merged_gather_op);
AddMessageF(
diff --git a/tensorflow/contrib/lite/toco/graph_transformations/unroll_batch_matmul.cc b/tensorflow/contrib/lite/toco/graph_transformations/unroll_batch_matmul.cc
index da81ea2ff3..5f0cece67a 100644
--- a/tensorflow/contrib/lite/toco/graph_transformations/unroll_batch_matmul.cc
+++ b/tensorflow/contrib/lite/toco/graph_transformations/unroll_batch_matmul.cc
@@ -76,7 +76,7 @@ bool UnrollBatchMatMul::Run(Model* model, std::size_t op_index) {
AddMessageF("Unrolling BatchMatMul %s %d times", LogName(*batch_op),
batch_count);
auto tail_it = batch_op_it;
- std::vector<string> stack_inputs;
+ std::vector<string> pack_inputs;
for (int batch = 0; batch < batch_count; ++batch) {
std::string batch_name =
std::string(batch_op->outputs[0]) + "_b" + std::to_string(batch);
@@ -146,15 +146,15 @@ bool UnrollBatchMatMul::Run(Model* model, std::size_t op_index) {
tail_it = model->operators.emplace(tail_it, matmul_op) + 1;
// Add to stack.
- stack_inputs.push_back(matmul_op->outputs[0]);
+ pack_inputs.push_back(matmul_op->outputs[0]);
}
- // The stack that will join all the individual matmul results together.
- auto* stack_op = new StackOperator;
- stack_op->inputs = stack_inputs;
- stack_op->outputs = {batch_op->outputs[0]};
- stack_op->axis = 0;
- model->operators.emplace(tail_it, stack_op);
+ // The pack that will join all the individual matmul results together.
+ auto* pack_op = new PackOperator;
+ pack_op->inputs = pack_inputs;
+ pack_op->outputs = {batch_op->outputs[0]};
+ pack_op->axis = 0;
+ model->operators.emplace(tail_it, pack_op);
// Remove the old batch matmul now that we've unrolled.
batch_op_it = model->operators.begin();
diff --git a/tensorflow/contrib/lite/toco/import_tensorflow.cc b/tensorflow/contrib/lite/toco/import_tensorflow.cc
index bc439a2feb..032c863945 100644
--- a/tensorflow/contrib/lite/toco/import_tensorflow.cc
+++ b/tensorflow/contrib/lite/toco/import_tensorflow.cc
@@ -755,6 +755,9 @@ tensorflow::Status ConvertFakeQuantWithMinMaxArgs(
op->outputs.push_back(node.name());
// tf.fake_quant_with_min_max_args num_bits defaults to 8.
op->num_bits = HasAttr(node, "num_bits") ? GetIntAttr(node, "num_bits") : 8;
+ if (HasAttr(node, "narrow_range")) {
+ op->narrow_range = GetBoolAttr(node, "narrow_range");
+ }
model->operators.emplace_back(op);
return tensorflow::Status::OK();
}
@@ -774,6 +777,9 @@ tensorflow::Status ConvertFakeQuantWithMinMaxVars(
}
op->outputs.push_back(node.name());
op->num_bits = HasAttr(node, "num_bits") ? GetIntAttr(node, "num_bits") : 8;
+ if (HasAttr(node, "narrow_range")) {
+ op->narrow_range = GetBoolAttr(node, "narrow_range");
+ }
model->operators.emplace_back(op);
return tensorflow::Status::OK();
}
@@ -799,22 +805,6 @@ tensorflow::Status ConvertSqueezeOperator(
return tensorflow::Status::OK();
}
-tensorflow::Status ConvertSumOperator(
- const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
- Model* model) {
- CHECK_EQ(node.op(), "Sum");
- TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
- auto* op = new TensorFlowSumOperator;
- op->inputs.push_back(node.input(0));
- op->inputs.push_back(node.input(1));
- op->outputs.push_back(node.name());
- model->operators.emplace_back(op);
- if (HasAttr(node, "keep_dims")) {
- op->keep_dims = GetBoolAttr(node, "keep_dims");
- }
- return tensorflow::Status::OK();
-}
-
tensorflow::Status ConvertSplitOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
Model* model) {
@@ -1052,38 +1042,6 @@ tensorflow::Status ConvertSimpleOperator(
return ConvertSimpleOperator<Op>(node, tf_import_flags, model);
}
-tensorflow::Status ConvertMaxOperator(
- const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
- Model* model) {
- CHECK_EQ(node.op(), "Max");
- TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
- auto* op = new TensorFlowMaxOperator;
- op->inputs.push_back(node.input(0));
- op->inputs.push_back(node.input(1));
- op->outputs.push_back(node.name());
- model->operators.emplace_back(op);
- if (HasAttr(node, "keep_dims")) {
- op->keep_dims = GetBoolAttr(node, "keep_dims");
- }
- return tensorflow::Status::OK();
-}
-
-tensorflow::Status ConvertMinOperator(
- const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
- Model* model) {
- CHECK_EQ(node.op(), "Min");
- TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
- auto* op = new TensorFlowMinOperator;
- op->inputs.push_back(node.input(0));
- op->inputs.push_back(node.input(1));
- op->outputs.push_back(node.name());
- model->operators.emplace_back(op);
- if (HasAttr(node, "keep_dims")) {
- op->keep_dims = GetBoolAttr(node, "keep_dims");
- }
- return tensorflow::Status::OK();
-}
-
tensorflow::Status ConvertUnsupportedOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
Model* model) {
@@ -1223,8 +1181,17 @@ tensorflow::Status ConvertGatherOperator(
auto* op = new GatherOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
- // TODO(ahentz): we currently ignore the third tensor in GatherV2 but we
- // should read it an pass it on to the TF Lite Interpreter.
+ if (node.input_size() >= 3) {
+ // GatherV2 form where we are provided an axis. It may be either a constant
+ // or a runtime-defined value, so we just wire up the array and let
+ // ResolveGatherAttributes take care of it later on.
+ const auto axis_data_type = GetDataTypeAttr(node, "Taxis");
+ CHECK(axis_data_type == DT_INT32 || axis_data_type == DT_INT64);
+ op->inputs.push_back(node.input(2));
+ } else {
+ // Gather form that assumes axis=0.
+ op->axis = {0};
+ }
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return tensorflow::Status::OK();
@@ -1406,12 +1373,12 @@ tensorflow::Status ConvertBatchToSpaceNDOperator(
return tensorflow::Status::OK();
}
-tensorflow::Status ConvertMeanOperator(
+template <typename T>
+tensorflow::Status ConvertReduceOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
Model* model) {
- CHECK_EQ(node.op(), "Mean");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
- auto* op = new MeanOperator;
+ auto* op = new T;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
@@ -1544,11 +1511,15 @@ tensorflow::Status ConvertRangeOperator(
return tensorflow::Status::OK();
}
-tensorflow::Status ConvertStackOperator(
+// Note that it's easy to confuse/conflate "Stack" and "Pack" operators, but
+// they aren't the same thing. tf.stack results in a "Pack" operator. "Stack"
+// operators also exist, but involve manipulating the TF runtime stack, and are
+// not directly related to tf.stack() usage.
+tensorflow::Status ConvertPackOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
Model* model) {
- CHECK((node.op() == "Stack") || (node.op() == "Pack"));
- auto* op = new StackOperator;
+ CHECK_EQ(node.op(), "Pack");
+ auto op = absl::make_unique<PackOperator>();
const int num_inputs = GetInputsCount(node, tf_import_flags);
QCHECK_GE(num_inputs, 1)
<< node.op()
@@ -1558,10 +1529,11 @@ tensorflow::Status ConvertStackOperator(
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
- // Both "Stack" and "Pack" have the "axis" attribute.
+ op->values_count = HasAttr(node, "N") ? GetIntAttr(node, "N") : num_inputs;
op->axis = HasAttr(node, "axis") ? GetIntAttr(node, "axis") : 0;
+ op->dtype = ConvertDataType(toco::GetDataTypeAttr(node, "T"));
op->outputs.push_back(node.name());
- model->operators.emplace_back(op);
+ model->operators.emplace_back(std::move(op));
return tensorflow::Status::OK();
}
@@ -1607,6 +1579,24 @@ tensorflow::Status ConvertShapeOperator(
return tensorflow::Status::OK();
}
+tensorflow::Status ConvertAnyOperator(
+ const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
+ Model* model) {
+ CHECK_EQ(node.op(), "Any");
+ TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
+ const auto idx_type =
+ HasAttr(node, "Tidx") ? GetDataTypeAttr(node, "Tidx") : DT_INT32;
+ CHECK(idx_type == DT_INT32);
+ auto op = absl::make_unique<AnyOperator>();
+ op->inputs.push_back(node.input(0));
+ op->inputs.push_back(node.input(1));
+ op->outputs.push_back(node.name());
+ op->keep_dims =
+ HasAttr(node, "keep_dims") ? GetBoolAttr(node, "keep_dims") : false;
+ model->operators.push_back(std::move(op));
+ return tensorflow::Status::OK();
+}
+
void StripCaretFromArrayNames(Model* model) {
for (auto& op : model->operators) {
for (auto& input : op->inputs) {
@@ -1842,6 +1832,7 @@ ConverterMapType GetTensorFlowNodeConverterMap() {
{"Add", ConvertSimpleOperator<AddOperator, 2>},
{"AddN", ConvertSimpleOperator<AddNOperator>},
{"All", ConvertSimpleOperator<TensorFlowAllOperator>},
+ {"Any", ConvertAnyOperator},
{"ArgMax", ConvertArgMinMaxOperator<ArgMaxOperator, kArgMax>},
{"ArgMin", ConvertArgMinMaxOperator<ArgMinOperator, kArgMin>},
{"Assert", ConvertSimpleOperator<TensorFlowAssertOperator>},
@@ -1884,28 +1875,30 @@ ConverterMapType GetTensorFlowNodeConverterMap() {
{"Less", ConvertSimpleOperator<TensorFlowLessOperator, 2>},
{"LessEqual", ConvertSimpleOperator<TensorFlowLessEqualOperator, 2>},
{"Log", ConvertSimpleOperator<LogOperator, 1>},
- {"Log", ConvertSimpleOperator<LogOperator, 1>},
{"LogSoftmax", ConvertSimpleOperator<LogSoftmaxOperator, 1>},
+ {"LogicalAnd", ConvertSimpleOperator<LogicalAndOperator, 2>},
+ {"LogicalNot", ConvertSimpleOperator<LogicalNotOperator, 1>},
{"MatMul", ConvertMatMulOperator},
- {"Max", ConvertMaxOperator},
+ {"Max", ConvertReduceOperator<TensorFlowMaxOperator>},
{"MaxPool", ConvertMaxPoolOperator},
{"Maximum", ConvertSimpleOperator<TensorFlowMaximumOperator, 2>},
- {"Mean", ConvertMeanOperator},
+ {"Mean", ConvertReduceOperator<MeanOperator>},
{"Merge", ConvertSimpleOperator<TensorFlowMergeOperator, 2>},
- {"Min", ConvertMinOperator},
+ {"Min", ConvertReduceOperator<TensorFlowMinOperator>},
{"Minimum", ConvertSimpleOperator<TensorFlowMinimumOperator, 2>},
{"Mul", ConvertSimpleOperator<MulOperator, 2>},
{"Neg", ConvertSimpleOperator<NegOperator, 1>},
{"NextIteration", ConvertOperatorSpecialCasedAsRNNBackEdge},
{"NoOp", ConvertNoOpOperator},
{"NotEqual", ConvertSimpleOperator<TensorFlowNotEqualOperator, 2>},
- {"Pack", ConvertStackOperator},
+ {"Pack", ConvertPackOperator},
{"Pad", ConvertSimpleOperator<PadOperator, 2>},
{"PadV2", ConvertSimpleOperator<PadV2Operator, 3>},
{"ParallelDynamicStitch", ConvertDynamicStitchOperator},
{"Placeholder", ConvertPlaceholderOperator},
{"PlaceholderWithDefault", ConvertIdentityOperator},
{"Pow", ConvertSimpleOperator<PowOperator, 2>},
+ {"Prod", ConvertReduceOperator<TensorFlowProdOperator>},
{"RandomUniform", ConvertRandomUniform},
{"Range", ConvertRangeOperator},
{"Rank", ConvertSimpleOperator<RankOperator, 1>},
@@ -1928,11 +1921,10 @@ ConverterMapType GetTensorFlowNodeConverterMap() {
{"Sqrt", ConvertSimpleOperator<TensorFlowSqrtOperator, 1>},
{"Square", ConvertSimpleOperator<TensorFlowSquareOperator, 1>},
{"Squeeze", ConvertSqueezeOperator},
- {"Stack", ConvertStackOperator},
{"StopGradient", ConvertIdentityOperator},
{"StridedSlice", ConvertStridedSliceOperator},
{"Sub", ConvertSimpleOperator<SubOperator, 2>},
- {"Sum", ConvertSumOperator},
+ {"Sum", ConvertReduceOperator<TensorFlowSumOperator>},
{"Svdf", ConvertSvdfOperator},
{"Switch", ConvertSwitchOperator},
{"Tanh", ConvertSimpleOperator<TanhOperator, 1>},
diff --git a/tensorflow/contrib/lite/toco/model.h b/tensorflow/contrib/lite/toco/model.h
index 8660464fdb..d629787939 100644
--- a/tensorflow/contrib/lite/toco/model.h
+++ b/tensorflow/contrib/lite/toco/model.h
@@ -23,6 +23,7 @@ limitations under the License.
#include <unordered_map>
#include <vector>
+#include "absl/types/optional.h"
#include "tensorflow/contrib/lite/toco/model_flags.pb.h"
#include "tensorflow/contrib/lite/toco/runtime/types.h"
#include "tensorflow/contrib/lite/toco/toco_port.h"
@@ -81,10 +82,11 @@ enum class OperatorType : uint8 {
kResizeBilinear,
kSin,
kSpaceToBatchND,
- kStack,
+ kPack,
kBatchToSpaceND,
kPad,
kPadV2,
+ kReduceProd, // Reduction product
kStridedSlice,
kSlice,
kSqueeze,
@@ -106,10 +108,10 @@ enum class OperatorType : uint8 {
kIdentity,
kLess,
kLessEqual,
- kMax, // Reduction Max
- kMaximum, // Element-wise Maximum
- kMin, // Reduction Min
- kMinimum, // Element-wise Minimum
+ kReduceMax, // Reduction Max
+ kMaximum, // Element-wise Maximum
+ kReduceMin, // Reduction Min
+ kMinimum, // Element-wise Minimum
kMatMul,
kMerge,
kNeg,
@@ -141,6 +143,9 @@ enum class OperatorType : uint8 {
kNotEqual,
kPow,
kArgMin,
+ kAny,
+ kLogicalAnd,
+ kLogicalNot,
};
// Helper to deal with TensorFlow arrays using a different ordering of
@@ -791,6 +796,7 @@ struct FakeQuantOperator : Operator {
FakeQuantOperator() : Operator(OperatorType::kFakeQuant) {}
std::unique_ptr<MinMax> minmax;
int num_bits = 8;
+ bool narrow_range = false;
};
// Element-wise division operator.
@@ -1155,10 +1161,12 @@ struct TensorFlowRsqrtOperator : Operator {
// Inputs: this operator accepts any number >= 1 of inputs.
// inputs[i]: the i-th array to merge.
//
-// TensorFlow equivalent: Stack or Pack
-struct StackOperator : Operator {
- StackOperator() : Operator(OperatorType::kStack) {}
+// TensorFlow equivalent: Pack
+struct PackOperator : Operator {
+ PackOperator() : Operator(OperatorType::kPack) {}
+ int values_count = 0;
int axis = 0;
+ ArrayDataType dtype = ArrayDataType::kNone;
};
// Shape operator. Extracts the shape of the tensor.
@@ -1228,6 +1236,19 @@ struct SubOperator : Operator {
// TensorFlow equivalent: Sum
struct TensorFlowSumOperator : Operator {
TensorFlowSumOperator() : Operator(OperatorType::kSum) {}
+ std::vector<int> axis;
+ bool keep_dims = false;
+};
+
+// Prod reduction: computes the product of all entries across the given axes.
+//
+// Inputs:
+// inputs[0]: required: the input array
+//
+// TensorFlow equivalent: Prod
+struct TensorFlowProdOperator : Operator {
+ TensorFlowProdOperator() : Operator(OperatorType::kReduceProd) {}
+ std::vector<int> axis;
bool keep_dims = false;
};
@@ -1387,29 +1408,27 @@ struct TensorFlowNotEqualOperator : Operator {
TensorFlowNotEqualOperator() : Operator(OperatorType::kNotEqual) {}
};
-// Global max reduction: computes the max of all of entries in the input array.
-// Thus the output is "0-dimensional": it consists of a single scalar value.
+// Max reduction: computes the max of all entries across the given axes.
//
// Inputs:
// inputs[0]: required: the input array
//
-// TensorFlow equivalent: Max --- except that we only support the special case
-// of global reduction across all dimensions.
+// TensorFlow equivalent: Max
struct TensorFlowMaxOperator : Operator {
- TensorFlowMaxOperator() : Operator(OperatorType::kMax) {}
+ TensorFlowMaxOperator() : Operator(OperatorType::kReduceMax) {}
+ std::vector<int> axis;
bool keep_dims = false;
};
-// Global min reduction: computes the min of all of entries in the input array.
-// Thus the output is "0-dimensional": it consists of a single scalar value.
+// Min reduction: computes the min of all entries across the given axes.
//
// Inputs:
// inputs[0]: required: the input array
//
-// TensorFlow equivalent: Min --- except that we only support the special case
-// of global reduction across all dimensions.
+// TensorFlow equivalent: Min
struct TensorFlowMinOperator : Operator {
- TensorFlowMinOperator() : Operator(OperatorType::kMin) {}
+ TensorFlowMinOperator() : Operator(OperatorType::kReduceMin) {}
+ std::vector<int> axis;
bool keep_dims = false;
};
@@ -1510,11 +1529,15 @@ struct FloorOperator : Operator {
// Inputs:
// inputs[0]: required: the params array
// inputs[1]: required: the indices to gather
+// inputs[2]: optional: axis
//
// TensorFlow equivalent: Gather
struct GatherOperator : Operator {
GatherOperator() : Operator(OperatorType::kGather) {}
- int axis = 0;
+ // Axis is populated explicitly, or resolved from the axis input by
+ // ResolveGatherAttributes. An unset axis indicates that the axis has not
+ // yet been resolved.
+ absl::optional<int> axis;
int input_rank = 0;
};
@@ -1670,6 +1693,39 @@ struct PowOperator : Operator {
PowOperator() : Operator(OperatorType::kPow) {}
};
+// Any operator:
+//
+// Inputs:
+// inputs[0]: required: a boolean input tensor.
+// inputs[1]: required: reduction_indices.
+//
+// TensorFlow equivalent: tf.reduce_any.
+struct AnyOperator : Operator {
+ AnyOperator() : Operator(OperatorType::kAny) {}
+ bool keep_dims = false;
+};
+
+// LogicalAnd operator:
+//
+// Inputs:
+// inputs[0]: required: a boolean tensor.
+// inputs[1]: required: a boolean tensor.
+//
+// TensorFlow equivalent: tf.logical_and.
+struct LogicalAndOperator : Operator {
+ LogicalAndOperator() : Operator(OperatorType::kLogicalAnd) {}
+};
+
+// LogicalNot operator:
+//
+// Inputs:
+// inputs[0]: required: a boolean tensor.
+//
+// TensorFlow equivalent: tf.logical_not.
+struct LogicalNotOperator : Operator {
+ LogicalNotOperator() : Operator(OperatorType::kLogicalNot) {}
+};
+
// Alloc's are used for transient arrays only. An Alloc specifies which interval
// of the "transient_data" workspace buffer passed to inference functions, is to
// be used for the transient array at hand. The 'start' and 'end' values are
@@ -1854,6 +1910,40 @@ struct Array {
// If this is non-null, then these quantization parameters are to be used
// to assign a meaning as real numbers to the elements of this array.
std::unique_ptr<QuantizationParams> quantization_params;
+ // narrow_range is a detail of how toco handles FakeQuant operators with
+ // narrow_range, see
+ // https://www.tensorflow.org/api_docs/python/tf/fake_quant_with_min_max_vars
+ //
+ // For more context about what that is useful for, see the big comment in
+ // graph_transformations/ensure_uint8_weights_safe_for_fast_int8_kernels.cc
+ //
+ // The narrow_range flag applies only to quantized arrays, and changes
+ // their quantization in the following way when it is set to 'true':
+ // 1. The computation of {zero_point, scale} from {min, max} needs to be
+ // amended so that the real min value will get quantized to
+ // (min_quantized_value + 1) instead of just (min_quantized_value).
+ // E.g. for uint8 quantization, the real min value should get quantized to
+ // the uint8 value 1, not 0.
+ // 2. Quantized values should get clamped to the interval
+ // [min_quantized_value + 1, max_quantized_value]. Equivalently, the
+ // min_quantized_value should get nudged to (min_quantized_value + 1).
+ // The reason why 1. does not imply 2. is that real values may not belong to
+ // the stated [min, max] interval. Concretely, weights recorded at the last
+ // learning step may not fall in the [min, max] interval recorded over
+ // previous learning steps, as the values evolve across learning steps.
+ //
+ // Rationale why this is directly a field on Array:
+ // - This can't be just a field on FakeQuantOperator, because
+ // FakeQuantOperators are gone (DropFakeQuant) before we get to using that
+ // information (Quantize). We need a place to store that bit in the interim.
+ // - This can't be in QuantizationParams because we need to record this
+ // ahead of quantization, and QuantizationParams are only created during
+ // quantization.
+ // - This could be in MinMax, but that would be an abuse of what MinMax is
+ // about, and would break existing code that assumes that a MinMax is just
+ // a min and a max. Unlike MinMax which is agnostic as to the quantized
+ // data type, narrow_range refers to values in the quantized data type.
+ bool narrow_range = false;
private:
std::unique_ptr<Shape> array_shape;
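
As a rough numeric illustration of point 1 in the narrow_range comment above: using the standard affine quantization formula (an assumption here — the real derivation lives in toco's quantization utilities and also nudges the zero point so 0.0 is exactly representable), bumping the minimum quantized value by one makes the real minimum map to 1 rather than 0 for uint8. The helper below is hypothetical and illustrative only:

```cpp
#include <cmath>
#include <iostream>

// Illustrative only: affine quantization of a real range [rmin, rmax] onto the
// integer range [qmin, qmax]. With narrow_range, qmin is bumped by 1 so the
// real minimum maps to (min_quantized_value + 1).
void ChooseParams(double rmin, double rmax, bool narrow_range, double* scale,
                  int* zero_point) {
  double qmin = 0.0;
  const double qmax = 255.0;  // uint8
  if (narrow_range) qmin += 1.0;
  *scale = (rmax - rmin) / (qmax - qmin);
  *zero_point = static_cast<int>(std::round(qmin - rmin / *scale));
}

int main() {
  double scale;
  int zero_point;
  ChooseParams(-1.0, 1.0, /*narrow_range=*/true, &scale, &zero_point);
  // rmin == -1.0 quantizes to round(zero_point + rmin / scale) == 1 == qmin.
  std::cout << "scale=" << scale << " zero_point=" << zero_point << "\n";
  return 0;
}
```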
diff --git a/tensorflow/contrib/lite/toco/model_cmdline_flags.cc b/tensorflow/contrib/lite/toco/model_cmdline_flags.cc
index 06072d1fcb..d34da63e43 100644
--- a/tensorflow/contrib/lite/toco/model_cmdline_flags.cc
+++ b/tensorflow/contrib/lite/toco/model_cmdline_flags.cc
@@ -322,6 +322,10 @@ void ReadModelFlagsFromCommandLineFlags(
for (int i = 0; i < input_shapes.size(); ++i) {
auto* shape = model_flags->mutable_input_arrays(i)->mutable_shape();
shape->clear_dims();
+ // Treat an empty input shape as a scalar.
+ if (input_shapes[i].empty()) {
+ continue;
+ }
for (const auto& dim_str : absl::StrSplit(input_shapes[i], ',')) {
int size;
CHECK(absl::SimpleAtoi(dim_str, &size))
diff --git a/tensorflow/contrib/lite/toco/python/BUILD b/tensorflow/contrib/lite/toco/python/BUILD
index 93fe756a55..33c5b16462 100644
--- a/tensorflow/contrib/lite/toco/python/BUILD
+++ b/tensorflow/contrib/lite/toco/python/BUILD
@@ -53,5 +53,8 @@ tf_py_test(
data = [
":toco_from_protos",
],
- tags = ["no_pip"],
+ tags = [
+ "no_oss",
+ "no_pip",
+ ],
)
diff --git a/tensorflow/contrib/lite/toco/tensorflow_graph_matching/BUILD b/tensorflow/contrib/lite/toco/tensorflow_graph_matching/BUILD
index 336e94de1e..ea1fc2827e 100644
--- a/tensorflow/contrib/lite/toco/tensorflow_graph_matching/BUILD
+++ b/tensorflow/contrib/lite/toco/tensorflow_graph_matching/BUILD
@@ -60,6 +60,7 @@ cc_library(
tf_cc_test(
name = "resolve_svdf_test",
srcs = ["resolve_svdf_test.cc"],
+ tags = ["no_oss"],
deps = [
":cluster",
":cluster_utils",
diff --git a/tensorflow/contrib/lite/toco/tflite/BUILD b/tensorflow/contrib/lite/toco/tflite/BUILD
index a02f90988b..83e977d7b3 100644
--- a/tensorflow/contrib/lite/toco/tflite/BUILD
+++ b/tensorflow/contrib/lite/toco/tflite/BUILD
@@ -37,6 +37,7 @@ tf_cc_test(
srcs = [
"operator_test.cc",
],
+ tags = ["no_oss"],
deps = [
":operator",
"//tensorflow/contrib/lite/toco:tooling_util",
@@ -66,6 +67,7 @@ tf_cc_test(
srcs = [
"types_test.cc",
],
+ tags = ["no_oss"],
deps = [
":types",
"@com_google_googletest//:gtest_main",
@@ -98,6 +100,7 @@ tf_cc_test(
srcs = [
"export_test.cc",
],
+ tags = ["no_oss"],
deps = [
":export",
"//tensorflow/contrib/lite/schema:schema_fbs",
@@ -131,6 +134,7 @@ tf_cc_test(
srcs = [
"import_test.cc",
],
+ tags = ["no_oss"],
deps = [
":import",
"//tensorflow/contrib/lite:schema_fbs_version",
diff --git a/tensorflow/contrib/lite/toco/tflite/export_test.cc b/tensorflow/contrib/lite/toco/tflite/export_test.cc
index d1fdbcb8e9..a95937ba0f 100644
--- a/tensorflow/contrib/lite/toco/tflite/export_test.cc
+++ b/tensorflow/contrib/lite/toco/tflite/export_test.cc
@@ -262,7 +262,7 @@ TEST_F(VersionedOpExportTest, Export) {
EXPECT_EQ(1, (*operators)[1]->opcode_index());
}
-// TODO(ahentz): tests for tensors, inputs, outpus, opcodes and operators.
+// TODO(ahentz): tests for tensors, inputs, outputs, opcodes and operators.
} // namespace
} // namespace tflite
diff --git a/tensorflow/contrib/lite/toco/tflite/operator.cc b/tensorflow/contrib/lite/toco/tflite/operator.cc
index 8377ba6a03..4b2ef756cc 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator.cc
@@ -290,8 +290,8 @@ class FakeQuant
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
- return ::tflite::CreateFakeQuantOptions(*builder, op.minmax->min,
- op.minmax->max, op.num_bits);
+ return ::tflite::CreateFakeQuantOptions(
+ *builder, op.minmax->min, op.minmax->max, op.num_bits, op.narrow_range);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
@@ -300,9 +300,13 @@ class FakeQuant
minmax->max = options.max();
op->minmax.reset(minmax);
op->num_bits = options.num_bits();
+ op->narrow_range = options.narrow_range();
}
- int GetVersion(const Operator& op) const override { return 1; }
+ int GetVersion(const Operator& op) const override {
+ const auto& fq_op = static_cast<const FakeQuantOperator&>(op);
+ return fq_op.narrow_range ? 2 : 1;
+ }
};
class FullyConnected
@@ -366,12 +370,13 @@ class Gather : public BuiltinOperator<GatherOperator, ::tflite::GatherOptions,
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
- return ::tflite::CreateGatherOptions(*builder, op.axis);
+ int axis = op.axis ? op.axis.value() : 0;
+ return ::tflite::CreateGatherOptions(*builder, axis);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
- op->axis = options.axis();
+ op->axis = {options.axis()};
}
int GetVersion(const Operator& op) const override { return 1; }
@@ -763,6 +768,44 @@ class Sum
int GetVersion(const Operator& op) const override { return 1; }
};
+class ReduceMax
+ : public BuiltinOperator<TensorFlowMaxOperator, ::tflite::ReducerOptions,
+ ::tflite::BuiltinOptions_ReducerOptions> {
+ public:
+ using BuiltinOperator::BuiltinOperator;
+ flatbuffers::Offset<TfLiteOptions> WriteOptions(
+ const TocoOperator& op,
+ flatbuffers::FlatBufferBuilder* builder) const override {
+ return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
+ }
+
+ void ReadOptions(const TfLiteOptions& options,
+ TocoOperator* op) const override {
+ op->keep_dims = options.keep_dims();
+ }
+
+ int GetVersion(const Operator& op) const override { return 1; }
+};
+
+class ReduceProd
+ : public BuiltinOperator<TensorFlowProdOperator, ::tflite::ReducerOptions,
+ ::tflite::BuiltinOptions_ReducerOptions> {
+ public:
+ using BuiltinOperator::BuiltinOperator;
+ flatbuffers::Offset<TfLiteOptions> WriteOptions(
+ const TocoOperator& op,
+ flatbuffers::FlatBufferBuilder* builder) const override {
+ return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
+ }
+
+ void ReadOptions(const TfLiteOptions& options,
+ TocoOperator* op) const override {
+ op->keep_dims = options.keep_dims();
+ }
+
+ int GetVersion(const Operator& op) const override { return 1; }
+};
+
class ResizeBilinear
: public BuiltinOperator<ResizeBilinearOperator,
::tflite::ResizeBilinearOptions,
@@ -970,6 +1013,26 @@ class ExpandDims
int GetVersion(const Operator& op) const override { return 1; }
};
+class Pack : public BuiltinOperator<PackOperator, ::tflite::PackOptions,
+ ::tflite::BuiltinOptions_PackOptions> {
+ public:
+ using BuiltinOperator::BuiltinOperator;
+
+ flatbuffers::Offset<TfLiteOptions> WriteOptions(
+ const TocoOperator& op,
+ flatbuffers::FlatBufferBuilder* builder) const override {
+ return ::tflite::CreatePackOptions(*builder, op.values_count, op.axis);
+ }
+
+ void ReadOptions(const TfLiteOptions& options,
+ TocoOperator* op) const override {
+ op->values_count = options.values_count();
+ op->axis = options.axis();
+ }
+
+ int GetVersion(const Operator& op) const override { return 1; }
+};
+
class Shape
: public BuiltinOperator<TensorFlowShapeOperator, ::tflite::ShapeOptions,
::tflite::BuiltinOptions_ShapeOptions> {
@@ -1179,6 +1242,10 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
ops.emplace_back(
new Mean(::tflite::BuiltinOperator_MEAN, OperatorType::kMean));
ops.emplace_back(new Sum(::tflite::BuiltinOperator_SUM, OperatorType::kSum));
+ ops.emplace_back(new ReduceProd(::tflite::BuiltinOperator_REDUCE_PROD,
+ OperatorType::kReduceProd));
+ ops.emplace_back(new ReduceMax(::tflite::BuiltinOperator_REDUCE_MAX,
+ OperatorType::kReduceMax));
ops.emplace_back(new ResizeBilinear(::tflite::BuiltinOperator_RESIZE_BILINEAR,
OperatorType::kResizeBilinear));
ops.emplace_back(
@@ -1209,6 +1276,8 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
new Shape(::tflite::BuiltinOperator_SHAPE, OperatorType::kShape));
ops.emplace_back(new FakeQuant(::tflite::BuiltinOperator_FAKE_QUANT,
OperatorType::kFakeQuant));
+ ops.emplace_back(
+ new Pack(::tflite::BuiltinOperator_PACK, OperatorType::kPack));
// Custom Operators.
ops.emplace_back(
diff --git a/tensorflow/contrib/lite/toco/tflite/operator_test.cc b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
index ff2d35b1f5..44de6fbf64 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator_test.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator_test.cc
@@ -452,6 +452,16 @@ TEST_F(OperatorTest, BuiltinSparseToDense) {
EXPECT_EQ(op.validate_indices, output_toco_op->validate_indices);
}
+TEST_F(OperatorTest, BuiltinPack) {
+ PackOperator op;
+ op.values_count = 3;
+ op.axis = 1;
+ std::unique_ptr<toco::PackOperator> output_toco_op =
+ SerializeAndDeserialize(GetOperator("PACK", OperatorType::kPack), op);
+ EXPECT_EQ(op.values_count, output_toco_op->values_count);
+ EXPECT_EQ(op.axis, output_toco_op->axis);
+}
+
TEST_F(OperatorTest, TensorFlowUnsupported) {
TensorFlowUnsupportedOperator op;
op.tensorflow_op = "MyCustomUnsupportedOp";
diff --git a/tensorflow/contrib/lite/toco/toco_tooling.cc b/tensorflow/contrib/lite/toco/toco_tooling.cc
index 3ca36338eb..aa7f6996eb 100644
--- a/tensorflow/contrib/lite/toco/toco_tooling.cc
+++ b/tensorflow/contrib/lite/toco/toco_tooling.cc
@@ -55,7 +55,7 @@ void MakeGeneralGraphTransformationsSet(
transformations->Add(new ConvertExpandDimsToReshape);
transformations->Add(new ConvertSqueezeToReshape);
transformations->Add(new ConvertTrivialAddNToAdd);
- transformations->Add(new ConvertTrivialStackToReshape);
+ transformations->Add(new ConvertTrivialPackToReshape);
transformations->Add(new ConvertTrivialTileToConcat);
transformations->Add(new ConvertTrivialTransposeToReshape);
transformations->Add(new ConvertReorderAxes);
@@ -86,11 +86,11 @@ void MakeGeneralGraphTransformationsSet(
transformations->Add(new ResolveConstantBinaryOperator);
transformations->Add(new ResolveConstantFill);
transformations->Add(new ResolveConstantGather);
+ transformations->Add(new ResolveConstantPack);
transformations->Add(new ResolveConstantRandomUniform);
transformations->Add(new ResolveConstantRange);
transformations->Add(new ResolveConstantReshape);
transformations->Add(new ResolveConstantSlice);
- transformations->Add(new ResolveConstantStack);
transformations->Add(new ResolveConstantStridedSlice);
transformations->Add(new ResolveConstantTranspose);
transformations->Add(new ResolveConstantUnaryOperator);
@@ -105,17 +105,19 @@ void MakeGeneralGraphTransformationsSet(
transformations->Add(new IdentifyRelu1);
transformations->Add(new IdentifyPRelu);
transformations->Add(new RemoveTrivialBinaryOperator);
- transformations->Add(new ReadFakeQuantMinMax);
+ transformations->Add(new ResolveFakeQuantArgsFromVars);
+ transformations->Add(new ReadArrayMinmaxAndNarrowRangeFromFakeQuant);
transformations->Add(new ResolveSpaceToBatchNDAttributes);
transformations->Add(new ResolveBatchToSpaceNDAttributes);
transformations->Add(new ResolvePadAttributes);
transformations->Add(new ResolvePadV2Attributes);
transformations->Add(new ResolveStridedSliceAttributes);
transformations->Add(new ResolveSliceAttributes);
- transformations->Add(new ResolveMeanAttributes);
+ transformations->Add(new ResolveReduceAttributes);
transformations->Add(new ResolveConstantShapeOrRank);
transformations->Add(new MakeInitialDequantizeOperator);
transformations->Add(new UnpartitionEmbeddingLookup);
+ transformations->Add(new ResolveGatherAttributes);
}
bool SupportsQuantization(FileFormat format) {
diff --git a/tensorflow/contrib/lite/toco/tooling_util.cc b/tensorflow/contrib/lite/toco/tooling_util.cc
index 4ec74e351f..98e416b76e 100644
--- a/tensorflow/contrib/lite/toco/tooling_util.cc
+++ b/tensorflow/contrib/lite/toco/tooling_util.cc
@@ -350,16 +350,16 @@ const char* OperatorTypeName(OperatorType type) {
HANDLE_OPERATORTYPENAME_CASE(Less)
HANDLE_OPERATORTYPENAME_CASE(LessEqual)
HANDLE_OPERATORTYPENAME_CASE(MatMul)
- HANDLE_OPERATORTYPENAME_CASE(Max) // Reduction Max
- HANDLE_OPERATORTYPENAME_CASE(Maximum) // Element-wise Maximum
+ HANDLE_OPERATORTYPENAME_CASE(ReduceMax) // Reduction Max
+ HANDLE_OPERATORTYPENAME_CASE(Maximum) // Element-wise Maximum
HANDLE_OPERATORTYPENAME_CASE(Merge)
- HANDLE_OPERATORTYPENAME_CASE(Min) // Reduction Min
- HANDLE_OPERATORTYPENAME_CASE(Minimum) // Element-wise Minimum
+ HANDLE_OPERATORTYPENAME_CASE(ReduceMin) // Reduction Min
+ HANDLE_OPERATORTYPENAME_CASE(Minimum) // Element-wise Minimum
HANDLE_OPERATORTYPENAME_CASE(Neg)
+ HANDLE_OPERATORTYPENAME_CASE(Pack)
HANDLE_OPERATORTYPENAME_CASE(Pad)
HANDLE_OPERATORTYPENAME_CASE(PadV2)
HANDLE_OPERATORTYPENAME_CASE(StridedSlice)
- HANDLE_OPERATORTYPENAME_CASE(Stack)
HANDLE_OPERATORTYPENAME_CASE(Range)
HANDLE_OPERATORTYPENAME_CASE(Rank)
HANDLE_OPERATORTYPENAME_CASE(Reshape)
@@ -385,6 +385,7 @@ const char* OperatorTypeName(OperatorType type) {
HANDLE_OPERATORTYPENAME_CASE(SpaceToBatchND)
HANDLE_OPERATORTYPENAME_CASE(BatchToSpaceND)
HANDLE_OPERATORTYPENAME_CASE(Mean)
+ HANDLE_OPERATORTYPENAME_CASE(ReduceProd)
HANDLE_OPERATORTYPENAME_CASE(Svdf)
HANDLE_OPERATORTYPENAME_CASE(ArgMax)
HANDLE_OPERATORTYPENAME_CASE(ArgMin)
@@ -398,6 +399,9 @@ const char* OperatorTypeName(OperatorType type) {
HANDLE_OPERATORTYPENAME_CASE(Equal)
HANDLE_OPERATORTYPENAME_CASE(NotEqual)
HANDLE_OPERATORTYPENAME_CASE(Pow)
+ HANDLE_OPERATORTYPENAME_CASE(Any)
+ HANDLE_OPERATORTYPENAME_CASE(LogicalAnd)
+ HANDLE_OPERATORTYPENAME_CASE(LogicalNot)
default:
LOG(FATAL) << "Unhandled op type";
#undef HANDLE_OPERATORTYPENAME_CASE
@@ -939,8 +943,12 @@ void CheckEachArray(const Model& model) {
// shape.
CHECK(array->has_shape());
// Constant buffer should have a valid shape.
- for (int d : array->shape().dims()) {
- CHECK_GE(d, 1);
+ bool is_scalar =
+ array->shape().dimensions_count() == 1 && array->shape().dims(0) == 0;
+ if (!is_scalar) {
+ for (int d : array->shape().dims()) {
+ CHECK_GE(d, 1);
+ }
}
// The shape flat-size should agree with the buffer length.
CHECK_EQ(array->buffer->Length(),
@@ -1577,11 +1585,6 @@ void ResolveModelFlags(const ModelFlags& model_flags, Model* model) {
model);
}
- for (const auto& input_array : model->flags.input_arrays()) {
- if (input_array.has_shape()) {
- CHECK(input_array.shape().dims_size());
- }
- }
model->flags.set_change_concat_input_ranges(
model_flags.change_concat_input_ranges());
model->flags.set_allow_nonascii_arrays(model_flags.allow_nonascii_arrays());
diff --git a/tensorflow/contrib/lite/toco/tooling_util_test.cc b/tensorflow/contrib/lite/toco/tooling_util_test.cc
index 8609e5bedd..eb495646a2 100644
--- a/tensorflow/contrib/lite/toco/tooling_util_test.cc
+++ b/tensorflow/contrib/lite/toco/tooling_util_test.cc
@@ -39,6 +39,8 @@ std::vector<ShapePair> CreateShapePairs() {
{Shape({256, 256, 3}), Shape({256, 256, 3}), Agreement::kBroadcast},
{Shape({256, 256, 3}), Shape({3}), Agreement::kBroadcast},
{Shape({8, 1, 6, 1}), Shape({7, 1, 5}), Agreement::kBroadcast},
+ {Shape({}), Shape({3}), Agreement::kBroadcast},
+ {Shape({}), Shape({3, 1}), Agreement::kBroadcast},
// These extend (and therefore broadcast).
{Shape({3}), Shape({3}), Agreement::kExtend},
@@ -54,6 +56,7 @@ std::vector<ShapePair> CreateShapePairs() {
{Shape({15, 3, 5}), Shape({15, 1, 5}), Agreement::kBroadcastNotExtend},
{Shape({15, 3, 5}), Shape({3, 5}), Agreement::kBroadcastNotExtend},
{Shape({15, 3, 5}), Shape({3, 1}), Agreement::kBroadcastNotExtend},
+ {Shape({3, 1}), Shape({}), Agreement::kBroadcastNotExtend},
// These do not broadcast (and therefore also do not extend).
{Shape({3}), Shape({4}), Agreement::kNeither},
@@ -175,6 +178,20 @@ TEST(NumElementsTest, UnsignedInt64) {
EXPECT_EQ(status.error_message(), kLargeTensorMessage);
}
+TEST(NumElementsTest, Scalar) {
+ tensorflow::Status status = tensorflow::Status::OK();
+
+ int32_t count;
+ status = NumElements(std::vector<int32_t>{}, &count);
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(count, 1);
+
+ uint64_t countu64;
+ status = NumElements(std::vector<uint64_t>{}, &countu64);
+ EXPECT_TRUE(status.ok());
+ EXPECT_EQ(countu64, 1ULL);
+}
+
TEST(FusedActivationTest, DefaultsToUnfused) {
EXPECT_TRUE(OperatorSupportsFusedActivation(OperatorType::kAdd));
EXPECT_FALSE(OperatorSupportsFusedActivation(OperatorType::kNone));
diff --git a/tensorflow/contrib/lite/tools/BUILD b/tensorflow/contrib/lite/tools/BUILD
index d070018e83..0b26826403 100644
--- a/tensorflow/contrib/lite/tools/BUILD
+++ b/tensorflow/contrib/lite/tools/BUILD
@@ -53,6 +53,7 @@ cc_test(
"//tensorflow/contrib/lite:testdata/test_model_broken.bin",
],
tags = [
+ "no_oss",
"tflite_not_portable_android",
"tflite_not_portable_ios",
],
@@ -79,6 +80,7 @@ cc_test(
size = "small",
srcs = ["verifier_test.cc"],
tags = [
+ "no_oss",
"tflite_not_portable",
],
deps = [
diff --git a/tensorflow/contrib/lite/tools/benchmark/BUILD b/tensorflow/contrib/lite/tools/benchmark/BUILD
index 183a545295..2cb07eb6ec 100644
--- a/tensorflow/contrib/lite/tools/benchmark/BUILD
+++ b/tensorflow/contrib/lite/tools/benchmark/BUILD
@@ -10,11 +10,16 @@ load("//tensorflow/contrib/lite:build_def.bzl", "tflite_copts")
common_copts = ["-Wall"] + tflite_copts()
+cc_library(
+ name = "logging",
+ hdrs = ["logging.h"],
+ copts = common_copts,
+)
+
cc_binary(
name = "benchmark_model",
srcs = [
"benchmark_main.cc",
- "logging.h",
],
copts = common_copts,
linkopts = tflite_linkopts() + select({
@@ -26,6 +31,26 @@ cc_binary(
}),
deps = [
":benchmark_tflite_model_lib",
+ ":logging",
+ ],
+)
+
+cc_test(
+ name = "benchmark_test",
+ srcs = ["benchmark_test.cc"],
+ args = [
+ "--graph=$(location //tensorflow/contrib/lite:testdata/multi_add.bin)",
+ ],
+ data = ["//tensorflow/contrib/lite:testdata/multi_add.bin"],
+ tags = [
+ "tflite_not_portable_android",
+ "tflite_not_portable_ios",
+ ],
+ deps = [
+ ":benchmark_tflite_model_lib",
+ ":command_line_flags",
+ "//tensorflow/contrib/lite/testing:util",
+ "@com_google_googletest//:gtest",
],
)
@@ -58,6 +83,7 @@ cc_library(
copts = common_copts,
deps = [
":benchmark_model_lib",
+ ":logging",
"//tensorflow/contrib/lite:framework",
"//tensorflow/contrib/lite:string_util",
"//tensorflow/contrib/lite/kernels:builtin_ops",
@@ -70,23 +96,23 @@ cc_library(
name = "benchmark_params",
srcs = [
"benchmark_params.cc",
- "logging.h",
],
hdrs = ["benchmark_params.h"],
copts = common_copts,
+ deps = [":logging"],
)
cc_library(
name = "benchmark_model_lib",
srcs = [
"benchmark_model.cc",
- "logging.h",
],
hdrs = ["benchmark_model.h"],
copts = common_copts,
deps = [
":benchmark_params",
":command_line_flags",
+ ":logging",
"//tensorflow/contrib/lite:framework",
"//tensorflow/contrib/lite:string_util",
"//tensorflow/contrib/lite/kernels:builtin_ops",
diff --git a/tensorflow/contrib/lite/tools/benchmark/README.md b/tensorflow/contrib/lite/tools/benchmark/README.md
index 93769305bd..f1e257ad10 100644
--- a/tensorflow/contrib/lite/tools/benchmark/README.md
+++ b/tensorflow/contrib/lite/tools/benchmark/README.md
@@ -115,7 +115,7 @@ E.g. for running the benchmark on big cores on Pixel 2 with a single thread one
can use the following command:
```
-adb shell tasket f0 /data/local/tmp/benchmark_model \
+adb shell taskset f0 /data/local/tmp/benchmark_model \
--graph=/data/local/tmp/mobilenet_quant_v1_224.tflite \
--input_layer="input" \
--input_layer_shape="1,224,224,3" \
diff --git a/tensorflow/contrib/lite/tools/benchmark/benchmark_model.cc b/tensorflow/contrib/lite/tools/benchmark/benchmark_model.cc
index 19b9a9c7ba..f86c0445b0 100644
--- a/tensorflow/contrib/lite/tools/benchmark/benchmark_model.cc
+++ b/tensorflow/contrib/lite/tools/benchmark/benchmark_model.cc
@@ -84,7 +84,7 @@ std::vector<Flag> BenchmarkModel::GetFlags() {
};
}
-void BenchmarkModel::LogFlags() {
+void BenchmarkModel::LogParams() {
TFLITE_LOG(INFO) << "Num runs: [" << params_.Get<int32_t>("num_runs") << "]";
TFLITE_LOG(INFO) << "Inter-run delay (seconds): ["
<< params_.Get<float>("run_delay") << "]";
@@ -122,12 +122,18 @@ Stat<int64_t> BenchmarkModel::Run(int num_times, RunType run_type) {
return run_stats;
}
+bool BenchmarkModel::ValidateParams() { return true; }
+
void BenchmarkModel::Run(int argc, char **argv) {
if (!ParseFlags(argc, argv)) {
return;
}
+ Run();
+}
- LogFlags();
+void BenchmarkModel::Run() {
+ ValidateParams();
+ LogParams();
listeners_.OnBenchmarkStart(params_);
int64_t initialization_start_us = profiling::time::NowMicros();
@@ -155,7 +161,7 @@ bool BenchmarkModel::ParseFlags(int argc, char **argv) {
TFLITE_LOG(ERROR) << usage;
return false;
}
- return ValidateFlags();
+ return true;
}
} // namespace benchmark
diff --git a/tensorflow/contrib/lite/tools/benchmark/benchmark_model.h b/tensorflow/contrib/lite/tools/benchmark/benchmark_model.h
index 3c7063b2d4..677a1ee68c 100644
--- a/tensorflow/contrib/lite/tools/benchmark/benchmark_model.h
+++ b/tensorflow/contrib/lite/tools/benchmark/benchmark_model.h
@@ -137,16 +137,17 @@ class BenchmarkModel {
BenchmarkModel();
BenchmarkModel(BenchmarkParams params) : params_(std::move(params)) {}
virtual ~BenchmarkModel() {}
- bool ParseFlags(int argc, char** argv);
virtual void Init() = 0;
void Run(int argc, char** argv);
+ virtual void Run();
void AddListener(BenchmarkListener* listener) {
listeners_.AddListener(listener);
}
protected:
- virtual void LogFlags();
- virtual bool ValidateFlags() { return true; }
+ virtual void LogParams();
+ virtual bool ValidateParams();
+ bool ParseFlags(int argc, char** argv);
virtual std::vector<Flag> GetFlags();
virtual uint64_t ComputeInputBytes() = 0;
virtual tensorflow::Stat<int64_t> Run(int num_times, RunType run_type);
diff --git a/tensorflow/contrib/lite/tools/benchmark/benchmark_params.h b/tensorflow/contrib/lite/tools/benchmark/benchmark_params.h
index 33448dd162..c98f47bb0d 100644
--- a/tensorflow/contrib/lite/tools/benchmark/benchmark_params.h
+++ b/tensorflow/contrib/lite/tools/benchmark/benchmark_params.h
@@ -31,6 +31,8 @@ class TypedBenchmarkParam;
class BenchmarkParam {
protected:
enum class ParamType { TYPE_INT32, TYPE_FLOAT, TYPE_BOOL, TYPE_STRING };
+ template <typename T>
+ static ParamType GetValueType();
public:
template <typename T>
@@ -49,8 +51,6 @@ class BenchmarkParam {
private:
static void AssertHasSameType(ParamType a, ParamType b);
- template <typename T>
- static ParamType GetValueType();
const ParamType type_;
};
diff --git a/tensorflow/contrib/lite/tools/benchmark/benchmark_test.cc b/tensorflow/contrib/lite/tools/benchmark/benchmark_test.cc
new file mode 100644
index 0000000000..b697bb394d
--- /dev/null
+++ b/tensorflow/contrib/lite/tools/benchmark/benchmark_test.cc
@@ -0,0 +1,74 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include "tensorflow/contrib/lite/testing/util.h"
+#include "tensorflow/contrib/lite/tools/benchmark/benchmark_tflite_model.h"
+#include "tensorflow/contrib/lite/tools/benchmark/command_line_flags.h"
+
+namespace {
+const std::string* g_model_path = nullptr;
+}
+
+namespace tflite {
+namespace benchmark {
+namespace {
+
+BenchmarkParams CreateParams() {
+ BenchmarkParams params;
+ params.AddParam("num_runs", BenchmarkParam::Create<int32_t>(2));
+ params.AddParam("run_delay", BenchmarkParam::Create<float>(-1.0f));
+ params.AddParam("num_threads", BenchmarkParam::Create<int32_t>(1));
+ params.AddParam("benchmark_name", BenchmarkParam::Create<std::string>(""));
+ params.AddParam("output_prefix", BenchmarkParam::Create<std::string>(""));
+ params.AddParam("warmup_runs", BenchmarkParam::Create<int32_t>(1));
+ params.AddParam("graph", BenchmarkParam::Create<std::string>(*g_model_path));
+ params.AddParam("input_layer", BenchmarkParam::Create<std::string>(""));
+ params.AddParam("input_layer_shape", BenchmarkParam::Create<std::string>(""));
+ params.AddParam("use_nnapi", BenchmarkParam::Create<bool>(false));
+ return params;
+}
+
+TEST(BenchmarkTest, DoesntCrash) {
+ ASSERT_THAT(g_model_path, testing::NotNull());
+
+ BenchmarkTfLiteModel benchmark(CreateParams());
+ benchmark.Run();
+}
+
+} // namespace
+} // namespace benchmark
+} // namespace tflite
+
+int main(int argc, char** argv) {
+ std::string model_path;
+ std::vector<tflite::Flag> flags = {
+ tflite::Flag::CreateFlag("graph", &model_path, "Path to model file.")};
+ g_model_path = &model_path;
+ const bool parse_result =
+ tflite::Flags::Parse(&argc, const_cast<const char**>(argv), flags);
+ if (!parse_result) {
+ std::cerr << tflite::Flags::Usage(argv[0], flags);
+ return 1;
+ }
+
+ ::tflite::LogToStderr();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/tensorflow/contrib/lite/tools/benchmark/benchmark_tflite_model.cc b/tensorflow/contrib/lite/tools/benchmark/benchmark_tflite_model.cc
index 73affc26b0..7f97f5d0cd 100644
--- a/tensorflow/contrib/lite/tools/benchmark/benchmark_tflite_model.cc
+++ b/tensorflow/contrib/lite/tools/benchmark/benchmark_tflite_model.cc
@@ -198,8 +198,8 @@ std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
return flags;
}
-void BenchmarkTfLiteModel::LogFlags() {
- BenchmarkModel::LogFlags();
+void BenchmarkTfLiteModel::LogParams() {
+ BenchmarkModel::LogParams();
TFLITE_LOG(INFO) << "Graph: [" << params_.Get<std::string>("graph") << "]";
TFLITE_LOG(INFO) << "Input layers: ["
<< params_.Get<std::string>("input_layer") << "]";
@@ -208,7 +208,7 @@ void BenchmarkTfLiteModel::LogFlags() {
TFLITE_LOG(INFO) << "Use nnapi : [" << params_.Get<bool>("use_nnapi") << "]";
}
-bool BenchmarkTfLiteModel::ValidateFlags() {
+bool BenchmarkTfLiteModel::ValidateParams() {
if (params_.Get<std::string>("graph").empty()) {
TFLITE_LOG(ERROR)
<< "Please specify the name of your TF Lite input file with --graph";
diff --git a/tensorflow/contrib/lite/tools/benchmark/benchmark_tflite_model.h b/tensorflow/contrib/lite/tools/benchmark/benchmark_tflite_model.h
index 50cc3f24b3..9931dcbafe 100644
--- a/tensorflow/contrib/lite/tools/benchmark/benchmark_tflite_model.h
+++ b/tensorflow/contrib/lite/tools/benchmark/benchmark_tflite_model.h
@@ -54,8 +54,8 @@ class BenchmarkTfLiteModel : public BenchmarkModel {
BenchmarkTfLiteModel(BenchmarkParams params);
std::vector<Flag> GetFlags() override;
- void LogFlags() override;
- bool ValidateFlags() override;
+ void LogParams() override;
+ bool ValidateParams() override;
uint64_t ComputeInputBytes() override;
void Init() override;
void RunImpl() override;
diff --git a/tensorflow/contrib/lite/util.h b/tensorflow/contrib/lite/util.h
index 89d9b4f5cf..3c4801183b 100644
--- a/tensorflow/contrib/lite/util.h
+++ b/tensorflow/contrib/lite/util.h
@@ -26,12 +26,17 @@ limitations under the License.
namespace tflite {
-// Converts a `std::vector` to a `TfLiteIntArray`.
+// Converts a `std::vector` to a `TfLiteIntArray`. The caller takes ownership
+// of the returned pointer.
TfLiteIntArray* ConvertVectorToTfLiteIntArray(const std::vector<int>& input);
+// Converts an array (of the given size) to a `TfLiteIntArray`. The caller
+// takes ownership of the returned pointer, and must make sure 'dims' has at
+// least 'rank' elements.
TfLiteIntArray* ConvertArrayToTfLiteIntArray(const int rank, const int* dims);
// Checks whether a `TfLiteIntArray` and an int array have matching elements.
+// The caller must guarantee that 'b' has at least 'b_size' elements.
bool EqualArrayAndTfLiteIntArray(const TfLiteIntArray* a, const int b_size,
const int* b);
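
The ownership notes added above imply a matching release on the caller's side. A minimal sketch of the intended usage (an illustration only, assuming `TfLiteIntArrayFree` from the TF Lite C headers is the appropriate deleter):

```c++
#include <vector>

#include "tensorflow/contrib/lite/context.h"  // TfLiteIntArray, TfLiteIntArrayFree
#include "tensorflow/contrib/lite/util.h"     // tflite::ConvertVectorToTfLiteIntArray

void Example() {
  std::vector<int> dims = {1, 224, 224, 3};
  // The conversion heap-allocates the array; the caller now owns it.
  TfLiteIntArray* array = tflite::ConvertVectorToTfLiteIntArray(dims);
  // ... use `array` (unless ownership is handed off to the runtime) ...
  TfLiteIntArrayFree(array);  // Caller releases it when done.
}
```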
diff --git a/tensorflow/contrib/lookup/lookup_ops_test.py b/tensorflow/contrib/lookup/lookup_ops_test.py
index 889accdd5a..8d510ede58 100644
--- a/tensorflow/contrib/lookup/lookup_ops_test.py
+++ b/tensorflow/contrib/lookup/lookup_ops_test.py
@@ -280,6 +280,21 @@ class HashTableOpTest(test.TestCase):
table.init.run()
self.assertAllEqual(3, table.size().eval())
+ def testHashTableInt32String(self):
+ with self.test_session():
+ default_val = "n/a"
+ keys = constant_op.constant([0, 1, 2], dtypes.int32)
+ values = constant_op.constant(["brain", "salad", "surgery"])
+ table = lookup.HashTable(
+ lookup.KeyValueTensorInitializer(keys, values), default_val)
+ table.init.run()
+
+ input_tensor = constant_op.constant([0, 1, -1])
+ output = table.lookup(input_tensor)
+
+ result = output.eval()
+ self.assertAllEqual([b"brain", b"salad", b"n/a"], result)
+
class MutableHashTableOpTest(test.TestCase):
diff --git a/tensorflow/contrib/makefile/proto_text_cc_files.txt b/tensorflow/contrib/makefile/proto_text_cc_files.txt
index 76428bc1d4..7d26429f9c 100644
--- a/tensorflow/contrib/makefile/proto_text_cc_files.txt
+++ b/tensorflow/contrib/makefile/proto_text_cc_files.txt
@@ -35,6 +35,7 @@ tensorflow/core/lib/random/random.cc
tensorflow/core/lib/random/distribution_sampler.cc
tensorflow/core/lib/io/zlib_outputbuffer.cc
tensorflow/core/lib/io/zlib_inputstream.cc
+tensorflow/core/lib/io/zlib_compression_options.cc
tensorflow/core/lib/io/two_level_iterator.cc
tensorflow/core/lib/io/table_builder.cc
tensorflow/core/lib/io/table.cc
diff --git a/tensorflow/contrib/makefile/tf_op_files.txt b/tensorflow/contrib/makefile/tf_op_files.txt
index 6e7423f85e..ecf2e120df 100644
--- a/tensorflow/contrib/makefile/tf_op_files.txt
+++ b/tensorflow/contrib/makefile/tf_op_files.txt
@@ -229,6 +229,8 @@ tensorflow/core/kernels/cast_op_impl_int32.cc
tensorflow/core/kernels/cast_op_impl_int64.cc
tensorflow/core/kernels/cast_op_impl_int8.cc
tensorflow/core/kernels/cast_op_impl_uint16.cc
+tensorflow/core/kernels/cast_op_impl_uint32.cc
+tensorflow/core/kernels/cast_op_impl_uint64.cc
tensorflow/core/kernels/cast_op_impl_uint8.cc
tensorflow/core/kernels/boosted_trees/prediction_ops.cc
tensorflow/core/kernels/boosted_trees/resource_ops.cc
diff --git a/tensorflow/contrib/metrics/python/ops/metric_ops.py b/tensorflow/contrib/metrics/python/ops/metric_ops.py
index b14202ff9e..a328670526 100644
--- a/tensorflow/contrib/metrics/python/ops/metric_ops.py
+++ b/tensorflow/contrib/metrics/python/ops/metric_ops.py
@@ -3715,6 +3715,7 @@ def count(values,
name=None):
"""Computes the number of examples, or sum of `weights`.
+ This metric keeps track of the denominator in `tf.metrics.mean`.
When evaluating some metric (e.g. mean) on one or more subsets of the data,
this auxiliary metric is useful for keeping track of how many examples there
are in each subset.
@@ -3741,15 +3742,21 @@ def count(values,
ValueError: If `weights` is not `None` and its shape doesn't match `values`,
or if either `metrics_collections` or `updates_collections` are not a list
or tuple.
+ RuntimeError: If eager execution is enabled.
"""
+ if context.executing_eagerly():
+ raise RuntimeError('tf.contrib.metrics.count is not supported when eager '
+ 'execution is enabled.')
with variable_scope.variable_scope(name, 'count', (values, weights)):
+
count_ = metrics_impl.metric_variable([], dtypes.float32, name='count')
if weights is None:
num_values = math_ops.to_float(array_ops.size(values))
else:
- _, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
+ values = math_ops.to_float(values)
+ values, _, weights = metrics_impl._remove_squeezable_dimensions( # pylint: disable=protected-access
predictions=values,
labels=None,
weights=weights)
@@ -3758,15 +3765,14 @@ def count(values,
num_values = math_ops.reduce_sum(weights)
with ops.control_dependencies([values]):
- update_op = state_ops.assign_add(count_, num_values)
+ update_count_op = state_ops.assign_add(count_, num_values)
- if metrics_collections:
- ops.add_to_collections(metrics_collections, count_)
+ count_ = metrics_impl._aggregate_variable(count_, metrics_collections) # pylint: disable=protected-access
if updates_collections:
- ops.add_to_collections(updates_collections, update_op)
+ ops.add_to_collections(updates_collections, update_count_op)
- return count_, update_op
+ return count_, update_count_op
def cohen_kappa(labels,
diff --git a/tensorflow/contrib/metrics/python/ops/metric_ops_test.py b/tensorflow/contrib/metrics/python/ops/metric_ops_test.py
index a09fc4abd4..401fedcbed 100644
--- a/tensorflow/contrib/metrics/python/ops/metric_ops_test.py
+++ b/tensorflow/contrib/metrics/python/ops/metric_ops_test.py
@@ -6854,6 +6854,11 @@ class CountTest(test.TestCase):
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
+ def testReturnType(self):
+ c, op = metrics.count(array_ops.ones([4, 3]))
+ self.assertTrue(isinstance(c, ops.Tensor))
+ self.assertTrue(isinstance(op, ops.Operation) or isinstance(op, ops.Tensor))
+
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
diff --git a/tensorflow/contrib/model_pruning/README.md b/tensorflow/contrib/model_pruning/README.md
index 86f4fd6adf..9143d082bf 100644
--- a/tensorflow/contrib/model_pruning/README.md
+++ b/tensorflow/contrib/model_pruning/README.md
@@ -66,10 +66,10 @@ is the sparsity_function_begin_step. In this equation, the
sparsity_function_exponent is set to 3.
### Adding pruning ops to the training graph
-The final step involves adding ops to the training graph that monitors the
-distribution of the layer's weight magnitudes and determines the layer threshold
-such masking all the weights below this threshold achieves the sparsity level
-desired for the current training step. This can be achieved as follows:
+The final step involves adding ops to the training graph that monitor the
+distribution of the layer's weight magnitudes and determine the layer threshold,
+such that masking all the weights below this threshold achieves the sparsity
+level desired for the current training step. This can be achieved as follows:
```python
tf.app.flags.DEFINE_string(
@@ -79,7 +79,7 @@ tf.app.flags.DEFINE_string(
with tf.graph.as_default():
# Create global step variable
- global_step = tf.train.get_global_step()
+ global_step = tf.train.get_or_create_global_step()
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
@@ -103,6 +103,7 @@ with tf.graph.as_default():
mon_sess.run(mask_update_op)
```
+Ensure that `global_step` is being [incremented](https://www.tensorflow.org/api_docs/python/tf/train/Optimizer#minimize), otherwise pruning will not work!
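
For illustration only (not part of this patch), one common way to keep `global_step` advancing is to hand it to the optimizer's `minimize()` call, assuming a `loss` tensor from the model:

```python
# Illustrative sketch; `loss` is assumed to be the model's training loss.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
# Passing global_step makes minimize() increment it on every training step,
# which the pruning schedule above depends on.
train_op = optimizer.minimize(loss, global_step=global_step)
```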
## Example: Pruning and training deep CNNs on the cifar10 dataset
diff --git a/tensorflow/contrib/model_pruning/python/learning.py b/tensorflow/contrib/model_pruning/python/learning.py
index 2b79c23cef..26695237c2 100644
--- a/tensorflow/contrib/model_pruning/python/learning.py
+++ b/tensorflow/contrib/model_pruning/python/learning.py
@@ -33,11 +33,14 @@ to support training of pruned models
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
- # Set up sparsity
- sparsity = pruning.setup_gradual_sparsity(self.global_step)
+ # Parse pruning hyperparameters
+ pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
- # Create mask update op
- mask_update_op = pruning.add_mask_update_ip(sparsity)
+ # Create a pruning object using the pruning_hparams
+ p = pruning.Pruning(pruning_hparams)
+
+ # Add mask update ops to the graph
+ mask_update_op = p.conditional_mask_update_op()
# Run training.
learning.train(train_op,
diff --git a/tensorflow/contrib/model_pruning/python/pruning.py b/tensorflow/contrib/model_pruning/python/pruning.py
index 4b7af18b33..da9d398cbc 100644
--- a/tensorflow/contrib/model_pruning/python/pruning.py
+++ b/tensorflow/contrib/model_pruning/python/pruning.py
@@ -518,11 +518,11 @@ class Pruning(object):
summary.scalar('last_mask_update_step', self._last_update_step)
masks = get_masks()
thresholds = get_thresholds()
- for index, mask in enumerate(masks):
+ for mask, threshold in zip(masks, thresholds):
if not self._exists_in_do_not_prune_list(mask.name):
- summary.scalar(mask.name + '/sparsity', nn_impl.zero_fraction(mask))
- summary.scalar(thresholds[index].op.name + '/threshold',
- thresholds[index])
+ summary.scalar(mask.op.name + '/sparsity',
+ nn_impl.zero_fraction(mask))
+ summary.scalar(threshold.op.name + '/threshold', threshold)
def print_hparams(self):
logging.info(self._spec.to_json())
diff --git a/tensorflow/contrib/nccl/kernels/nccl_manager.cc b/tensorflow/contrib/nccl/kernels/nccl_manager.cc
index b1cb89391c..99fecf9651 100644
--- a/tensorflow/contrib/nccl/kernels/nccl_manager.cc
+++ b/tensorflow/contrib/nccl/kernels/nccl_manager.cc
@@ -445,7 +445,7 @@ void NcclManager::LoopKernelLaunches(NcclStream* nccl_stream) {
se::Stream* comm_stream = nccl_stream->stream.get();
ScopedActivateExecutorContext scoped_context(nccl_stream->executor);
const cudaStream_t* cu_stream = reinterpret_cast<const cudaStream_t*>(
- comm_stream->implementation()->CudaStreamMemberHack());
+ comm_stream->implementation()->GpuStreamMemberHack());
while (true) {
// Find collective to run.
diff --git a/tensorflow/contrib/optimizer_v2/BUILD b/tensorflow/contrib/optimizer_v2/BUILD
index 5225ecc14f..3ba3ee29ec 100644
--- a/tensorflow/contrib/optimizer_v2/BUILD
+++ b/tensorflow/contrib/optimizer_v2/BUILD
@@ -193,6 +193,7 @@ cuda_py_test(
srcs = ["rmsprop_test.py"],
additional_deps = [
":training",
+ "@absl_py//absl/testing:parameterized",
"//tensorflow/python:array_ops",
"//tensorflow/python:embedding_ops",
"//tensorflow/python:framework",
diff --git a/tensorflow/contrib/optimizer_v2/rmsprop_test.py b/tensorflow/contrib/optimizer_v2/rmsprop_test.py
index ed68f6afbf..dc23ef241a 100644
--- a/tensorflow/contrib/optimizer_v2/rmsprop_test.py
+++ b/tensorflow/contrib/optimizer_v2/rmsprop_test.py
@@ -19,15 +19,16 @@ from __future__ import division
from __future__ import print_function
import copy
-import itertools
import math
+from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.optimizer_v2 import rmsprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
+from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
@@ -48,13 +49,8 @@ _TEST_PARAM_VALUES = [
[0.5, 0.95, 0.9, 1e-5, True, False],
]
-_TESTPARAMS = [
- [data_type] + values
- for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES)
-]
-
-class RMSPropOptimizerTest(test.TestCase):
+class RMSPropOptimizerTest(test.TestCase, parameterized.TestCase):
def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, decay, momentum,
epsilon, centered):
@@ -87,362 +83,366 @@ class RMSPropOptimizerTest(test.TestCase):
var_t[gindex] = var[gindex] - mom_t[gindex]
return var_t, mg_t, rms_t, mom_t
- def testDense(self):
- # TODO(yori): Use ParameterizedTest when available
- for (dtype, learning_rate, decay, momentum,
- epsilon, centered, use_resource) in _TESTPARAMS:
- with self.test_session(use_gpu=True):
- # Initialize variables for numpy implementation.
- var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
- grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
- var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
- grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)
-
- if use_resource:
- var0 = resource_variable_ops.ResourceVariable(var0_np)
- var1 = resource_variable_ops.ResourceVariable(var1_np)
- else:
- var0 = variables.Variable(var0_np)
- var1 = variables.Variable(var1_np)
- grads0 = constant_op.constant(grads0_np)
- grads1 = constant_op.constant(grads1_np)
- opt = rmsprop.RMSPropOptimizer(
- learning_rate=learning_rate,
- decay=decay,
- momentum=momentum,
- epsilon=epsilon,
- centered=centered)
-
- update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
-
- mg0 = opt.get_slot(var0, "mg")
- self.assertEqual(mg0 is not None, centered)
- mg1 = opt.get_slot(var1, "mg")
- self.assertEqual(mg1 is not None, centered)
- rms0 = opt.get_slot(var0, "rms")
- self.assertTrue(rms0 is not None)
- rms1 = opt.get_slot(var1, "rms")
- self.assertTrue(rms1 is not None)
- mom0 = opt.get_slot(var0, "momentum")
- self.assertTrue(mom0 is not None)
- mom1 = opt.get_slot(var1, "momentum")
- self.assertTrue(mom1 is not None)
-
- mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
- mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
- rms0_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
- rms1_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
- mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
- mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
-
- # Fetch params to validate initial values
- self.assertAllClose([1.0, 2.0], var0.eval())
- self.assertAllClose([3.0, 4.0], var1.eval())
-
- # Run 4 steps of RMSProp
- for _ in range(1, 5):
- update.run()
-
- var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
- var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
- decay, momentum, epsilon, centered)
- var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
- var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
- decay, momentum, epsilon, centered)
-
- # Validate updated params
- if centered:
- self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
- self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
- self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
- self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
- self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
- self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
- self.assertAllCloseAccordingToType(var0_np, var0.eval())
- self.assertAllCloseAccordingToType(var1_np, var1.eval())
-
- def testMinimizeSparseResourceVariable(self):
- for dtype in [dtypes.float32, dtypes.float64]:
- with self.test_session():
- var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
- x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
- pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
- loss = pred * pred
- sgd_op = rmsprop.RMSPropOptimizer(
- learning_rate=1.0,
- decay=0.0,
- momentum=0.0,
- epsilon=0.0,
- centered=False).minimize(loss)
- variables.global_variables_initializer().run()
- # Fetch params to validate initial values
- self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
- # Run 1 step of sgd
- sgd_op.run()
- # Validate updated params
- self.assertAllCloseAccordingToType(
- [[0., 1.]], var0.eval(), atol=0.01)
-
- def testMinimizeSparseResourceVariableCentered(self):
- for dtype in [dtypes.float32, dtypes.float64]:
- with self.test_session():
- var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
- x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
- pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
- loss = pred * pred
- sgd_op = rmsprop.RMSPropOptimizer(
- learning_rate=1.0,
- decay=0.0,
- momentum=0.0,
- epsilon=1.0,
- centered=True).minimize(loss)
- variables.global_variables_initializer().run()
- # Fetch params to validate initial values
- self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
- # Run 1 step of sgd
- sgd_op.run()
- # Validate updated params
- self.assertAllCloseAccordingToType(
- [[-111, -138]], var0.eval(), atol=0.01)
-
- def testSparse(self):
- # TODO(yori): Use ParameterizedTest when available
- for (dtype, learning_rate, decay,
- momentum, epsilon, centered, _) in _TESTPARAMS:
- with self.test_session(use_gpu=True):
- # Initialize variables for numpy implementation.
- var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
- grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
- var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
- grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)
-
+ @parameterized.named_parameters(
+ *test_util.generate_combinations_with_testcase_name(
+ dtype=_DATA_TYPES, param_value=_TEST_PARAM_VALUES))
+ def testDense(self, dtype, param_value):
+ (learning_rate, decay, momentum, epsilon, centered, use_resource) = tuple(
+ param_value)
+ with self.test_session(use_gpu=True):
+ # Initialize variables for numpy implementation.
+ var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
+ grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
+ var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
+ grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)
+
+ if use_resource:
+ var0 = resource_variable_ops.ResourceVariable(var0_np)
+ var1 = resource_variable_ops.ResourceVariable(var1_np)
+ else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
- grads0_np_indices = np.array([0], dtype=np.int32)
- grads0 = ops.IndexedSlices(
- constant_op.constant(grads0_np),
- constant_op.constant(grads0_np_indices), constant_op.constant([1]))
- grads1_np_indices = np.array([1], dtype=np.int32)
- grads1 = ops.IndexedSlices(
- constant_op.constant(grads1_np),
- constant_op.constant(grads1_np_indices), constant_op.constant([1]))
- opt = rmsprop.RMSPropOptimizer(
- learning_rate=learning_rate,
- decay=decay,
- momentum=momentum,
- epsilon=epsilon,
- centered=centered)
- update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
-
- mg0 = opt.get_slot(var0, "mg")
- self.assertEqual(mg0 is not None, centered)
- mg1 = opt.get_slot(var1, "mg")
- self.assertEqual(mg1 is not None, centered)
- rms0 = opt.get_slot(var0, "rms")
- self.assertTrue(rms0 is not None)
- rms1 = opt.get_slot(var1, "rms")
- self.assertTrue(rms1 is not None)
- mom0 = opt.get_slot(var0, "momentum")
- self.assertTrue(mom0 is not None)
- mom1 = opt.get_slot(var1, "momentum")
- self.assertTrue(mom1 is not None)
-
- mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
- mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
- rms0_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
- rms1_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
- mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
- mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
-
- # Fetch params to validate initial values
- self.assertAllClose([1.0, 2.0], var0.eval())
- self.assertAllClose([3.0, 4.0], var1.eval())
-
- # Run 4 steps of RMSProp
- for _ in range(1, 5):
- update.run()
-
- var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(
- var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np,
- learning_rate, decay, momentum, epsilon, centered)
- var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(
- var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np,
- learning_rate, decay, momentum, epsilon, centered)
-
- # Validate updated params
- if centered:
- self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
- self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
- self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
- self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
- self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
- self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
- self.assertAllCloseAccordingToType(var0_np, var0.eval())
- self.assertAllCloseAccordingToType(var1_np, var1.eval())
-
- def testWithoutMomentum(self):
- for dtype in [dtypes.half, dtypes.float32]:
- with self.test_session(use_gpu=True):
- var0 = variables.Variable([1.0, 2.0], dtype=dtype)
- var1 = variables.Variable([3.0, 4.0], dtype=dtype)
- grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
- grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
- opt = rmsprop.RMSPropOptimizer(
- learning_rate=2.0, decay=0.9, momentum=0.0, epsilon=1.0)
- update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
-
- rms0 = opt.get_slot(var0, "rms")
- self.assertTrue(rms0 is not None)
- rms1 = opt.get_slot(var1, "rms")
- self.assertTrue(rms1 is not None)
- mom0 = opt.get_slot(var0, "momentum")
- self.assertTrue(mom0 is not None)
- mom1 = opt.get_slot(var1, "momentum")
- self.assertTrue(mom1 is not None)
-
- # Fetch params to validate initial values
- self.assertAllClose([1.0, 2.0], var0.eval())
- self.assertAllClose([3.0, 4.0], var1.eval())
- # Step 1: the rms accumulators where 1. So we should see a normal
- # update: v -= grad * learning_rate
- update.run()
- # Check the root mean square accumulators.
- self.assertAllCloseAccordingToType(
- np.array([0.901, 0.901]), rms0.eval())
- self.assertAllCloseAccordingToType(
- np.array([0.90001, 0.90001]), rms1.eval())
- # Check the parameters.
- self.assertAllCloseAccordingToType(
- np.array([
- 1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)),
- 2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0))
- ]), var0.eval())
- self.assertAllCloseAccordingToType(
- np.array([
- 3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)),
- 4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0))
- ]), var1.eval())
- # Step 2: the root mean square accumulators contain the previous update.
- update.run()
- # Check the rms accumulators.
- self.assertAllCloseAccordingToType(
- np.array([0.901 * 0.9 + 0.001, 0.901 * 0.9 + 0.001]), rms0.eval())
- self.assertAllCloseAccordingToType(
- np.array([0.90001 * 0.9 + 1e-5, 0.90001 * 0.9 + 1e-5]), rms1.eval())
- # Check the parameters.
- self.assertAllCloseAccordingToType(
- np.array([
- 1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)) -
- (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1.0)),
- 2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)) -
- (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1.0))
- ]), var0.eval())
- self.assertAllCloseAccordingToType(
- np.array([
- 3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)) -
- (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5 + 1.0)),
- 4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)) -
- (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5 + 1.0))
- ]), var1.eval())
-
- def testWithMomentum(self):
- for dtype in [dtypes.half, dtypes.float32]:
- with self.test_session(use_gpu=True):
- var0 = variables.Variable([1.0, 2.0], dtype=dtype)
- var1 = variables.Variable([3.0, 4.0], dtype=dtype)
- grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
- grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
-
- opt = rmsprop.RMSPropOptimizer(
- learning_rate=2.0, decay=0.9, momentum=0.5, epsilon=1e-5)
- update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
-
- rms0 = opt.get_slot(var0, "rms")
- self.assertTrue(rms0 is not None)
- rms1 = opt.get_slot(var1, "rms")
- self.assertTrue(rms1 is not None)
- mom0 = opt.get_slot(var0, "momentum")
- self.assertTrue(mom0 is not None)
- mom1 = opt.get_slot(var1, "momentum")
- self.assertTrue(mom1 is not None)
-
- # Fetch params to validate initial values
- self.assertAllClose([1.0, 2.0], var0.eval())
- self.assertAllClose([3.0, 4.0], var1.eval())
- # Step 1: rms = 1, mom = 0. So we should see a normal
- # update: v -= grad * learning_rate
+ grads0 = constant_op.constant(grads0_np)
+ grads1 = constant_op.constant(grads1_np)
+ opt = rmsprop.RMSPropOptimizer(
+ learning_rate=learning_rate,
+ decay=decay,
+ momentum=momentum,
+ epsilon=epsilon,
+ centered=centered)
+
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ mg0 = opt.get_slot(var0, "mg")
+ self.assertEqual(mg0 is not None, centered)
+ mg1 = opt.get_slot(var1, "mg")
+ self.assertEqual(mg1 is not None, centered)
+ rms0 = opt.get_slot(var0, "rms")
+ self.assertIsNotNone(rms0)
+ rms1 = opt.get_slot(var1, "rms")
+ self.assertIsNotNone(rms1)
+ mom0 = opt.get_slot(var0, "momentum")
+ self.assertIsNotNone(mom0)
+ mom1 = opt.get_slot(var1, "momentum")
+ self.assertIsNotNone(mom1)
+
+ mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
+ mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
+ rms0_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
+ rms1_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
+ mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
+ mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
+
+ # Fetch params to validate initial values
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([3.0, 4.0], var1.eval())
+
+ # Run 4 steps of RMSProp
+ for _ in range(4):
update.run()
- # Check the root mean square accumulators.
- self.assertAllCloseAccordingToType(
- np.array([0.901, 0.901]), rms0.eval())
- self.assertAllCloseAccordingToType(
- np.array([0.90001, 0.90001]), rms1.eval())
- # Check the momentum accumulators
- self.assertAllCloseAccordingToType(
- np.array([(0.1 * 2.0 / math.sqrt(0.901 + 1e-5)),
- (0.1 * 2.0 / math.sqrt(0.901 + 1e-5))]), mom0.eval())
- self.assertAllCloseAccordingToType(
- np.array([(0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)),
- (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5))]), mom1.eval())
-
- # Check that the parameters.
- self.assertAllCloseAccordingToType(
- np.array([
- 1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)),
- 2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5))
- ]), var0.eval())
- self.assertAllCloseAccordingToType(
- np.array([
- 3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)),
- 4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5))
- ]), var1.eval())
-
- # Step 2: the root mean square accumulators contain the previous update.
+
+ var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
+ var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
+ decay, momentum, epsilon, centered)
+ var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
+ var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
+ decay, momentum, epsilon, centered)
+
+ # Validate updated params
+ if centered:
+ self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
+ self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
+ self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
+ self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
+ self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
+ self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
+ self.assertAllCloseAccordingToType(var0_np, var0.eval())
+ self.assertAllCloseAccordingToType(var1_np, var1.eval())
+
+ @parameterized.parameters([dtypes.float32, dtypes.float64])
+ def testMinimizeSparseResourceVariable(self, dtype):
+ with self.test_session():
+ var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
+ x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
+ pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
+ loss = pred * pred
+ sgd_op = rmsprop.RMSPropOptimizer(
+ learning_rate=1.0,
+ decay=0.0,
+ momentum=0.0,
+ epsilon=0.0,
+ centered=False).minimize(loss)
+ variables.global_variables_initializer().run()
+ # Fetch params to validate initial values
+ self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
+ # Run 1 step of sgd
+ sgd_op.run()
+ # Validate updated params
+ self.assertAllCloseAccordingToType(
+ [[0., 1.]], var0.eval(), atol=0.01)
+
+ @parameterized.parameters([dtypes.float32, dtypes.float64])
+ def testMinimizeSparseResourceVariableCentered(self, dtype):
+ with self.test_session():
+ var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
+ x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
+ pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
+ loss = pred * pred
+ sgd_op = rmsprop.RMSPropOptimizer(
+ learning_rate=1.0,
+ decay=0.0,
+ momentum=0.0,
+ epsilon=1.0,
+ centered=True).minimize(loss)
+ variables.global_variables_initializer().run()
+ # Fetch params to validate initial values
+ self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
+ # Run 1 step of sgd
+ sgd_op.run()
+ # Validate updated params
+ self.assertAllCloseAccordingToType(
+ [[-111, -138]], var0.eval(), atol=0.01)
+
+ @parameterized.named_parameters(
+ *test_util.generate_combinations_with_testcase_name(
+ dtype=_DATA_TYPES, param_value=_TEST_PARAM_VALUES))
+ def testSparse(self, dtype, param_value):
+ (learning_rate, decay, momentum, epsilon, centered, _) = tuple(
+ param_value)
+ with self.test_session(use_gpu=True):
+ # Initialize variables for numpy implementation.
+ var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
+ grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
+ var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
+ grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)
+
+ var0 = variables.Variable(var0_np)
+ var1 = variables.Variable(var1_np)
+ grads0_np_indices = np.array([0], dtype=np.int32)
+ grads0 = ops.IndexedSlices(
+ constant_op.constant(grads0_np),
+ constant_op.constant(grads0_np_indices), constant_op.constant([1]))
+ grads1_np_indices = np.array([1], dtype=np.int32)
+ grads1 = ops.IndexedSlices(
+ constant_op.constant(grads1_np),
+ constant_op.constant(grads1_np_indices), constant_op.constant([1]))
+ opt = rmsprop.RMSPropOptimizer(
+ learning_rate=learning_rate,
+ decay=decay,
+ momentum=momentum,
+ epsilon=epsilon,
+ centered=centered)
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ mg0 = opt.get_slot(var0, "mg")
+ self.assertEqual(mg0 is not None, centered)
+ mg1 = opt.get_slot(var1, "mg")
+ self.assertEqual(mg1 is not None, centered)
+ rms0 = opt.get_slot(var0, "rms")
+ self.assertIsNotNone(rms0)
+ rms1 = opt.get_slot(var1, "rms")
+ self.assertIsNotNone(rms1)
+ mom0 = opt.get_slot(var0, "momentum")
+ self.assertIsNotNone(mom0)
+ mom1 = opt.get_slot(var1, "momentum")
+ self.assertIsNotNone(mom1)
+
+ mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
+ mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
+ rms0_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
+ rms1_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
+ mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
+ mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
+
+ # Fetch params to validate initial values
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([3.0, 4.0], var1.eval())
+
+ # Run 4 steps of RMSProp
+ for _ in range(4):
update.run()
- # Check the rms accumulators.
- self.assertAllCloseAccordingToType(
- np.array([0.901 * 0.9 + 0.001, 0.901 * 0.9 + 0.001]), rms0.eval())
- self.assertAllCloseAccordingToType(
- np.array([0.90001 * 0.9 + 1e-5, 0.90001 * 0.9 + 1e-5]), rms1.eval())
- self.assertAllCloseAccordingToType(
- np.array([
- 0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
- (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5)),
- 0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
- (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5))
- ]), mom0.eval())
- self.assertAllCloseAccordingToType(
- np.array([
- 0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
- (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5)),
- 0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
- (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5))
- ]), mom1.eval())
-
- # Check the parameters.
- self.assertAllCloseAccordingToType(
- np.array([
- 1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) -
- (0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
- (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5))),
- 2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) -
- (0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
- (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5)))
- ]), var0.eval())
-
- self.assertAllCloseAccordingToType(
- np.array([
- 3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) -
- (0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
- (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5))),
- 4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) -
- (0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
- (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5)))
- ]), var1.eval())
+
+ var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(
+ var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np,
+ learning_rate, decay, momentum, epsilon, centered)
+ var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(
+ var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np,
+ learning_rate, decay, momentum, epsilon, centered)
+
+ # Validate updated params
+ if centered:
+ self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
+ self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
+ self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
+ self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
+ self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
+ self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
+ self.assertAllCloseAccordingToType(var0_np, var0.eval())
+ self.assertAllCloseAccordingToType(var1_np, var1.eval())
+
+ @parameterized.parameters(_DATA_TYPES)
+ def testWithoutMomentum(self, dtype):
+ with self.test_session(use_gpu=True):
+ var0 = variables.Variable([1.0, 2.0], dtype=dtype)
+ var1 = variables.Variable([3.0, 4.0], dtype=dtype)
+ grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
+ grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
+ opt = rmsprop.RMSPropOptimizer(
+ learning_rate=2.0, decay=0.9, momentum=0.0, epsilon=1.0)
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ rms0 = opt.get_slot(var0, "rms")
+ self.assertIsNotNone(rms0)
+ rms1 = opt.get_slot(var1, "rms")
+ self.assertIsNotNone(rms1)
+ mom0 = opt.get_slot(var0, "momentum")
+ self.assertIsNotNone(mom0)
+ mom1 = opt.get_slot(var1, "momentum")
+ self.assertIsNotNone(mom1)
+
+ # Fetch params to validate initial values
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([3.0, 4.0], var1.eval())
+      # Step 1: the rms accumulators were 1. So we should see a normal
+ # update: v -= grad * learning_rate
+ update.run()
+ # Check the root mean square accumulators.
+ self.assertAllCloseAccordingToType(
+ np.array([0.901, 0.901]), rms0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([0.90001, 0.90001]), rms1.eval())
+ # Check the parameters.
+ self.assertAllCloseAccordingToType(
+ np.array([
+ 1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)),
+ 2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0))
+ ]), var0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([
+ 3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)),
+ 4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0))
+ ]), var1.eval())
+ # Step 2: the root mean square accumulators contain the previous update.
+ update.run()
+ # Check the rms accumulators.
+ self.assertAllCloseAccordingToType(
+ np.array([0.901 * 0.9 + 0.001, 0.901 * 0.9 + 0.001]), rms0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([0.90001 * 0.9 + 1e-5, 0.90001 * 0.9 + 1e-5]), rms1.eval())
+ # Check the parameters.
+ self.assertAllCloseAccordingToType(
+ np.array([
+ 1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)) -
+ (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1.0)),
+ 2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)) -
+ (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1.0))
+ ]), var0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([
+ 3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)) -
+ (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5 + 1.0)),
+ 4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)) -
+ (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5 + 1.0))
+ ]), var1.eval())
+
+ @parameterized.parameters(_DATA_TYPES)
+ def testWithMomentum(self, dtype):
+ with self.test_session(use_gpu=True):
+ var0 = variables.Variable([1.0, 2.0], dtype=dtype)
+ var1 = variables.Variable([3.0, 4.0], dtype=dtype)
+ grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
+ grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
+
+ opt = rmsprop.RMSPropOptimizer(
+ learning_rate=2.0, decay=0.9, momentum=0.5, epsilon=1e-5)
+ update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
+ variables.global_variables_initializer().run()
+
+ rms0 = opt.get_slot(var0, "rms")
+ self.assertIsNotNone(rms0)
+ rms1 = opt.get_slot(var1, "rms")
+ self.assertIsNotNone(rms1)
+ mom0 = opt.get_slot(var0, "momentum")
+ self.assertIsNotNone(mom0)
+ mom1 = opt.get_slot(var1, "momentum")
+ self.assertIsNotNone(mom1)
+
+ # Fetch params to validate initial values
+ self.assertAllClose([1.0, 2.0], var0.eval())
+ self.assertAllClose([3.0, 4.0], var1.eval())
+ # Step 1: rms = 1, mom = 0. So we should see a normal
+ # update: v -= grad * learning_rate
+ update.run()
+ # Check the root mean square accumulators.
+ self.assertAllCloseAccordingToType(
+ np.array([0.901, 0.901]), rms0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([0.90001, 0.90001]), rms1.eval())
+ # Check the momentum accumulators
+ self.assertAllCloseAccordingToType(
+ np.array([(0.1 * 2.0 / math.sqrt(0.901 + 1e-5)),
+ (0.1 * 2.0 / math.sqrt(0.901 + 1e-5))]), mom0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([(0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)),
+ (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5))]), mom1.eval())
+
+      # Check the parameters.
+ self.assertAllCloseAccordingToType(
+ np.array([
+ 1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)),
+ 2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5))
+ ]), var0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([
+ 3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)),
+ 4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5))
+ ]), var1.eval())
+
+ # Step 2: the root mean square accumulators contain the previous update.
+ update.run()
+ # Check the rms accumulators.
+ self.assertAllCloseAccordingToType(
+ np.array([0.901 * 0.9 + 0.001, 0.901 * 0.9 + 0.001]), rms0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([0.90001 * 0.9 + 1e-5, 0.90001 * 0.9 + 1e-5]), rms1.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([
+ 0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
+ (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5)),
+ 0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
+ (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5))
+ ]), mom0.eval())
+ self.assertAllCloseAccordingToType(
+ np.array([
+ 0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
+ (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5)),
+ 0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
+ (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5))
+ ]), mom1.eval())
+
+ # Check the parameters.
+ self.assertAllCloseAccordingToType(
+ np.array([
+ 1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) -
+ (0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
+ (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5))),
+ 2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) -
+ (0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
+ (0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5)))
+ ]), var0.eval())
+
+ self.assertAllCloseAccordingToType(
+ np.array([
+ 3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) -
+ (0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
+ (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5))),
+ 4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) -
+ (0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
+ (0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5)))
+ ]), var1.eval())
if __name__ == "__main__":
diff --git a/tensorflow/contrib/proto/BUILD b/tensorflow/contrib/proto/BUILD
index d45622174f..b27142cf4a 100644
--- a/tensorflow/contrib/proto/BUILD
+++ b/tensorflow/contrib/proto/BUILD
@@ -16,15 +16,3 @@ py_library(
"//tensorflow/contrib/proto/python/ops:encode_proto_op_py",
],
)
-
-py_library(
- name = "proto_pip",
- data = if_static(
- [],
- otherwise = ["//tensorflow/contrib/proto/python/kernel_tests:libtestexample.so"],
- ),
- deps = [
- ":proto",
- "//tensorflow/contrib/proto/python/kernel_tests:py_test_deps",
- ],
-)
diff --git a/tensorflow/contrib/proto/python/kernel_tests/BUILD b/tensorflow/contrib/proto/python/kernel_tests/BUILD
index 3f53ef1707..125c1cee29 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/BUILD
+++ b/tensorflow/contrib/proto/python/kernel_tests/BUILD
@@ -10,33 +10,12 @@ load("//tensorflow/core:platform/default/build_config_root.bzl", "if_static")
load("//tensorflow/core:platform/default/build_config.bzl", "tf_proto_library")
tf_py_test(
- name = "decode_proto_fail_test",
- size = "small",
- srcs = ["decode_proto_fail_test.py"],
- additional_deps = [
- ":py_test_deps",
- "//third_party/py/numpy",
- "//tensorflow/contrib/proto:proto",
- "//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
- ],
- data = if_static(
- [],
- otherwise = [":libtestexample.so"],
- ),
- tags = [
- "no_pip", # TODO(b/78026780)
- "no_windows", # TODO(b/78028010)
- ],
-)
-
-tf_py_test(
name = "decode_proto_op_test",
size = "small",
srcs = ["decode_proto_op_test.py"],
additional_deps = [
+ ":decode_proto_op_test_base",
":py_test_deps",
- "@absl_py//absl/testing:parameterized",
- "//third_party/py/numpy",
"//tensorflow/contrib/proto:proto",
"//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
],
@@ -55,9 +34,8 @@ tf_py_test(
size = "small",
srcs = ["encode_proto_op_test.py"],
additional_deps = [
+ ":encode_proto_op_test_base",
":py_test_deps",
- "@absl_py//absl/testing:parameterized",
- "//third_party/py/numpy",
"//tensorflow/contrib/proto:proto",
"//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
"//tensorflow/contrib/proto/python/ops:encode_proto_op_py",
@@ -73,8 +51,9 @@ tf_py_test(
)
py_library(
- name = "test_base",
- srcs = ["test_base.py"],
+ name = "proto_op_test_base",
+ testonly = 1,
+ srcs = ["proto_op_test_base.py"],
deps = [
":test_example_proto_py",
"//tensorflow/python:client_testlib",
@@ -82,13 +61,31 @@ py_library(
)
py_library(
- name = "py_test_deps",
+ name = "decode_proto_op_test_base",
+ testonly = 1,
+ srcs = ["decode_proto_op_test_base.py"],
deps = [
- ":test_base",
+ ":proto_op_test_base",
":test_example_proto_py",
+ "//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
],
)
+py_library(
+ name = "encode_proto_op_test_base",
+ testonly = 1,
+ srcs = ["encode_proto_op_test_base.py"],
+ deps = [
+ ":proto_op_test_base",
+ ":test_example_proto_py",
+ "//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
+ ],
+)
+
+py_library(name = "py_test_deps")
+
tf_proto_library(
name = "test_example_proto",
srcs = ["test_example.proto"],
@@ -103,3 +100,30 @@ tf_cc_shared_object(
":test_example_proto_cc",
],
)
+
+py_library(
+ name = "descriptor_source_test_base",
+ testonly = 1,
+ srcs = ["descriptor_source_test_base.py"],
+ deps = [
+ ":proto_op_test_base",
+ "//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
+ "@protobuf_archive//:protobuf_python",
+ ],
+)
+
+tf_py_test(
+ name = "descriptor_source_test",
+ size = "small",
+ srcs = ["descriptor_source_test.py"],
+ additional_deps = [
+ ":descriptor_source_test_base",
+ "//tensorflow/contrib/proto/python/ops:decode_proto_op_py",
+ "//tensorflow/contrib/proto/python/ops:encode_proto_op_py",
+ "//tensorflow/python:client_testlib",
+ ],
+ tags = [
+ "no_pip",
+ ],
+)
diff --git a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_fail_test.py b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_fail_test.py
deleted file mode 100644
index 3b982864bc..0000000000
--- a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_fail_test.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# =============================================================================
-# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =============================================================================
-
-# Python3 preparedness imports.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import numpy as np
-
-from tensorflow.contrib.proto.python.kernel_tests import test_base
-from tensorflow.contrib.proto.python.ops import decode_proto_op
-from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import errors
-from tensorflow.python.platform import test
-
-
-class DecodeProtoFailTest(test_base.ProtoOpTestBase):
- """Test failure cases for DecodeToProto."""
-
- def _TestCorruptProtobuf(self, sanitize):
- """Test failure cases for DecodeToProto."""
-
- # The goal here is to check the error reporting.
- # Testing against a variety of corrupt protobufs is
- # done by fuzzing.
- corrupt_proto = 'This is not a binary protobuf'
-
- # Numpy silently truncates the strings if you don't specify dtype=object.
- batch = np.array(corrupt_proto, dtype=object)
- msg_type = 'tensorflow.contrib.proto.TestCase'
- field_names = ['sizes']
- field_types = [dtypes.int32]
-
- with self.test_session() as sess:
- ctensor, vtensor = decode_proto_op.decode_proto(
- batch,
- message_type=msg_type,
- field_names=field_names,
- output_types=field_types,
- sanitize=sanitize)
- with self.assertRaisesRegexp(errors.DataLossError,
- 'Unable to parse binary protobuf'
- '|Failed to consume entire buffer'):
- _ = sess.run([ctensor] + vtensor)
-
- def testCorrupt(self):
- self._TestCorruptProtobuf(sanitize=False)
-
- def testSanitizerCorrupt(self):
- self._TestCorruptProtobuf(sanitize=True)
-
-
-if __name__ == '__main__':
- test.main()
diff --git a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py
index 2a07794499..934035ec4c 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py
+++ b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test.py
@@ -13,273 +13,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
-"""Table-driven test for decode_proto op.
+"""Tests for decode_proto op."""
-This test is run once with each of the *.TestCase.pbtxt files
-in the test directory.
-"""
# Python3 preparedness imports.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from absl.testing import parameterized
-import numpy as np
-
-
-from google.protobuf import text_format
-
-from tensorflow.contrib.proto.python.kernel_tests import test_base
-from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.contrib.proto.python.kernel_tests import decode_proto_op_test_base as test_base
from tensorflow.contrib.proto.python.ops import decode_proto_op
-from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
-class DecodeProtoOpTest(test_base.ProtoOpTestBase, parameterized.TestCase):
-
- def _compareValues(self, fd, vs, evs):
- """Compare lists/arrays of field values."""
-
- if len(vs) != len(evs):
- self.fail('Field %s decoded %d outputs, expected %d' %
- (fd.name, len(vs), len(evs)))
- for i, ev in enumerate(evs):
- # Special case fuzzy match for float32. TensorFlow seems to mess with
- # MAX_FLT slightly and the test doesn't work otherwise.
- # TODO(nix): ask on TF list about why MAX_FLT doesn't pass through.
- if fd.cpp_type == fd.CPPTYPE_FLOAT:
- # Numpy isclose() is better than assertIsClose() which uses an absolute
- # value comparison.
- self.assertTrue(
- np.isclose(vs[i], ev), 'expected %r, actual %r' % (ev, vs[i]))
- elif fd.cpp_type == fd.CPPTYPE_STRING:
- # In Python3 string tensor values will be represented as bytes, so we
- # reencode the proto values to match that.
- self.assertEqual(vs[i], ev.encode('ascii'))
- else:
- # Doubles and other types pass through unscathed.
- self.assertEqual(vs[i], ev)
-
- def _compareRepeatedPrimitiveValue(self, batch_shape, sizes, fields,
- field_dict):
- """Compare protos of type RepeatedPrimitiveValue.
-
- Args:
- batch_shape: the shape of the input tensor of serialized messages.
- sizes: int matrix of repeat counts returned by decode_proto
- fields: list of test_example_pb2.FieldSpec (types and expected values)
- field_dict: map from field names to decoded numpy tensors of values
- """
-
- # Check that expected values match.
- for field in fields:
- values = field_dict[field.name]
- self.assertEqual(dtypes.as_dtype(values.dtype), field.dtype)
-
- fd = field.expected.DESCRIPTOR.fields_by_name[field.name]
-
- # Values has the same shape as the input plus an extra
- # dimension for repeats.
- self.assertEqual(list(values.shape)[:-1], batch_shape)
-
- # Nested messages are represented as TF strings, requiring
- # some special handling.
- if field.name == 'message_value':
- vs = []
- for buf in values.flat:
- msg = test_example_pb2.PrimitiveValue()
- msg.ParseFromString(buf)
- vs.append(msg)
- evs = getattr(field.expected, field.name)
- if len(vs) != len(evs):
- self.fail('Field %s decoded %d outputs, expected %d' %
- (fd.name, len(vs), len(evs)))
- for v, ev in zip(vs, evs):
- self.assertEqual(v, ev)
- continue
-
- # This can be a little confusing. For testing we are using
- # RepeatedPrimitiveValue in two ways: it's the proto that we
- # decode for testing, and it's used in the expected value as a
- # union type. The two cases are slightly different: this is the
- # second case.
- # We may be fetching the uint64_value from the test proto, but
- # in the expected proto we store it in the int64_value field
- # because TensorFlow doesn't support unsigned int64.
- tf_type_to_primitive_value_field = {
- dtypes.float32:
- 'float_value',
- dtypes.float64:
- 'double_value',
- dtypes.int32:
- 'int32_value',
- dtypes.uint8:
- 'uint8_value',
- dtypes.int8:
- 'int8_value',
- dtypes.string:
- 'string_value',
- dtypes.int64:
- 'int64_value',
- dtypes.bool:
- 'bool_value',
- # Unhandled TensorFlow types:
- # DT_INT16 DT_COMPLEX64 DT_QINT8 DT_QUINT8 DT_QINT32
- # DT_BFLOAT16 DT_QINT16 DT_QUINT16 DT_UINT16
- }
- tf_field_name = tf_type_to_primitive_value_field.get(field.dtype)
- if tf_field_name is None:
- self.fail('Unhandled tensorflow type %d' % field.dtype)
-
- self._compareValues(fd, values.flat,
- getattr(field.expected, tf_field_name))
-
- def _runDecodeProtoTests(self, fields, case_sizes, batch_shape, batch,
- message_type, message_format, sanitize,
- force_disordered=False):
- """Run decode tests on a batch of messages.
-
- Args:
- fields: list of test_example_pb2.FieldSpec (types and expected values)
- case_sizes: expected sizes array
- batch_shape: the shape of the input tensor of serialized messages
- batch: list of serialized messages
- message_type: descriptor name for messages
- message_format: format of messages, 'text' or 'binary'
- sanitize: whether to sanitize binary protobuf inputs
- force_disordered: whether to force fields encoded out of order.
- """
-
- if force_disordered:
- # Exercise code path that handles out-of-order fields by prepending extra
- # fields with tag numbers higher than any real field. Note that this won't
- # work with sanitization because that forces reserialization using a
- # trusted decoder and encoder.
- assert not sanitize
- extra_fields = test_example_pb2.ExtraFields()
- extra_fields.string_value = 'IGNORE ME'
- extra_fields.bool_value = False
- extra_msg = extra_fields.SerializeToString()
- batch = [extra_msg + msg for msg in batch]
-
- # Numpy silently truncates the strings if you don't specify dtype=object.
- batch = np.array(batch, dtype=object)
- batch = np.reshape(batch, batch_shape)
-
- field_names = [f.name for f in fields]
- output_types = [f.dtype for f in fields]
-
- with self.test_session() as sess:
- sizes, vtensor = decode_proto_op.decode_proto(
- batch,
- message_type=message_type,
- field_names=field_names,
- output_types=output_types,
- message_format=message_format,
- sanitize=sanitize)
-
- vlist = sess.run([sizes] + vtensor)
- sizes = vlist[0]
- # Values is a list of tensors, one for each field.
- value_tensors = vlist[1:]
-
- # Check that the repeat sizes are correct.
- self.assertTrue(
- np.all(np.array(sizes.shape) == batch_shape + [len(field_names)]))
-
- # Check that the decoded sizes match the expected sizes.
- self.assertEqual(len(sizes.flat), len(case_sizes))
- self.assertTrue(
- np.all(sizes.flat == np.array(
- case_sizes, dtype=np.int32)))
-
- field_dict = dict(zip(field_names, value_tensors))
-
- self._compareRepeatedPrimitiveValue(batch_shape, sizes, fields,
- field_dict)
-
- @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
- def testBinary(self, case):
- batch = [primitive.SerializeToString() for primitive in case.primitive]
- self._runDecodeProtoTests(
- case.field,
- case.sizes,
- list(case.shape),
- batch,
- 'tensorflow.contrib.proto.RepeatedPrimitiveValue',
- 'binary',
- sanitize=False)
-
- @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
- def testBinaryDisordered(self, case):
- batch = [primitive.SerializeToString() for primitive in case.primitive]
- self._runDecodeProtoTests(
- case.field,
- case.sizes,
- list(case.shape),
- batch,
- 'tensorflow.contrib.proto.RepeatedPrimitiveValue',
- 'binary',
- sanitize=False,
- force_disordered=True)
-
- @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
- def testPacked(self, case):
- # Now try with the packed serialization.
- # We test the packed representations by loading the same test cases
- # using PackedPrimitiveValue instead of RepeatedPrimitiveValue.
- # To do this we rely on the text format being the same for packed and
- # unpacked fields, and reparse the test message using the packed version
- # of the proto.
- packed_batch = [
- # Note: float_format='.17g' is necessary to ensure preservation of
- # doubles and floats in text format.
- text_format.Parse(
- text_format.MessageToString(
- primitive, float_format='.17g'),
- test_example_pb2.PackedPrimitiveValue()).SerializeToString()
- for primitive in case.primitive
- ]
-
- self._runDecodeProtoTests(
- case.field,
- case.sizes,
- list(case.shape),
- packed_batch,
- 'tensorflow.contrib.proto.PackedPrimitiveValue',
- 'binary',
- sanitize=False)
-
- @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
- def testText(self, case):
- # Note: float_format='.17g' is necessary to ensure preservation of
- # doubles and floats in text format.
- text_batch = [
- text_format.MessageToString(
- primitive, float_format='.17g') for primitive in case.primitive
- ]
-
- self._runDecodeProtoTests(
- case.field,
- case.sizes,
- list(case.shape),
- text_batch,
- 'tensorflow.contrib.proto.RepeatedPrimitiveValue',
- 'text',
- sanitize=False)
+class DecodeProtoOpTest(test_base.DecodeProtoOpTestBase):
- @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
- def testSanitizerGood(self, case):
- batch = [primitive.SerializeToString() for primitive in case.primitive]
- self._runDecodeProtoTests(
- case.field,
- case.sizes,
- list(case.shape),
- batch,
- 'tensorflow.contrib.proto.RepeatedPrimitiveValue',
- 'binary',
- sanitize=True)
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ super(DecodeProtoOpTest, self).__init__(decode_proto_op, methodName)
if __name__ == '__main__':
diff --git a/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test_base.py b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test_base.py
new file mode 100644
index 0000000000..e3570e38a3
--- /dev/null
+++ b/tensorflow/contrib/proto/python/kernel_tests/decode_proto_op_test_base.py
@@ -0,0 +1,303 @@
+# =============================================================================
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Tests for decode_proto op."""
+
+# Python3 preparedness imports.
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl.testing import parameterized
+import numpy as np
+
+
+from google.protobuf import text_format
+
+from tensorflow.contrib.proto.python.kernel_tests import proto_op_test_base as test_base
+from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
+
+
+class DecodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
+ """Base class for testing proto decoding ops."""
+
+ def __init__(self, decode_module, methodName='runTest'): # pylint: disable=invalid-name
+ """DecodeProtoOpTestBase initializer.
+
+ Args:
+ decode_module: a module containing the `decode_proto` op
+ methodName: the name of the test method (same as for test.TestCase)
+ """
+
+ super(DecodeProtoOpTestBase, self).__init__(methodName)
+ self._decode_module = decode_module
+
+ def _compareValues(self, fd, vs, evs):
+ """Compare lists/arrays of field values."""
+
+ if len(vs) != len(evs):
+ self.fail('Field %s decoded %d outputs, expected %d' %
+ (fd.name, len(vs), len(evs)))
+ for i, ev in enumerate(evs):
+ # Special case fuzzy match for float32. TensorFlow seems to mess with
+ # MAX_FLT slightly and the test doesn't work otherwise.
+ # TODO(nix): ask on TF list about why MAX_FLT doesn't pass through.
+ if fd.cpp_type == fd.CPPTYPE_FLOAT:
+ # Numpy isclose() is better than assertIsClose() which uses an absolute
+ # value comparison.
+ self.assertTrue(
+ np.isclose(vs[i], ev), 'expected %r, actual %r' % (ev, vs[i]))
+ elif fd.cpp_type == fd.CPPTYPE_STRING:
+ # In Python3 string tensor values will be represented as bytes, so we
+ # reencode the proto values to match that.
+ self.assertEqual(vs[i], ev.encode('ascii'))
+ else:
+ # Doubles and other types pass through unscathed.
+ self.assertEqual(vs[i], ev)
+
+ def _compareProtos(self, batch_shape, sizes, fields, field_dict):
+ """Compare protos of type TestValue.
+
+ Args:
+ batch_shape: the shape of the input tensor of serialized messages.
+ sizes: int matrix of repeat counts returned by decode_proto
+ fields: list of test_example_pb2.FieldSpec (types and expected values)
+ field_dict: map from field names to decoded numpy tensors of values
+ """
+
+ # Check that expected values match.
+ for field in fields:
+ values = field_dict[field.name]
+ self.assertEqual(dtypes.as_dtype(values.dtype), field.dtype)
+
+ fd = field.value.DESCRIPTOR.fields_by_name[field.name]
+
+ # Values has the same shape as the input plus an extra
+ # dimension for repeats.
+ self.assertEqual(list(values.shape)[:-1], batch_shape)
+
+ # Nested messages are represented as TF strings, requiring
+ # some special handling.
+ if field.name == 'message_value':
+ vs = []
+ for buf in values.flat:
+ msg = test_example_pb2.PrimitiveValue()
+ msg.ParseFromString(buf)
+ vs.append(msg)
+ evs = getattr(field.value, field.name)
+ if len(vs) != len(evs):
+ self.fail('Field %s decoded %d outputs, expected %d' %
+ (fd.name, len(vs), len(evs)))
+ for v, ev in zip(vs, evs):
+ self.assertEqual(v, ev)
+ continue
+
+ tf_type_to_primitive_value_field = {
+ dtypes.bool:
+ 'bool_value',
+ dtypes.float32:
+ 'float_value',
+ dtypes.float64:
+ 'double_value',
+ dtypes.int8:
+ 'int8_value',
+ dtypes.int32:
+ 'int32_value',
+ dtypes.int64:
+ 'int64_value',
+ dtypes.string:
+ 'string_value',
+ dtypes.uint8:
+ 'uint8_value',
+ dtypes.uint32:
+ 'uint32_value',
+ dtypes.uint64:
+ 'uint64_value',
+ }
+ tf_field_name = tf_type_to_primitive_value_field.get(field.dtype)
+ if tf_field_name is None:
+ self.fail('Unhandled tensorflow type %d' % field.dtype)
+
+ self._compareValues(fd, values.flat,
+ getattr(field.value, tf_field_name))
+
+ def _runDecodeProtoTests(self, fields, case_sizes, batch_shape, batch,
+ message_type, message_format, sanitize,
+ force_disordered=False):
+ """Run decode tests on a batch of messages.
+
+ Args:
+ fields: list of test_example_pb2.FieldSpec (types and expected values)
+ case_sizes: expected sizes array
+ batch_shape: the shape of the input tensor of serialized messages
+ batch: list of serialized messages
+ message_type: descriptor name for messages
+ message_format: format of messages, 'text' or 'binary'
+ sanitize: whether to sanitize binary protobuf inputs
+ force_disordered: whether to force fields encoded out of order.
+ """
+
+ if force_disordered:
+ # Exercise code path that handles out-of-order fields by prepending extra
+ # fields with tag numbers higher than any real field. Note that this won't
+ # work with sanitization because that forces reserialization using a
+ # trusted decoder and encoder.
+ assert not sanitize
+ extra_fields = test_example_pb2.ExtraFields()
+ extra_fields.string_value = 'IGNORE ME'
+ extra_fields.bool_value = False
+ extra_msg = extra_fields.SerializeToString()
+ batch = [extra_msg + msg for msg in batch]
+
+ # Numpy silently truncates the strings if you don't specify dtype=object.
+ batch = np.array(batch, dtype=object)
+ batch = np.reshape(batch, batch_shape)
+
+ field_names = [f.name for f in fields]
+ output_types = [f.dtype for f in fields]
+
+ with self.test_session() as sess:
+ sizes, vtensor = self._decode_module.decode_proto(
+ batch,
+ message_type=message_type,
+ field_names=field_names,
+ output_types=output_types,
+ message_format=message_format,
+ sanitize=sanitize)
+
+ vlist = sess.run([sizes] + vtensor)
+ sizes = vlist[0]
+ # Values is a list of tensors, one for each field.
+ value_tensors = vlist[1:]
+
+ # Check that the repeat sizes are correct.
+ self.assertTrue(
+ np.all(np.array(sizes.shape) == batch_shape + [len(field_names)]))
+
+ # Check that the decoded sizes match the expected sizes.
+ self.assertEqual(len(sizes.flat), len(case_sizes))
+ self.assertTrue(
+ np.all(sizes.flat == np.array(
+ case_sizes, dtype=np.int32)))
+
+ field_dict = dict(zip(field_names, value_tensors))
+
+ self._compareProtos(batch_shape, sizes, fields, field_dict)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testBinary(self, case):
+ batch = [value.SerializeToString() for value in case.values]
+ self._runDecodeProtoTests(
+ case.fields,
+ case.sizes,
+ list(case.shapes),
+ batch,
+ 'tensorflow.contrib.proto.TestValue',
+ 'binary',
+ sanitize=False)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testBinaryDisordered(self, case):
+ batch = [value.SerializeToString() for value in case.values]
+ self._runDecodeProtoTests(
+ case.fields,
+ case.sizes,
+ list(case.shapes),
+ batch,
+ 'tensorflow.contrib.proto.TestValue',
+ 'binary',
+ sanitize=False,
+ force_disordered=True)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testPacked(self, case):
+ # Now try with the packed serialization.
+ #
+ # We test the packed representations by loading the same test case using
+ # PackedTestValue instead of TestValue. To do this we rely on the text
+ # format being the same for packed and unpacked fields, and reparse the
+ # test message using the packed version of the proto.
+ packed_batch = [
+ # Note: float_format='.17g' is necessary to ensure preservation of
+ # doubles and floats in text format.
+ text_format.Parse(
+ text_format.MessageToString(
+ value, float_format='.17g'),
+ test_example_pb2.PackedTestValue()).SerializeToString()
+ for value in case.values
+ ]
+
+ self._runDecodeProtoTests(
+ case.fields,
+ case.sizes,
+ list(case.shapes),
+ packed_batch,
+ 'tensorflow.contrib.proto.PackedTestValue',
+ 'binary',
+ sanitize=False)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testText(self, case):
+ # Note: float_format='.17g' is necessary to ensure preservation of
+ # doubles and floats in text format.
+ text_batch = [
+ text_format.MessageToString(
+ value, float_format='.17g') for value in case.values
+ ]
+
+ self._runDecodeProtoTests(
+ case.fields,
+ case.sizes,
+ list(case.shapes),
+ text_batch,
+ 'tensorflow.contrib.proto.TestValue',
+ 'text',
+ sanitize=False)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testSanitizerGood(self, case):
+ batch = [value.SerializeToString() for value in case.values]
+ self._runDecodeProtoTests(
+ case.fields,
+ case.sizes,
+ list(case.shapes),
+ batch,
+ 'tensorflow.contrib.proto.TestValue',
+ 'binary',
+ sanitize=True)
+
+ @parameterized.parameters((False), (True))
+ def testCorruptProtobuf(self, sanitize):
+ corrupt_proto = 'This is not a binary protobuf'
+
+ # Numpy silently truncates the strings if you don't specify dtype=object.
+ batch = np.array(corrupt_proto, dtype=object)
+ msg_type = 'tensorflow.contrib.proto.TestCase'
+ field_names = ['sizes']
+ field_types = [dtypes.int32]
+
+ with self.test_session() as sess:
+ ctensor, vtensor = self._decode_module.decode_proto(
+ batch,
+ message_type=msg_type,
+ field_names=field_names,
+ output_types=field_types,
+ sanitize=sanitize)
+ with self.assertRaisesRegexp(errors.DataLossError,
+ 'Unable to parse binary protobuf'
+ '|Failed to consume entire buffer'):
+ _ = sess.run([ctensor] + vtensor)
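
As an editorial aside on the hunk above: DecodeProtoOpTestBase drives decode_proto with a batch of serialized messages plus parallel field_names/output_types lists, then checks that the returned sizes tensor has shape batch_shape + [len(field_names)] and that each value tensor gains one trailing dimension for repeats. A minimal, self-contained sketch of that call pattern (the TestValue message and its double_value field come from test_example.proto as used by these tests; nothing here is part of the change itself):

import numpy as np

from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
from tensorflow.contrib.proto.python.ops import decode_proto_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test


class DecodeProtoSketchTest(test.TestCase):

  def testSingleField(self):
    msg = test_example_pb2.TestValue()
    msg.double_value.append(23.5)
    # dtype=object keeps numpy from truncating the serialized bytes.
    batch = np.array([msg.SerializeToString()], dtype=object)
    with self.test_session() as sess:
      sizes, values = decode_proto_op.decode_proto(
          batch,
          message_type='tensorflow.contrib.proto.TestValue',
          field_names=['double_value'],
          output_types=[dtypes.float64])
      sizes_out, values_out = sess.run([sizes, values[0]])
    # sizes: one repeat count per (message, field) pair.
    self.assertEqual(sizes_out.shape, (1, 1))
    # values[0]: the batch shape plus a trailing dimension for repeats.
    self.assertEqual(values_out.shape, (1, 1))
    self.assertEqual(values_out[0, 0], 23.5)


if __name__ == '__main__':
  test.main()
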
diff --git a/tensorflow/contrib/proto/python/kernel_tests/descriptor_source_test.py b/tensorflow/contrib/proto/python/kernel_tests/descriptor_source_test.py
new file mode 100644
index 0000000000..32ca318f73
--- /dev/null
+++ b/tensorflow/contrib/proto/python/kernel_tests/descriptor_source_test.py
@@ -0,0 +1,36 @@
+# =============================================================================
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Tests for proto ops reading descriptors from other sources."""
+# Python3 preparedness imports.
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.contrib.proto.python.kernel_tests import descriptor_source_test_base as test_base
+from tensorflow.contrib.proto.python.ops import decode_proto_op
+from tensorflow.contrib.proto.python.ops import encode_proto_op
+from tensorflow.python.platform import test
+
+
+class DescriptorSourceTest(test_base.DescriptorSourceTestBase):
+
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ super(DescriptorSourceTest, self).__init__(decode_proto_op, encode_proto_op,
+ methodName)
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/proto/python/kernel_tests/descriptor_source_test_base.py b/tensorflow/contrib/proto/python/kernel_tests/descriptor_source_test_base.py
new file mode 100644
index 0000000000..9a1c04af32
--- /dev/null
+++ b/tensorflow/contrib/proto/python/kernel_tests/descriptor_source_test_base.py
@@ -0,0 +1,176 @@
+# =============================================================================
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Tests for proto ops reading descriptors from other sources."""
+# Python3 preparedness imports.
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+
+import numpy as np
+
+from google.protobuf.descriptor_pb2 import FieldDescriptorProto
+from google.protobuf.descriptor_pb2 import FileDescriptorSet
+from tensorflow.contrib.proto.python.kernel_tests import proto_op_test_base as test_base
+from tensorflow.python.framework import dtypes
+from tensorflow.python.platform import test
+
+
+class DescriptorSourceTestBase(test.TestCase):
+ """Base class for testing descriptor sources."""
+
+ def __init__(self, decode_module, encode_module, methodName='runTest'): # pylint: disable=invalid-name
+ """DescriptorSourceTestBase initializer.
+
+ Args:
+ decode_module: a module containing the `decode_proto` op
+ encode_module: a module containing the `encode_proto` op
+ methodName: the name of the test method (same as for test.TestCase)
+ """
+
+ super(DescriptorSourceTestBase, self).__init__(methodName)
+ self._decode_module = decode_module
+ self._encode_module = encode_module
+
+ # NOTE: We generate the descriptor programmatically instead of via a compiler
+ # because of differences between different versions of the compiler.
+ #
+ # The generated descriptor should capture the subset of `test_example.proto`
+ # used in `test_base.simple_test_case()`.
+ def _createDescriptorFile(self):
+ set_proto = FileDescriptorSet()
+
+ file_proto = set_proto.file.add(
+ name='types.proto',
+ package='tensorflow',
+ syntax='proto3')
+ enum_proto = file_proto.enum_type.add(name='DataType')
+ enum_proto.value.add(name='DT_DOUBLE', number=0)
+ enum_proto.value.add(name='DT_BOOL', number=1)
+
+ file_proto = set_proto.file.add(
+ name='test_example.proto',
+ package='tensorflow.contrib.proto',
+ dependency=['types.proto'])
+ message_proto = file_proto.message_type.add(name='TestCase')
+ message_proto.field.add(
+ name='values',
+ number=1,
+ type=FieldDescriptorProto.TYPE_MESSAGE,
+ type_name='.tensorflow.contrib.proto.TestValue',
+ label=FieldDescriptorProto.LABEL_REPEATED)
+ message_proto.field.add(
+ name='shapes',
+ number=2,
+ type=FieldDescriptorProto.TYPE_INT32,
+ label=FieldDescriptorProto.LABEL_REPEATED)
+ message_proto.field.add(
+ name='sizes',
+ number=3,
+ type=FieldDescriptorProto.TYPE_INT32,
+ label=FieldDescriptorProto.LABEL_REPEATED)
+ message_proto.field.add(
+ name='fields',
+ number=4,
+ type=FieldDescriptorProto.TYPE_MESSAGE,
+ type_name='.tensorflow.contrib.proto.FieldSpec',
+ label=FieldDescriptorProto.LABEL_REPEATED)
+
+ message_proto = file_proto.message_type.add(
+ name='TestValue')
+ message_proto.field.add(
+ name='double_value',
+ number=1,
+ type=FieldDescriptorProto.TYPE_DOUBLE,
+ label=FieldDescriptorProto.LABEL_REPEATED)
+ message_proto.field.add(
+ name='bool_value',
+ number=2,
+ type=FieldDescriptorProto.TYPE_BOOL,
+ label=FieldDescriptorProto.LABEL_REPEATED)
+
+ message_proto = file_proto.message_type.add(
+ name='FieldSpec')
+ message_proto.field.add(
+ name='name',
+ number=1,
+ type=FieldDescriptorProto.TYPE_STRING,
+ label=FieldDescriptorProto.LABEL_OPTIONAL)
+ message_proto.field.add(
+ name='dtype',
+ number=2,
+ type=FieldDescriptorProto.TYPE_ENUM,
+ type_name='.tensorflow.DataType',
+ label=FieldDescriptorProto.LABEL_OPTIONAL)
+ message_proto.field.add(
+ name='value',
+ number=3,
+ type=FieldDescriptorProto.TYPE_MESSAGE,
+ type_name='.tensorflow.contrib.proto.TestValue',
+ label=FieldDescriptorProto.LABEL_OPTIONAL)
+
+ fn = os.path.join(self.get_temp_dir(), 'descriptor.pb')
+ with open(fn, 'wb') as f:
+ f.write(set_proto.SerializeToString())
+ return fn
+
+ def _testRoundtrip(self, descriptor_source):
+ # Numpy silently truncates the strings if you don't specify dtype=object.
+ in_bufs = np.array(
+ [test_base.ProtoOpTestBase.simple_test_case().SerializeToString()],
+ dtype=object)
+ message_type = 'tensorflow.contrib.proto.TestCase'
+ field_names = ['values', 'shapes', 'sizes', 'fields']
+ tensor_types = [dtypes.string, dtypes.int32, dtypes.int32, dtypes.string]
+
+ with self.test_session() as sess:
+ sizes, field_tensors = self._decode_module.decode_proto(
+ in_bufs,
+ message_type=message_type,
+ field_names=field_names,
+ output_types=tensor_types,
+ descriptor_source=descriptor_source)
+
+ out_tensors = self._encode_module.encode_proto(
+ sizes,
+ field_tensors,
+ message_type=message_type,
+ field_names=field_names,
+ descriptor_source=descriptor_source)
+
+ out_bufs, = sess.run([out_tensors])
+
+ # Check that the re-encoded tensor has the same shape.
+ self.assertEqual(in_bufs.shape, out_bufs.shape)
+
+ # Compare the input and output.
+ for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
+ # Check that the input and output serialized messages are identical.
+ # If we fail here, there is a difference in the serialized
+ # representation but the new serialization still parses. This could
+ # be harmless (a change in map ordering?) or it could be bad (e.g.
+ # loss of packing in the encoding).
+ self.assertEqual(in_buf, out_buf)
+
+ def testWithFileDescriptorSet(self):
+ # First try parsing with a local proto db, which should fail.
+ with self.assertRaisesOpError('No descriptor found for message type'):
+ self._testRoundtrip('local://')
+
+ # Now try parsing with a FileDescriptorSet which contains the test proto.
+ descriptor_file = self._createDescriptorFile()
+ self._testRoundtrip(descriptor_file)
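
A note on descriptor_source, since the test above exercises both forms: 'local://' resolves message names against the descriptors linked into the binary, while a file path must point at a serialized FileDescriptorSet such as the one _createDescriptorFile writes. As a hedged aside, an equivalent descriptor file can typically be produced offline with protoc; the flags and the /tmp path in the sketch below are assumptions for illustration, not part of this change.

# Assumed protoc invocation (not part of this change):
#   protoc --include_imports --descriptor_set_out=/tmp/test_example.desc \
#       test_example.proto
import numpy as np

from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
from tensorflow.contrib.proto.python.ops import decode_proto_op
from tensorflow.python.framework import dtypes

serialized = np.array(
    [test_example_pb2.TestCase().SerializeToString()], dtype=object)
# Point the op at the descriptor file instead of the locally linked pool.
sizes, values = decode_proto_op.decode_proto(
    serialized,
    message_type='tensorflow.contrib.proto.TestCase',
    field_names=['sizes'],
    output_types=[dtypes.int32],
    descriptor_source='/tmp/test_example.desc')  # illustrative path only
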
diff --git a/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py b/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py
index fb33660554..fc5cd25d43 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py
+++ b/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test.py
@@ -13,164 +13,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
-"""Table-driven test for encode_proto op.
+"""Tests for encode_proto op."""
-This test is run once with each of the *.TestCase.pbtxt files
-in the test directory.
-
-It tests that encode_proto is a lossless inverse of decode_proto
-(for the specified fields).
-"""
# Python3 readiness boilerplate
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from absl.testing import parameterized
-import numpy as np
-
-from google.protobuf import text_format
-
-from tensorflow.contrib.proto.python.kernel_tests import test_base
-from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.contrib.proto.python.kernel_tests import encode_proto_op_test_base as test_base
from tensorflow.contrib.proto.python.ops import decode_proto_op
from tensorflow.contrib.proto.python.ops import encode_proto_op
-from tensorflow.python.framework import dtypes
-from tensorflow.python.ops import array_ops
-from tensorflow.python.platform import flags
from tensorflow.python.platform import test
-FLAGS = flags.FLAGS
-
-flags.DEFINE_string('message_text_file', None,
- 'A file containing a text serialized TestCase protobuf.')
-
-
-class EncodeProtoOpTest(test_base.ProtoOpTestBase, parameterized.TestCase):
-
- def testBadInputs(self):
- # Invalid field name
- with self.test_session():
- with self.assertRaisesOpError('Unknown field: non_existent_field'):
- encode_proto_op.encode_proto(
- sizes=[[1]],
- values=[np.array([[0.0]], dtype=np.int32)],
- message_type='tensorflow.contrib.proto.RepeatedPrimitiveValue',
- field_names=['non_existent_field']).eval()
-
- # Incorrect types.
- with self.test_session():
- with self.assertRaisesOpError(
- 'Incompatible type for field double_value.'):
- encode_proto_op.encode_proto(
- sizes=[[1]],
- values=[np.array([[0.0]], dtype=np.int32)],
- message_type='tensorflow.contrib.proto.RepeatedPrimitiveValue',
- field_names=['double_value']).eval()
-
- # Incorrect shapes of sizes.
- with self.test_session():
- with self.assertRaisesOpError(
- r'sizes should be batch_size \+ \[len\(field_names\)\]'):
- sizes = array_ops.placeholder(dtypes.int32)
- values = array_ops.placeholder(dtypes.float64)
- encode_proto_op.encode_proto(
- sizes=sizes,
- values=[values],
- message_type='tensorflow.contrib.proto.RepeatedPrimitiveValue',
- field_names=['double_value']).eval(feed_dict={
- sizes: [[[0, 0]]],
- values: [[0.0]]
- })
-
- # Inconsistent shapes of values.
- with self.test_session():
- with self.assertRaisesOpError(
- 'Values must match up to the last dimension'):
- sizes = array_ops.placeholder(dtypes.int32)
- values1 = array_ops.placeholder(dtypes.float64)
- values2 = array_ops.placeholder(dtypes.int32)
- (encode_proto_op.encode_proto(
- sizes=[[1, 1]],
- values=[values1, values2],
- message_type='tensorflow.contrib.proto.RepeatedPrimitiveValue',
- field_names=['double_value', 'int32_value']).eval(feed_dict={
- values1: [[0.0]],
- values2: [[0], [0]]
- }))
-
- def _testRoundtrip(self, in_bufs, message_type, fields):
-
- field_names = [f.name for f in fields]
- out_types = [f.dtype for f in fields]
-
- with self.test_session() as sess:
- sizes, field_tensors = decode_proto_op.decode_proto(
- in_bufs,
- message_type=message_type,
- field_names=field_names,
- output_types=out_types)
-
- out_tensors = encode_proto_op.encode_proto(
- sizes,
- field_tensors,
- message_type=message_type,
- field_names=field_names)
-
- out_bufs, = sess.run([out_tensors])
-
- # Check that the re-encoded tensor has the same shape.
- self.assertEqual(in_bufs.shape, out_bufs.shape)
-
- # Compare the input and output.
- for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
- in_obj = test_example_pb2.RepeatedPrimitiveValue()
- in_obj.ParseFromString(in_buf)
-
- out_obj = test_example_pb2.RepeatedPrimitiveValue()
- out_obj.ParseFromString(out_buf)
-
- # Check that the deserialized objects are identical.
- self.assertEqual(in_obj, out_obj)
-
- # Check that the input and output serialized messages are identical.
- # If we fail here, there is a difference in the serialized
- # representation but the new serialization still parses. This could
- # be harmless (a change in map ordering?) or it could be bad (e.g.
- # loss of packing in the encoding).
- self.assertEqual(in_buf, out_buf)
-
- @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
- def testRoundtrip(self, case):
- in_bufs = [primitive.SerializeToString() for primitive in case.primitive]
-
- # np.array silently truncates strings if you don't specify dtype=object.
- in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shape))
- return self._testRoundtrip(
- in_bufs, 'tensorflow.contrib.proto.RepeatedPrimitiveValue', case.field)
- @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
- def testRoundtripPacked(self, case):
- # Now try with the packed serialization.
- # We test the packed representations by loading the same test cases
- # using PackedPrimitiveValue instead of RepeatedPrimitiveValue.
- # To do this we rely on the text format being the same for packed and
- # unpacked fields, and reparse the test message using the packed version
- # of the proto.
- in_bufs = [
- # Note: float_format='.17g' is necessary to ensure preservation of
- # doubles and floats in text format.
- text_format.Parse(
- text_format.MessageToString(
- primitive, float_format='.17g'),
- test_example_pb2.PackedPrimitiveValue()).SerializeToString()
- for primitive in case.primitive
- ]
+class EncodeProtoOpTest(test_base.EncodeProtoOpTestBase):
- # np.array silently truncates strings if you don't specify dtype=object.
- in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shape))
- return self._testRoundtrip(
- in_bufs, 'tensorflow.contrib.proto.PackedPrimitiveValue', case.field)
+ def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
+ super(EncodeProtoOpTest, self).__init__(decode_proto_op, encode_proto_op,
+ methodName)
if __name__ == '__main__':
diff --git a/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test_base.py b/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test_base.py
new file mode 100644
index 0000000000..07dfb924d3
--- /dev/null
+++ b/tensorflow/contrib/proto/python/kernel_tests/encode_proto_op_test_base.py
@@ -0,0 +1,177 @@
+# =============================================================================
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Table-driven test for encode_proto op.
+
+This test is run once with each of the *.TestCase.pbtxt files
+in the test directory.
+
+It tests that encode_proto is a lossless inverse of decode_proto
+(for the specified fields).
+"""
+# Python3 readiness boilerplate
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from absl.testing import parameterized
+import numpy as np
+
+from google.protobuf import text_format
+
+from tensorflow.contrib.proto.python.kernel_tests import proto_op_test_base as test_base
+from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+
+
+class EncodeProtoOpTestBase(test_base.ProtoOpTestBase, parameterized.TestCase):
+ """Base class for testing proto encoding ops."""
+
+ def __init__(self, decode_module, encode_module, methodName='runTest'): # pylint: disable=invalid-name
+ """EncodeProtoOpTestBase initializer.
+
+ Args:
+ decode_module: a module containing the `decode_proto` op
+ encode_module: a module containing the `encode_proto` op
+ methodName: the name of the test method (same as for test.TestCase)
+ """
+
+ super(EncodeProtoOpTestBase, self).__init__(methodName)
+ self._decode_module = decode_module
+ self._encode_module = encode_module
+
+ def testBadInputs(self):
+ # Invalid field name
+ with self.test_session():
+ with self.assertRaisesOpError('Unknown field: non_existent_field'):
+ self._encode_module.encode_proto(
+ sizes=[[1]],
+ values=[np.array([[0.0]], dtype=np.int32)],
+ message_type='tensorflow.contrib.proto.TestValue',
+ field_names=['non_existent_field']).eval()
+
+ # Incorrect types.
+ with self.test_session():
+ with self.assertRaisesOpError(
+ 'Incompatible type for field double_value.'):
+ self._encode_module.encode_proto(
+ sizes=[[1]],
+ values=[np.array([[0.0]], dtype=np.int32)],
+ message_type='tensorflow.contrib.proto.TestValue',
+ field_names=['double_value']).eval()
+
+ # Incorrect shapes of sizes.
+ with self.test_session():
+ with self.assertRaisesOpError(
+ r'sizes should be batch_size \+ \[len\(field_names\)\]'):
+ sizes = array_ops.placeholder(dtypes.int32)
+ values = array_ops.placeholder(dtypes.float64)
+ self._encode_module.encode_proto(
+ sizes=sizes,
+ values=[values],
+ message_type='tensorflow.contrib.proto.TestValue',
+ field_names=['double_value']).eval(feed_dict={
+ sizes: [[[0, 0]]],
+ values: [[0.0]]
+ })
+
+ # Inconsistent shapes of values.
+ with self.test_session():
+ with self.assertRaisesOpError(
+ 'Values must match up to the last dimension'):
+ sizes = array_ops.placeholder(dtypes.int32)
+ values1 = array_ops.placeholder(dtypes.float64)
+ values2 = array_ops.placeholder(dtypes.int32)
+ (self._encode_module.encode_proto(
+ sizes=[[1, 1]],
+ values=[values1, values2],
+ message_type='tensorflow.contrib.proto.TestValue',
+ field_names=['double_value', 'int32_value']).eval(feed_dict={
+ values1: [[0.0]],
+ values2: [[0], [0]]
+ }))
+
+ def _testRoundtrip(self, in_bufs, message_type, fields):
+
+ field_names = [f.name for f in fields]
+ out_types = [f.dtype for f in fields]
+
+ with self.test_session() as sess:
+ sizes, field_tensors = self._decode_module.decode_proto(
+ in_bufs,
+ message_type=message_type,
+ field_names=field_names,
+ output_types=out_types)
+
+ out_tensors = self._encode_module.encode_proto(
+ sizes,
+ field_tensors,
+ message_type=message_type,
+ field_names=field_names)
+
+ out_bufs, = sess.run([out_tensors])
+
+ # Check that the re-encoded tensor has the same shape.
+ self.assertEqual(in_bufs.shape, out_bufs.shape)
+
+ # Compare the input and output.
+ for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
+ in_obj = test_example_pb2.TestValue()
+ in_obj.ParseFromString(in_buf)
+
+ out_obj = test_example_pb2.TestValue()
+ out_obj.ParseFromString(out_buf)
+
+ # Check that the deserialized objects are identical.
+ self.assertEqual(in_obj, out_obj)
+
+ # Check that the input and output serialized messages are identical.
+ # If we fail here, there is a difference in the serialized
+ # representation but the new serialization still parses. This could
+ # be harmless (a change in map ordering?) or it could be bad (e.g.
+ # loss of packing in the encoding).
+ self.assertEqual(in_buf, out_buf)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testRoundtrip(self, case):
+ in_bufs = [value.SerializeToString() for value in case.values]
+
+ # np.array silently truncates strings if you don't specify dtype=object.
+ in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
+ return self._testRoundtrip(
+ in_bufs, 'tensorflow.contrib.proto.TestValue', case.fields)
+
+ @parameterized.named_parameters(*test_base.ProtoOpTestBase.named_parameters())
+ def testRoundtripPacked(self, case):
+ # Now try with the packed serialization.
+ # We test the packed representations by loading the same test cases using
+ # PackedTestValue instead of TestValue. To do this we rely on the text
+ # format being the same for packed and unpacked fields, and reparse the test
+ # message using the packed version of the proto.
+ in_bufs = [
+ # Note: float_format='.17g' is necessary to ensure preservation of
+ # doubles and floats in text format.
+ text_format.Parse(
+ text_format.MessageToString(
+ value, float_format='.17g'),
+ test_example_pb2.PackedTestValue()).SerializeToString()
+ for value in case.values
+ ]
+
+ # np.array silently truncates strings if you don't specify dtype=object.
+ in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
+ return self._testRoundtrip(
+ in_bufs, 'tensorflow.contrib.proto.PackedTestValue', case.fields)
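
The property the roundtrip tests above pin down is that, for the requested fields, encode_proto is a lossless inverse of decode_proto: re-encoding the decoded tensors must reproduce the original serialized bytes. A compressed, self-contained sketch of that invariant, mirroring _testRoundtrip above and assuming the TestValue message from test_example.proto:

import numpy as np

from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
from tensorflow.contrib.proto.python.ops import decode_proto_op
from tensorflow.contrib.proto.python.ops import encode_proto_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test


class RoundtripSketchTest(test.TestCase):

  def testDoubleValueRoundtrip(self):
    msg = test_example_pb2.TestValue()
    msg.double_value.append(23.5)
    in_bufs = np.array([msg.SerializeToString()], dtype=object)
    with self.test_session() as sess:
      sizes, field_tensors = decode_proto_op.decode_proto(
          in_bufs,
          message_type='tensorflow.contrib.proto.TestValue',
          field_names=['double_value'],
          output_types=[dtypes.float64])
      out_tensors = encode_proto_op.encode_proto(
          sizes,
          field_tensors,
          message_type='tensorflow.contrib.proto.TestValue',
          field_names=['double_value'])
      out_bufs = sess.run(out_tensors)
    # Decoding and re-encoding the listed fields reproduces the input bytes.
    self.assertEqual(in_bufs[0], out_bufs[0])


if __name__ == '__main__':
  test.main()
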
diff --git a/tensorflow/contrib/proto/python/kernel_tests/proto_op_test_base.py b/tensorflow/contrib/proto/python/kernel_tests/proto_op_test_base.py
new file mode 100644
index 0000000000..2950c7dfdc
--- /dev/null
+++ b/tensorflow/contrib/proto/python/kernel_tests/proto_op_test_base.py
@@ -0,0 +1,419 @@
+# =============================================================================
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =============================================================================
+"""Test case base for testing proto operations."""
+
+# Python3 preparedness imports.
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import ctypes as ct
+import os
+
+from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
+from tensorflow.core.framework import types_pb2
+from tensorflow.python.platform import test
+
+
+class ProtoOpTestBase(test.TestCase):
+ """Base class for testing proto decoding and encoding ops."""
+
+ def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
+ super(ProtoOpTestBase, self).__init__(methodName)
+ lib = os.path.join(os.path.dirname(__file__), "libtestexample.so")
+ if os.path.isfile(lib):
+ ct.cdll.LoadLibrary(lib)
+
+ @staticmethod
+ def named_parameters():
+ return (
+ ("defaults", ProtoOpTestBase.defaults_test_case()),
+ ("minmax", ProtoOpTestBase.minmax_test_case()),
+ ("nested", ProtoOpTestBase.nested_test_case()),
+ ("optional", ProtoOpTestBase.optional_test_case()),
+ ("promote", ProtoOpTestBase.promote_test_case()),
+ ("ragged", ProtoOpTestBase.ragged_test_case()),
+ ("shaped_batch", ProtoOpTestBase.shaped_batch_test_case()),
+ ("simple", ProtoOpTestBase.simple_test_case()),
+ )
+
+ @staticmethod
+ def defaults_test_case():
+ test_case = test_example_pb2.TestCase()
+ test_case.values.add() # No fields specified, so we get all defaults.
+ test_case.shapes.append(1)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "double_value_with_default"
+ field.dtype = types_pb2.DT_DOUBLE
+ field.value.double_value.append(1.0)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "float_value_with_default"
+ field.dtype = types_pb2.DT_FLOAT
+ field.value.float_value.append(2.0)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "int64_value_with_default"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(3)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "sfixed64_value_with_default"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(11)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "sint64_value_with_default"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(13)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "uint64_value_with_default"
+ field.dtype = types_pb2.DT_UINT64
+ field.value.uint64_value.append(4)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "fixed64_value_with_default"
+ field.dtype = types_pb2.DT_UINT64
+ field.value.uint64_value.append(6)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "int32_value_with_default"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(5)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "sfixed32_value_with_default"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(10)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "sint32_value_with_default"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(12)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "uint32_value_with_default"
+ field.dtype = types_pb2.DT_UINT32
+ field.value.uint32_value.append(9)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "fixed32_value_with_default"
+ field.dtype = types_pb2.DT_UINT32
+ field.value.uint32_value.append(7)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "bool_value_with_default"
+ field.dtype = types_pb2.DT_BOOL
+ field.value.bool_value.append(True)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "string_value_with_default"
+ field.dtype = types_pb2.DT_STRING
+ field.value.string_value.append("a")
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "bytes_value_with_default"
+ field.dtype = types_pb2.DT_STRING
+ field.value.string_value.append("a longer default string")
+ return test_case
+
+ @staticmethod
+ def minmax_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ value.double_value.append(-1.7976931348623158e+308)
+ value.double_value.append(2.2250738585072014e-308)
+ value.double_value.append(1.7976931348623158e+308)
+ value.float_value.append(-3.402823466e+38)
+ value.float_value.append(1.175494351e-38)
+ value.float_value.append(3.402823466e+38)
+ value.int64_value.append(-9223372036854775808)
+ value.int64_value.append(9223372036854775807)
+ value.sfixed64_value.append(-9223372036854775808)
+ value.sfixed64_value.append(9223372036854775807)
+ value.sint64_value.append(-9223372036854775808)
+ value.sint64_value.append(9223372036854775807)
+ value.uint64_value.append(0)
+ value.uint64_value.append(18446744073709551615)
+ value.fixed64_value.append(0)
+ value.fixed64_value.append(18446744073709551615)
+ value.int32_value.append(-2147483648)
+ value.int32_value.append(2147483647)
+ value.sfixed32_value.append(-2147483648)
+ value.sfixed32_value.append(2147483647)
+ value.sint32_value.append(-2147483648)
+ value.sint32_value.append(2147483647)
+ value.uint32_value.append(0)
+ value.uint32_value.append(4294967295)
+ value.fixed32_value.append(0)
+ value.fixed32_value.append(4294967295)
+ value.bool_value.append(False)
+ value.bool_value.append(True)
+ value.string_value.append("")
+ value.string_value.append("I refer to the infinite.")
+ test_case.shapes.append(1)
+ test_case.sizes.append(3)
+ field = test_case.fields.add()
+ field.name = "double_value"
+ field.dtype = types_pb2.DT_DOUBLE
+ field.value.double_value.append(-1.7976931348623158e+308)
+ field.value.double_value.append(2.2250738585072014e-308)
+ field.value.double_value.append(1.7976931348623158e+308)
+ test_case.sizes.append(3)
+ field = test_case.fields.add()
+ field.name = "float_value"
+ field.dtype = types_pb2.DT_FLOAT
+ field.value.float_value.append(-3.402823466e+38)
+ field.value.float_value.append(1.175494351e-38)
+ field.value.float_value.append(3.402823466e+38)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "int64_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(-9223372036854775808)
+ field.value.int64_value.append(9223372036854775807)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "sfixed64_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(-9223372036854775808)
+ field.value.int64_value.append(9223372036854775807)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "sint64_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(-9223372036854775808)
+ field.value.int64_value.append(9223372036854775807)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "uint64_value"
+ field.dtype = types_pb2.DT_UINT64
+ field.value.uint64_value.append(0)
+ field.value.uint64_value.append(18446744073709551615)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "fixed64_value"
+ field.dtype = types_pb2.DT_UINT64
+ field.value.uint64_value.append(0)
+ field.value.uint64_value.append(18446744073709551615)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "int32_value"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(-2147483648)
+ field.value.int32_value.append(2147483647)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "sfixed32_value"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(-2147483648)
+ field.value.int32_value.append(2147483647)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "sint32_value"
+ field.dtype = types_pb2.DT_INT32
+ field.value.int32_value.append(-2147483648)
+ field.value.int32_value.append(2147483647)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "uint32_value"
+ field.dtype = types_pb2.DT_UINT32
+ field.value.uint32_value.append(0)
+ field.value.uint32_value.append(4294967295)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "fixed32_value"
+ field.dtype = types_pb2.DT_UINT32
+ field.value.uint32_value.append(0)
+ field.value.uint32_value.append(4294967295)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "bool_value"
+ field.dtype = types_pb2.DT_BOOL
+ field.value.bool_value.append(False)
+ field.value.bool_value.append(True)
+ test_case.sizes.append(2)
+ field = test_case.fields.add()
+ field.name = "string_value"
+ field.dtype = types_pb2.DT_STRING
+ field.value.string_value.append("")
+ field.value.string_value.append("I refer to the infinite.")
+ return test_case
+
+ @staticmethod
+ def nested_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ message_value = value.message_value.add()
+ message_value.double_value = 23.5
+ test_case.shapes.append(1)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "message_value"
+ field.dtype = types_pb2.DT_STRING
+ message_value = field.value.message_value.add()
+ message_value.double_value = 23.5
+ return test_case
+
+ @staticmethod
+ def optional_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ value.bool_value.append(True)
+ test_case.shapes.append(1)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "bool_value"
+ field.dtype = types_pb2.DT_BOOL
+ field.value.bool_value.append(True)
+ test_case.sizes.append(0)
+ field = test_case.fields.add()
+ field.name = "double_value"
+ field.dtype = types_pb2.DT_DOUBLE
+ field.value.double_value.append(0.0)
+ return test_case
+
+ @staticmethod
+ def promote_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ value.sint32_value.append(2147483647)
+ value.sfixed32_value.append(2147483647)
+ value.int32_value.append(2147483647)
+ value.fixed32_value.append(4294967295)
+ value.uint32_value.append(4294967295)
+ test_case.shapes.append(1)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "sint32_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(2147483647)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "sfixed32_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(2147483647)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "int32_value"
+ field.dtype = types_pb2.DT_INT64
+ field.value.int64_value.append(2147483647)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "fixed32_value"
+ field.dtype = types_pb2.DT_UINT64
+ field.value.uint64_value.append(4294967295)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "uint32_value"
+ field.dtype = types_pb2.DT_UINT64
+ field.value.uint64_value.append(4294967295)
+ return test_case
+
+ @staticmethod
+ def ragged_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ value.double_value.append(23.5)
+ value.double_value.append(123.0)
+ value.bool_value.append(True)
+ value = test_case.values.add()
+ value.double_value.append(3.1)
+ value.bool_value.append(False)
+ test_case.shapes.append(2)
+ test_case.sizes.append(2)
+ test_case.sizes.append(1)
+ test_case.sizes.append(1)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "double_value"
+ field.dtype = types_pb2.DT_DOUBLE
+ field.value.double_value.append(23.5)
+ field.value.double_value.append(123.0)
+ field.value.double_value.append(3.1)
+ field.value.double_value.append(0.0)
+ field = test_case.fields.add()
+ field.name = "bool_value"
+ field.dtype = types_pb2.DT_BOOL
+ field.value.bool_value.append(True)
+ field.value.bool_value.append(False)
+ return test_case
+
+ @staticmethod
+ def shaped_batch_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ value.double_value.append(23.5)
+ value.bool_value.append(True)
+ value = test_case.values.add()
+ value.double_value.append(44.0)
+ value.bool_value.append(False)
+ value = test_case.values.add()
+ value.double_value.append(3.14159)
+ value.bool_value.append(True)
+ value = test_case.values.add()
+ value.double_value.append(1.414)
+ value.bool_value.append(True)
+ value = test_case.values.add()
+ value.double_value.append(-32.2)
+ value.bool_value.append(False)
+ value = test_case.values.add()
+ value.double_value.append(0.0001)
+ value.bool_value.append(True)
+ test_case.shapes.append(3)
+ test_case.shapes.append(2)
+ for _ in range(12):
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "double_value"
+ field.dtype = types_pb2.DT_DOUBLE
+ field.value.double_value.append(23.5)
+ field.value.double_value.append(44.0)
+ field.value.double_value.append(3.14159)
+ field.value.double_value.append(1.414)
+ field.value.double_value.append(-32.2)
+ field.value.double_value.append(0.0001)
+ field = test_case.fields.add()
+ field.name = "bool_value"
+ field.dtype = types_pb2.DT_BOOL
+ field.value.bool_value.append(True)
+ field.value.bool_value.append(False)
+ field.value.bool_value.append(True)
+ field.value.bool_value.append(True)
+ field.value.bool_value.append(False)
+ field.value.bool_value.append(True)
+ return test_case
+
+ @staticmethod
+ def simple_test_case():
+ test_case = test_example_pb2.TestCase()
+ value = test_case.values.add()
+ value.double_value.append(23.5)
+ value.bool_value.append(True)
+ test_case.shapes.append(1)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "double_value"
+ field.dtype = types_pb2.DT_DOUBLE
+ field.value.double_value.append(23.5)
+ test_case.sizes.append(1)
+ field = test_case.fields.add()
+ field.name = "bool_value"
+ field.dtype = types_pb2.DT_BOOL
+ field.value.bool_value.append(True)
+ return test_case
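
To make the TestCase bookkeeping above concrete: values holds the messages that get serialized and fed to the ops, shapes is the batch shape, sizes holds one repeat count per (message, field) pair in row-major order, and each FieldSpec carries the dtype and expected decoded values for one requested field. Under simple_test_case(), decoding the single message should therefore produce a sizes tensor of shape [1, 2] and one [1, 1] value tensor per field. A small, hypothetical consistency check of that wiring (not part of the change itself):

from tensorflow.contrib.proto.python.kernel_tests import proto_op_test_base

case = proto_op_test_base.ProtoOpTestBase.simple_test_case()
assert list(case.shapes) == [1]                  # one message in the batch
assert len(case.fields) == 2                     # double_value and bool_value
assert len(case.sizes) == 1 * len(case.fields)   # one count per (message, field)
assert list(case.sizes) == [1, 1]
assert case.fields[0].name == 'double_value'
assert case.fields[0].value.double_value[0] == 23.5  # expected decoded value
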
diff --git a/tensorflow/contrib/proto/python/kernel_tests/test_base.py b/tensorflow/contrib/proto/python/kernel_tests/test_base.py
deleted file mode 100644
index 1fc8c16786..0000000000
--- a/tensorflow/contrib/proto/python/kernel_tests/test_base.py
+++ /dev/null
@@ -1,407 +0,0 @@
-# =============================================================================
-# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# =============================================================================
-"""Test case base for testing proto operations."""
-
-# Python3 preparedness imports.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import ctypes as ct
-import os
-
-from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2
-from tensorflow.core.framework import types_pb2
-from tensorflow.python.platform import test
-
-
-class ProtoOpTestBase(test.TestCase):
- """Base class for testing proto decoding and encoding ops."""
-
- def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
- super(ProtoOpTestBase, self).__init__(methodName)
- lib = os.path.join(os.path.dirname(__file__), "libtestexample.so")
- if os.path.isfile(lib):
- ct.cdll.LoadLibrary(lib)
-
- @staticmethod
- def named_parameters():
- return (
- ("defaults", ProtoOpTestBase.defaults_test_case()),
- ("minmax", ProtoOpTestBase.minmax_test_case()),
- ("nested", ProtoOpTestBase.nested_test_case()),
- ("optional", ProtoOpTestBase.optional_test_case()),
- ("promote_unsigned", ProtoOpTestBase.promote_unsigned_test_case()),
- ("ragged", ProtoOpTestBase.ragged_test_case()),
- ("shaped_batch", ProtoOpTestBase.shaped_batch_test_case()),
- ("simple", ProtoOpTestBase.simple_test_case()),
- )
-
- @staticmethod
- def defaults_test_case():
- test_case = test_example_pb2.TestCase()
- test_case.primitive.add() # No fields specified, so we get all defaults.
- test_case.shape.append(1)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "double_default"
- field.dtype = types_pb2.DT_DOUBLE
- field.expected.double_value.append(1.0)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "float_default"
- field.dtype = types_pb2.DT_FLOAT
- field.expected.float_value.append(2.0)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "int64_default"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(3)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "sfixed64_default"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(11)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "sint64_default"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(13)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "uint64_default"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(4)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "fixed64_default"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(6)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "int32_default"
- field.dtype = types_pb2.DT_INT32
- field.expected.int32_value.append(5)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "sfixed32_default"
- field.dtype = types_pb2.DT_INT32
- field.expected.int32_value.append(10)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "sint32_default"
- field.dtype = types_pb2.DT_INT32
- field.expected.int32_value.append(12)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "uint32_default"
- field.dtype = types_pb2.DT_INT32
- field.expected.int32_value.append(-1)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "fixed32_default"
- field.dtype = types_pb2.DT_INT32
- field.expected.int32_value.append(7)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "bool_default"
- field.dtype = types_pb2.DT_BOOL
- field.expected.bool_value.append(True)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "string_default"
- field.dtype = types_pb2.DT_STRING
- field.expected.string_value.append("a")
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "bytes_default"
- field.dtype = types_pb2.DT_STRING
- field.expected.string_value.append("a longer default string")
- return test_case
-
- @staticmethod
- def minmax_test_case():
- test_case = test_example_pb2.TestCase()
- primitive = test_case.primitive.add()
- primitive.double_value.append(-1.7976931348623158e+308)
- primitive.double_value.append(2.2250738585072014e-308)
- primitive.double_value.append(1.7976931348623158e+308)
- primitive.float_value.append(-3.402823466e+38)
- primitive.float_value.append(1.175494351e-38)
- primitive.float_value.append(3.402823466e+38)
- primitive.int64_value.append(-9223372036854775808)
- primitive.int64_value.append(9223372036854775807)
- primitive.sfixed64_value.append(-9223372036854775808)
- primitive.sfixed64_value.append(9223372036854775807)
- primitive.sint64_value.append(-9223372036854775808)
- primitive.sint64_value.append(9223372036854775807)
- primitive.uint64_value.append(0)
- primitive.uint64_value.append(18446744073709551615)
- primitive.fixed64_value.append(0)
- primitive.fixed64_value.append(18446744073709551615)
- primitive.int32_value.append(-2147483648)
- primitive.int32_value.append(2147483647)
- primitive.sfixed32_value.append(-2147483648)
- primitive.sfixed32_value.append(2147483647)
- primitive.sint32_value.append(-2147483648)
- primitive.sint32_value.append(2147483647)
- primitive.uint32_value.append(0)
- primitive.uint32_value.append(4294967295)
- primitive.fixed32_value.append(0)
- primitive.fixed32_value.append(4294967295)
- primitive.bool_value.append(False)
- primitive.bool_value.append(True)
- primitive.string_value.append("")
- primitive.string_value.append("I refer to the infinite.")
- test_case.shape.append(1)
- test_case.sizes.append(3)
- field = test_case.field.add()
- field.name = "double_value"
- field.dtype = types_pb2.DT_DOUBLE
- field.expected.double_value.append(-1.7976931348623158e+308)
- field.expected.double_value.append(2.2250738585072014e-308)
- field.expected.double_value.append(1.7976931348623158e+308)
- test_case.sizes.append(3)
- field = test_case.field.add()
- field.name = "float_value"
- field.dtype = types_pb2.DT_FLOAT
- field.expected.float_value.append(-3.402823466e+38)
- field.expected.float_value.append(1.175494351e-38)
- field.expected.float_value.append(3.402823466e+38)
- test_case.sizes.append(2)
- field = test_case.field.add()
- field.name = "int64_value"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(-9223372036854775808)
- field.expected.int64_value.append(9223372036854775807)
- test_case.sizes.append(2)
- field = test_case.field.add()
- field.name = "sfixed64_value"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(-9223372036854775808)
- field.expected.int64_value.append(9223372036854775807)
- test_case.sizes.append(2)
- field = test_case.field.add()
- field.name = "sint64_value"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(-9223372036854775808)
- field.expected.int64_value.append(9223372036854775807)
- test_case.sizes.append(2)
- field = test_case.field.add()
- field.name = "uint64_value"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(0)
- field.expected.int64_value.append(-1)
- test_case.sizes.append(2)
- field = test_case.field.add()
- field.name = "fixed64_value"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(0)
- field.expected.int64_value.append(-1)
- test_case.sizes.append(2)
- field = test_case.field.add()
- field.name = "int32_value"
- field.dtype = types_pb2.DT_INT32
- field.expected.int32_value.append(-2147483648)
- field.expected.int32_value.append(2147483647)
- test_case.sizes.append(2)
- field = test_case.field.add()
- field.name = "sfixed32_value"
- field.dtype = types_pb2.DT_INT32
- field.expected.int32_value.append(-2147483648)
- field.expected.int32_value.append(2147483647)
- test_case.sizes.append(2)
- field = test_case.field.add()
- field.name = "sint32_value"
- field.dtype = types_pb2.DT_INT32
- field.expected.int32_value.append(-2147483648)
- field.expected.int32_value.append(2147483647)
- test_case.sizes.append(2)
- field = test_case.field.add()
- field.name = "uint32_value"
- field.dtype = types_pb2.DT_INT32
- field.expected.int32_value.append(0)
- field.expected.int32_value.append(-1)
- test_case.sizes.append(2)
- field = test_case.field.add()
- field.name = "fixed32_value"
- field.dtype = types_pb2.DT_INT32
- field.expected.int32_value.append(0)
- field.expected.int32_value.append(-1)
- test_case.sizes.append(2)
- field = test_case.field.add()
- field.name = "bool_value"
- field.dtype = types_pb2.DT_BOOL
- field.expected.bool_value.append(False)
- field.expected.bool_value.append(True)
- test_case.sizes.append(2)
- field = test_case.field.add()
- field.name = "string_value"
- field.dtype = types_pb2.DT_STRING
- field.expected.string_value.append("")
- field.expected.string_value.append("I refer to the infinite.")
- return test_case
-
- @staticmethod
- def nested_test_case():
- test_case = test_example_pb2.TestCase()
- primitive = test_case.primitive.add()
- message_value = primitive.message_value.add()
- message_value.double_value = 23.5
- test_case.shape.append(1)
- test_case.sizes.append(1)
- field = test_case.field.add()
- field.name = "message_value"
- field.dtype = types_pb2.DT_STRING
- message_value = field.expected.message_value.add()
- message_value.double_value = 23.5
- return test_case
-
- @staticmethod
- def optional_test_case():
- test_case = test_example_pb2.TestCase()
- primitive = test_case.primitive.add()
- primitive.bool_value.append(True)
- test_case.shape.append(1)
- test_case.sizes.append(1)
- field = test_case.field.add()
- field.name = "bool_value"
- field.dtype = types_pb2.DT_BOOL
- field.expected.bool_value.append(True)
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "double_value"
- field.dtype = types_pb2.DT_DOUBLE
- field.expected.double_value.append(0.0)
- return test_case
-
- @staticmethod
- def promote_unsigned_test_case():
- test_case = test_example_pb2.TestCase()
- primitive = test_case.primitive.add()
- primitive.fixed32_value.append(4294967295)
- primitive.uint32_value.append(4294967295)
- test_case.shape.append(1)
- test_case.sizes.append(1)
- field = test_case.field.add()
- field.name = "fixed32_value"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(4294967295)
- test_case.sizes.append(1)
- field = test_case.field.add()
- field.name = "uint32_value"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(4294967295)
- # Comes from an explicitly-specified default
- test_case.sizes.append(0)
- field = test_case.field.add()
- field.name = "uint32_default"
- field.dtype = types_pb2.DT_INT64
- field.expected.int64_value.append(4294967295)
- return test_case
-
- @staticmethod
- def ragged_test_case():
- test_case = test_example_pb2.TestCase()
- primitive = test_case.primitive.add()
- primitive.double_value.append(23.5)
- primitive.double_value.append(123.0)
- primitive.bool_value.append(True)
- primitive = test_case.primitive.add()
- primitive.double_value.append(3.1)
- primitive.bool_value.append(False)
- test_case.shape.append(2)
- test_case.sizes.append(2)
- test_case.sizes.append(1)
- test_case.sizes.append(1)
- test_case.sizes.append(1)
- field = test_case.field.add()
- field.name = "double_value"
- field.dtype = types_pb2.DT_DOUBLE
- field.expected.double_value.append(23.5)
- field.expected.double_value.append(123.0)
- field.expected.double_value.append(3.1)
- field.expected.double_value.append(0.0)
- field = test_case.field.add()
- field.name = "bool_value"
- field.dtype = types_pb2.DT_BOOL
- field.expected.bool_value.append(True)
- field.expected.bool_value.append(False)
- return test_case
-
- @staticmethod
- def shaped_batch_test_case():
- test_case = test_example_pb2.TestCase()
- primitive = test_case.primitive.add()
- primitive.double_value.append(23.5)
- primitive.bool_value.append(True)
- primitive = test_case.primitive.add()
- primitive.double_value.append(44.0)
- primitive.bool_value.append(False)
- primitive = test_case.primitive.add()
- primitive.double_value.append(3.14159)
- primitive.bool_value.append(True)
- primitive = test_case.primitive.add()
- primitive.double_value.append(1.414)
- primitive.bool_value.append(True)
- primitive = test_case.primitive.add()
- primitive.double_value.append(-32.2)
- primitive.bool_value.append(False)
- primitive = test_case.primitive.add()
- primitive.double_value.append(0.0001)
- primitive.bool_value.append(True)
- test_case.shape.append(3)
- test_case.shape.append(2)
- for _ in range(12):
- test_case.sizes.append(1)
- field = test_case.field.add()
- field.name = "double_value"
- field.dtype = types_pb2.DT_DOUBLE
- field.expected.double_value.append(23.5)
- field.expected.double_value.append(44.0)
- field.expected.double_value.append(3.14159)
- field.expected.double_value.append(1.414)
- field.expected.double_value.append(-32.2)
- field.expected.double_value.append(0.0001)
- field = test_case.field.add()
- field.name = "bool_value"
- field.dtype = types_pb2.DT_BOOL
- field.expected.bool_value.append(True)
- field.expected.bool_value.append(False)
- field.expected.bool_value.append(True)
- field.expected.bool_value.append(True)
- field.expected.bool_value.append(False)
- field.expected.bool_value.append(True)
- return test_case
-
- @staticmethod
- def simple_test_case():
- test_case = test_example_pb2.TestCase()
- primitive = test_case.primitive.add()
- primitive.double_value.append(23.5)
- primitive.bool_value.append(True)
- test_case.shape.append(1)
- test_case.sizes.append(1)
- field = test_case.field.add()
- field.name = "double_value"
- field.dtype = types_pb2.DT_DOUBLE
- field.expected.double_value.append(23.5)
- test_case.sizes.append(1)
- field = test_case.field.add()
- field.name = "bool_value"
- field.dtype = types_pb2.DT_BOOL
- field.expected.bool_value.append(True)
- return test_case
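The fixtures removed above pair serialized test messages with the tensors the decode op is expected to produce. As a rough, hedged sketch of that consumption path (the import paths, the TestValue message name from the renamed proto below, and the local variable names are assumptions; it also assumes libtestexample.so has been loaded so the message descriptors are registered, as the base-class constructor above does):

import tensorflow as tf
from tensorflow.contrib.proto.python.ops import decode_proto_op
from tensorflow.contrib.proto.python.kernel_tests import test_example_pb2

# Build a one-message batch holding a single double, loosely mirroring
# simple_test_case() above.
msg = test_example_pb2.TestValue()
msg.double_value.append(23.5)
serialized = tf.constant([msg.SerializeToString()])

# Decode the double_value field back into a dense [batch, max_size] tensor.
sizes, (doubles,) = decode_proto_op.decode_proto(
    bytes=serialized,
    message_type='tensorflow.contrib.proto.TestValue',
    field_names=['double_value'],
    output_types=[tf.float64])

with tf.Session() as sess:
    print(sess.run(doubles))  # expected: [[23.5]]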
diff --git a/tensorflow/contrib/proto/python/kernel_tests/test_example.proto b/tensorflow/contrib/proto/python/kernel_tests/test_example.proto
index a2c88e372b..674d881220 100644
--- a/tensorflow/contrib/proto/python/kernel_tests/test_example.proto
+++ b/tensorflow/contrib/proto/python/kernel_tests/test_example.proto
@@ -1,6 +1,4 @@
// Test description and protos to work with it.
-//
-// Many of the protos in this file are for unit tests that haven't been written yet.
syntax = "proto2";
@@ -8,54 +6,27 @@ import "tensorflow/core/framework/types.proto";
package tensorflow.contrib.proto;
-// A TestCase holds a proto and a bunch of assertions
-// about how it should decode.
+// A TestCase holds a proto and assertions about how it should decode.
message TestCase {
- // A batch of primitives to be serialized and decoded.
- repeated RepeatedPrimitiveValue primitive = 1;
- // The shape of the batch.
- repeated int32 shape = 2;
+ // Batches of primitive values.
+ repeated TestValue values = 1;
+ // The batch shapes.
+ repeated int32 shapes = 2;
// Expected sizes for each field.
repeated int32 sizes = 3;
// Expected values for each field.
- repeated FieldSpec field = 4;
+ repeated FieldSpec fields = 4;
};
// FieldSpec describes the expected output for a single field.
message FieldSpec {
optional string name = 1;
optional tensorflow.DataType dtype = 2;
- optional RepeatedPrimitiveValue expected = 3;
+ optional TestValue value = 3;
};
+// NOTE: This definition must be kept in sync with PackedTestValue.
message TestValue {
- optional PrimitiveValue primitive_value = 1;
- optional EnumValue enum_value = 2;
- optional MessageValue message_value = 3;
- optional RepeatedMessageValue repeated_message_value = 4;
- optional RepeatedPrimitiveValue repeated_primitive_value = 6;
-}
-
-message PrimitiveValue {
- optional double double_value = 1;
- optional float float_value = 2;
- optional int64 int64_value = 3;
- optional uint64 uint64_value = 4;
- optional int32 int32_value = 5;
- optional fixed64 fixed64_value = 6;
- optional fixed32 fixed32_value = 7;
- optional bool bool_value = 8;
- optional string string_value = 9;
- optional bytes bytes_value = 12;
- optional uint32 uint32_value = 13;
- optional sfixed32 sfixed32_value = 15;
- optional sfixed64 sfixed64_value = 16;
- optional sint32 sint32_value = 17;
- optional sint64 sint64_value = 18;
-}
-
-// NOTE: This definition must be kept in sync with PackedPrimitiveValue.
-message RepeatedPrimitiveValue {
repeated double double_value = 1;
repeated float float_value = 2;
repeated int64 int64_value = 3;
@@ -74,30 +45,31 @@ message RepeatedPrimitiveValue {
repeated PrimitiveValue message_value = 19;
// Optional fields with explicitly-specified defaults.
- optional double double_default = 20 [default = 1.0];
- optional float float_default = 21 [default = 2.0];
- optional int64 int64_default = 22 [default = 3];
- optional uint64 uint64_default = 23 [default = 4];
- optional int32 int32_default = 24 [default = 5];
- optional fixed64 fixed64_default = 25 [default = 6];
- optional fixed32 fixed32_default = 26 [default = 7];
- optional bool bool_default = 27 [default = true];
- optional string string_default = 28 [default = "a"];
- optional bytes bytes_default = 29 [default = "a longer default string"];
- optional uint32 uint32_default = 30 [default = 4294967295];
- optional sfixed32 sfixed32_default = 31 [default = 10];
- optional sfixed64 sfixed64_default = 32 [default = 11];
- optional sint32 sint32_default = 33 [default = 12];
- optional sint64 sint64_default = 34 [default = 13];
+ optional double double_value_with_default = 20 [default = 1.0];
+ optional float float_value_with_default = 21 [default = 2.0];
+ optional int64 int64_value_with_default = 22 [default = 3];
+ optional uint64 uint64_value_with_default = 23 [default = 4];
+ optional int32 int32_value_with_default = 24 [default = 5];
+ optional fixed64 fixed64_value_with_default = 25 [default = 6];
+ optional fixed32 fixed32_value_with_default = 26 [default = 7];
+ optional bool bool_value_with_default = 27 [default = true];
+ optional string string_value_with_default = 28 [default = "a"];
+ optional bytes bytes_value_with_default = 29
+ [default = "a longer default string"];
+ optional uint32 uint32_value_with_default = 30 [default = 9];
+ optional sfixed32 sfixed32_value_with_default = 31 [default = 10];
+ optional sfixed64 sfixed64_value_with_default = 32 [default = 11];
+ optional sint32 sint32_value_with_default = 33 [default = 12];
+ optional sint64 sint64_value_with_default = 34 [default = 13];
}
-// A PackedPrimitiveValue looks exactly the same as a RepeatedPrimitiveValue
-// in the text format, but the binary serializion is different.
-// We test the packed representations by loading the same test cases
-// using this definition instead of RepeatedPrimitiveValue.
-// NOTE: This definition must be kept in sync with RepeatedPrimitiveValue
-// in every way except the packed=true declaration.
-message PackedPrimitiveValue {
+// A PackedTestValue looks exactly the same as a TestValue in the text format,
+// but the binary serialization is different. We test the packed representations
+// by loading the same test cases using this definition instead of TestValue.
+//
+// NOTE: This definition must be kept in sync with TestValue in every way except
+// the packed=true declaration.
+message PackedTestValue {
repeated double double_value = 1 [packed = true];
repeated float float_value = 2 [packed = true];
repeated int64 int64_value = 3 [packed = true];
@@ -115,23 +87,53 @@ message PackedPrimitiveValue {
repeated sint64 sint64_value = 18 [packed = true];
repeated PrimitiveValue message_value = 19;
- optional double double_default = 20 [default = 1.0];
- optional float float_default = 21 [default = 2.0];
- optional int64 int64_default = 22 [default = 3];
- optional uint64 uint64_default = 23 [default = 4];
- optional int32 int32_default = 24 [default = 5];
- optional fixed64 fixed64_default = 25 [default = 6];
- optional fixed32 fixed32_default = 26 [default = 7];
- optional bool bool_default = 27 [default = true];
- optional string string_default = 28 [default = "a"];
- optional bytes bytes_default = 29 [default = "a longer default string"];
- optional uint32 uint32_default = 30 [default = 4294967295];
- optional sfixed32 sfixed32_default = 31 [default = 10];
- optional sfixed64 sfixed64_default = 32 [default = 11];
- optional sint32 sint32_default = 33 [default = 12];
- optional sint64 sint64_default = 34 [default = 13];
+ optional double double_value_with_default = 20 [default = 1.0];
+ optional float float_value_with_default = 21 [default = 2.0];
+ optional int64 int64_value_with_default = 22 [default = 3];
+ optional uint64 uint64_value_with_default = 23 [default = 4];
+ optional int32 int32_value_with_default = 24 [default = 5];
+ optional fixed64 fixed64_value_with_default = 25 [default = 6];
+ optional fixed32 fixed32_value_with_default = 26 [default = 7];
+ optional bool bool_value_with_default = 27 [default = true];
+ optional string string_value_with_default = 28 [default = "a"];
+ optional bytes bytes_value_with_default = 29
+ [default = "a longer default string"];
+ optional uint32 uint32_value_with_default = 30 [default = 9];
+ optional sfixed32 sfixed32_value_with_default = 31 [default = 10];
+ optional sfixed64 sfixed64_value_with_default = 32 [default = 11];
+ optional sint32 sint32_value_with_default = 33 [default = 12];
+ optional sint64 sint64_value_with_default = 34 [default = 13];
}
+message PrimitiveValue {
+ optional double double_value = 1;
+ optional float float_value = 2;
+ optional int64 int64_value = 3;
+ optional uint64 uint64_value = 4;
+ optional int32 int32_value = 5;
+ optional fixed64 fixed64_value = 6;
+ optional fixed32 fixed32_value = 7;
+ optional bool bool_value = 8;
+ optional string string_value = 9;
+ optional bytes bytes_value = 12;
+ optional uint32 uint32_value = 13;
+ optional sfixed32 sfixed32_value = 15;
+ optional sfixed64 sfixed64_value = 16;
+ optional sint32 sint32_value = 17;
+ optional sint64 sint64_value = 18;
+}
+
+// Message containing fields with field numbers higher than any field above.
+// An instance of this message is prepended to each binary message in the test
+// to exercise the code path that handles fields encoded out of order of field
+// number.
+message ExtraFields {
+ optional string string_value = 1776;
+ optional bool bool_value = 1777;
+}
+
+// The messages below are for yet-to-be-created tests.
+
message EnumValue {
enum Color {
RED = 0;
@@ -171,12 +173,3 @@ message RepeatedMessageValue {
repeated NestedMessageValue message_values = 11;
}
-
-// Message containing fields with field numbers higher than any field above. An
-// instance of this message is prepended to each binary message in the test to
-// exercise the code path that handles fields encoded out of order of field
-// number.
-message ExtraFields {
- optional string string_value = 1776;
- optional bool bool_value = 1777;
-}
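The PackedTestValue note above comes down to how proto2 serializes repeated scalar fields. A tiny illustration of the two wire encodings for a hypothetical repeated int32 field with field number 5 holding [1, 2] (generic protobuf wire-format facts, not code from this change):

# Unpacked: the (field 5, varint) tag byte 0x28 is repeated before each element.
unpacked = bytes([0x28, 0x01, 0x28, 0x02])
# Packed: one (field 5, length-delimited) tag 0x2a, a byte length, then the varints.
packed = bytes([0x2a, 0x02, 0x01, 0x02])
# Both decode to the same repeated values; only the serialization differs,
# which is why the same test cases can be loaded through either message definition.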
diff --git a/tensorflow/contrib/rnn/python/ops/rnn.py b/tensorflow/contrib/rnn/python/ops/rnn.py
index 2f0caadda3..0266b72dcb 100644
--- a/tensorflow/contrib/rnn/python/ops/rnn.py
+++ b/tensorflow/contrib/rnn/python/ops/rnn.py
@@ -175,7 +175,7 @@ def stack_bidirectional_dynamic_rnn(cells_fw,
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
outputs: Output `Tensor` shaped:
- `batch_size, max_time, layers_output]`. Where layers_output
+ `[batch_size, max_time, layers_output]`. Where layers_output
are depth-concatenated forward and backward outputs.
output_states_fw is the final states, one tensor per layer,
of the forward rnn.
diff --git a/tensorflow/contrib/rpc/python/kernel_tests/BUILD b/tensorflow/contrib/rpc/python/kernel_tests/BUILD
index 2311c15a68..cb0b89ae55 100644
--- a/tensorflow/contrib/rpc/python/kernel_tests/BUILD
+++ b/tensorflow/contrib/rpc/python/kernel_tests/BUILD
@@ -1,5 +1,3 @@
-# TODO(b/76425722): Port everything in here to OS (currently excluded).
-
package(default_visibility = ["//visibility:public"])
licenses(["notice"]) # Apache 2.0
@@ -17,7 +15,6 @@ tf_proto_library(
srcs = ["test_example.proto"],
has_services = 1,
cc_api_version = 2,
- protodeps = ["//tensorflow/core:protos_all"],
)
py_library(
diff --git a/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py b/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py
index 27273d16b1..1c23c28860 100644
--- a/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py
+++ b/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_base.py
@@ -51,23 +51,23 @@ class RpcOpTestBase(object):
def testScalarHostPortRpc(self):
with self.test_session() as sess:
request_tensors = (
- test_example_pb2.TestCase(shape=[1, 2, 3]).SerializeToString())
+ test_example_pb2.TestCase(values=[1, 2, 3]).SerializeToString())
response_tensors = self.rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertEqual(response_tensors.shape, ())
response_values = sess.run(response_tensors)
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values))
- self.assertAllEqual([2, 3, 4], response_message.shape)
+ self.assertAllEqual([2, 3, 4], response_message.values)
def testScalarHostPortTryRpc(self):
with self.test_session() as sess:
request_tensors = (
- test_example_pb2.TestCase(shape=[1, 2, 3]).SerializeToString())
+ test_example_pb2.TestCase(values=[1, 2, 3]).SerializeToString())
response_tensors, status_code, status_message = self.try_rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertEqual(status_code.shape, ())
@@ -77,7 +77,7 @@ class RpcOpTestBase(object):
sess.run((response_tensors, status_code, status_message)))
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values))
- self.assertAllEqual([2, 3, 4], response_message.shape)
+ self.assertAllEqual([2, 3, 4], response_message.values)
# For the base Rpc op, don't expect to get error status back.
self.assertEqual(errors.OK, status_code_values)
self.assertEqual(b'', status_message_values)
@@ -86,7 +86,7 @@ class RpcOpTestBase(object):
with self.test_session() as sess:
request_tensors = []
response_tensors = self.rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertAllEqual(response_tensors.shape, [0])
@@ -95,7 +95,7 @@ class RpcOpTestBase(object):
def testInvalidMethod(self):
for method in [
- '/InvalidService.IncrementTestShapes',
+ '/InvalidService.Increment',
self.get_method_name('InvalidMethodName')
]:
with self.test_session() as sess:
@@ -115,12 +115,12 @@ class RpcOpTestBase(object):
with self.assertRaises(errors.UnavailableError):
sess.run(
self.rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=address,
request=''))
_, status_code_value, status_message_value = sess.run(
self.try_rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=address,
request=''))
self.assertEqual(errors.UNAVAILABLE, status_code_value)
@@ -182,10 +182,10 @@ class RpcOpTestBase(object):
with self.test_session() as sess:
request_tensors = [
test_example_pb2.TestCase(
- shape=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
+ values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
]
response_tensors = self.rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
self.assertEqual(response_tensors.shape, (20,))
@@ -194,17 +194,17 @@ class RpcOpTestBase(object):
for i in range(20):
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values[i]))
- self.assertAllEqual([i + 1, i + 2, i + 3], response_message.shape)
+ self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
def testVecHostPortManyParallelRpcs(self):
with self.test_session() as sess:
request_tensors = [
test_example_pb2.TestCase(
- shape=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
+ values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
]
many_response_tensors = [
self.rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors) for _ in range(10)
]
@@ -216,25 +216,25 @@ class RpcOpTestBase(object):
for i in range(20):
response_message = test_example_pb2.TestCase()
self.assertTrue(response_message.ParseFromString(response_values[i]))
- self.assertAllEqual([i + 1, i + 2, i + 3], response_message.shape)
+ self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
def testVecHostPortRpcUsingEncodeAndDecodeProto(self):
with self.test_session() as sess:
request_tensors = encode_proto_op.encode_proto(
message_type='tensorflow.contrib.rpc.TestCase',
- field_names=['shape'],
+ field_names=['values'],
sizes=[[3]] * 20,
values=[
[[i, i + 1, i + 2] for i in range(20)],
])
response_tensor_strings = self.rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=self._address,
request=request_tensors)
_, (response_shape,) = decode_proto_op.decode_proto(
bytes=response_tensor_strings,
message_type='tensorflow.contrib.rpc.TestCase',
- field_names=['shape'],
+ field_names=['values'],
output_types=[dtypes.int32])
response_shape_values = sess.run(response_shape)
self.assertAllEqual([[i + 1, i + 2, i + 3]
@@ -285,9 +285,9 @@ class RpcOpTestBase(object):
addresses = flatten([[
self._address, 'unix:/tmp/this_unix_socket_doesnt_exist_97820348!!@'
] for _ in range(10)])
- request = test_example_pb2.TestCase(shape=[0, 1, 2]).SerializeToString()
+ request = test_example_pb2.TestCase(values=[0, 1, 2]).SerializeToString()
response_tensors, status_code, _ = self.try_rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=addresses,
request=request)
response_tensors_values, status_code_values = sess.run((response_tensors,
@@ -303,9 +303,9 @@ class RpcOpTestBase(object):
flatten = lambda x: list(itertools.chain.from_iterable(x))
with self.test_session() as sess:
methods = flatten(
- [[self.get_method_name('IncrementTestShapes'), 'InvalidMethodName']
+ [[self.get_method_name('Increment'), 'InvalidMethodName']
for _ in range(10)])
- request = test_example_pb2.TestCase(shape=[0, 1, 2]).SerializeToString()
+ request = test_example_pb2.TestCase(values=[0, 1, 2]).SerializeToString()
response_tensors, status_code, _ = self.try_rpc(
method=methods, address=self._address, request=request)
response_tensors_values, status_code_values = sess.run((response_tensors,
@@ -325,10 +325,10 @@ class RpcOpTestBase(object):
] for _ in range(10)])
requests = [
test_example_pb2.TestCase(
- shape=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
+ values=[i, i + 1, i + 2]).SerializeToString() for i in range(20)
]
response_tensors, status_code, _ = self.try_rpc(
- method=self.get_method_name('IncrementTestShapes'),
+ method=self.get_method_name('Increment'),
address=addresses,
request=requests)
response_tensors_values, status_code_values = sess.run((response_tensors,
@@ -343,4 +343,4 @@ class RpcOpTestBase(object):
response_message = test_example_pb2.TestCase()
self.assertTrue(
response_message.ParseFromString(response_tensors_values[i]))
- self.assertAllEqual([i + 1, i + 2, i + 3], response_message.shape)
+ self.assertAllEqual([i + 1, i + 2, i + 3], response_message.values)
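A condensed, hedged sketch of the encode/decode round trip the last test above performs, outside the RPC harness. The import paths are assumptions based on how these ops are referenced in this file; it assumes graph mode and that the TestCase descriptor is linked into the binary, as it is for these tests:

import tensorflow as tf
from tensorflow.contrib.proto.python.ops import decode_proto_op
from tensorflow.contrib.proto.python.ops import encode_proto_op

# One request with values=[1, 2, 3] -> serialized TestCase string tensor.
request = encode_proto_op.encode_proto(
    message_type='tensorflow.contrib.rpc.TestCase',
    field_names=['values'],
    sizes=[[3]],
    values=[[[1, 2, 3]]])

# Decode the same field back out of the serialized bytes.
_, (values,) = decode_proto_op.decode_proto(
    bytes=request,
    message_type='tensorflow.contrib.rpc.TestCase',
    field_names=['values'],
    output_types=[tf.int32])

with tf.Session() as sess:
    print(sess.run(values))  # expected: [[1 2 3]]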
diff --git a/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py b/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py
index 7cbd636cb1..265254aa51 100644
--- a/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py
+++ b/tensorflow/contrib/rpc/python/kernel_tests/rpc_op_test_servicer.py
@@ -30,8 +30,8 @@ from tensorflow.contrib.rpc.python.kernel_tests import test_example_pb2_grpc
class RpcOpTestServicer(test_example_pb2_grpc.TestCaseServiceServicer):
"""Test servicer for RpcOp tests."""
- def IncrementTestShapes(self, request, context):
- """Increment the entries in the shape attribute of request.
+ def Increment(self, request, context):
+ """Increment the entries in the `values` attribute of request.
Args:
request: input TestCase.
@@ -40,8 +40,8 @@ class RpcOpTestServicer(test_example_pb2_grpc.TestCaseServiceServicer):
Returns:
output TestCase.
"""
- for i in range(len(request.shape)):
- request.shape[i] += 1
+ for i in range(len(request.values)):
+ request.values[i] += 1
return request
def AlwaysFailWithInvalidArgument(self, request, context):
diff --git a/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto b/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto
index 96f4550f62..8141466349 100644
--- a/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto
+++ b/tensorflow/contrib/rpc/python/kernel_tests/test_example.proto
@@ -1,29 +1,17 @@
// Test description and protos to work with it.
-//
-// Many of the protos in this file are for unit tests that haven't been written yet.
syntax = "proto2";
-import "tensorflow/core/framework/types.proto";
-
package tensorflow.contrib.rpc;
-// A TestCase holds a proto and a bunch of assertions
-// about how it should decode.
+// A TestCase holds a sequence of values.
message TestCase {
- // A batch of primitives to be serialized and decoded.
- repeated RepeatedPrimitiveValue primitive = 1;
- // The shape of the batch.
- repeated int32 shape = 2;
- // Expected sizes for each field.
- repeated int32 sizes = 3;
- // Expected values for each field.
- repeated FieldSpec field = 4;
+ repeated int32 values = 1;
};
service TestCaseService {
- // Copy input, and increment each entry in 'shape' by 1.
- rpc IncrementTestShapes(TestCase) returns (TestCase) {
+ // Copy input, and increment each entry in 'values' by 1.
+ rpc Increment(TestCase) returns (TestCase) {
}
// Sleep forever.
@@ -42,130 +30,3 @@ service TestCaseService {
rpc SometimesFailWithInvalidArgument(TestCase) returns (TestCase) {
}
};
-
-// FieldSpec describes the expected output for a single field.
-message FieldSpec {
- optional string name = 1;
- optional tensorflow.DataType dtype = 2;
- optional RepeatedPrimitiveValue expected = 3;
-};
-
-message TestValue {
- optional PrimitiveValue primitive_value = 1;
- optional EnumValue enum_value = 2;
- optional MessageValue message_value = 3;
- optional RepeatedMessageValue repeated_message_value = 4;
- optional RepeatedPrimitiveValue repeated_primitive_value = 6;
-}
-
-message PrimitiveValue {
- optional double double_value = 1;
- optional float float_value = 2;
- optional int64 int64_value = 3;
- optional uint64 uint64_value = 4;
- optional int32 int32_value = 5;
- optional fixed64 fixed64_value = 6;
- optional fixed32 fixed32_value = 7;
- optional bool bool_value = 8;
- optional string string_value = 9;
- optional bytes bytes_value = 12;
- optional uint32 uint32_value = 13;
- optional sfixed32 sfixed32_value = 15;
- optional sfixed64 sfixed64_value = 16;
- optional sint32 sint32_value = 17;
- optional sint64 sint64_value = 18;
-}
-
-// NOTE: This definition must be kept in sync with PackedPrimitiveValue.
-message RepeatedPrimitiveValue {
- repeated double double_value = 1;
- repeated float float_value = 2;
- repeated int64 int64_value = 3;
- repeated uint64 uint64_value = 4;
- repeated int32 int32_value = 5;
- repeated fixed64 fixed64_value = 6;
- repeated fixed32 fixed32_value = 7;
- repeated bool bool_value = 8;
- repeated string string_value = 9;
- repeated bytes bytes_value = 12;
- repeated uint32 uint32_value = 13;
- repeated sfixed32 sfixed32_value = 15;
- repeated sfixed64 sfixed64_value = 16;
- repeated sint32 sint32_value = 17;
- repeated sint64 sint64_value = 18;
- repeated PrimitiveValue message_value = 19;
-}
-
-// A PackedPrimitiveValue looks exactly the same as a RepeatedPrimitiveValue
-// in the text format, but the binary serializion is different.
-// We test the packed representations by loading the same test cases
-// using this definition instead of RepeatedPrimitiveValue.
-// NOTE: This definition must be kept in sync with RepeatedPrimitiveValue
-// in every way except the packed=true declaration.
-message PackedPrimitiveValue {
- repeated double double_value = 1 [packed = true];
- repeated float float_value = 2 [packed = true];
- repeated int64 int64_value = 3 [packed = true];
- repeated uint64 uint64_value = 4 [packed = true];
- repeated int32 int32_value = 5 [packed = true];
- repeated fixed64 fixed64_value = 6 [packed = true];
- repeated fixed32 fixed32_value = 7 [packed = true];
- repeated bool bool_value = 8 [packed = true];
- repeated string string_value = 9;
- repeated bytes bytes_value = 12;
- repeated uint32 uint32_value = 13 [packed = true];
- repeated sfixed32 sfixed32_value = 15 [packed = true];
- repeated sfixed64 sfixed64_value = 16 [packed = true];
- repeated sint32 sint32_value = 17 [packed = true];
- repeated sint64 sint64_value = 18 [packed = true];
- repeated PrimitiveValue message_value = 19;
-}
-
-message EnumValue {
- enum Color {
- RED = 0;
- ORANGE = 1;
- YELLOW = 2;
- GREEN = 3;
- BLUE = 4;
- INDIGO = 5;
- VIOLET = 6;
- };
- optional Color enum_value = 14;
- repeated Color repeated_enum_value = 15;
-}
-
-
-message InnerMessageValue {
- optional float float_value = 2;
- repeated bytes bytes_values = 8;
-}
-
-message MiddleMessageValue {
- repeated int32 int32_values = 5;
- optional InnerMessageValue message_value = 11;
- optional uint32 uint32_value = 13;
-}
-
-message MessageValue {
- optional double double_value = 1;
- optional MiddleMessageValue message_value = 11;
-}
-
-message RepeatedMessageValue {
- message NestedMessageValue {
- optional float float_value = 2;
- repeated bytes bytes_values = 8;
- }
-
- repeated NestedMessageValue message_values = 11;
-}
-
-// Message containing fields with field numbers higher than any field above. An
-// instance of this message is prepended to each binary message in the test to
-// exercise the code path that handles fields encoded out of order of field
-// number.
-message ExtraFields {
- optional string string_value = 1776;
- optional bool bool_value = 1777;
-}
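For completeness, a hypothetical client-side call against the renamed Increment method defined above. The stub class name follows standard gRPC Python codegen for TestCaseService (the generated test_example_pb2_grpc module is imported by the servicer test earlier in this diff), and the channel address is a placeholder:

import grpc
from tensorflow.contrib.rpc.python.kernel_tests import test_example_pb2
from tensorflow.contrib.rpc.python.kernel_tests import test_example_pb2_grpc

channel = grpc.insecure_channel('localhost:8500')  # placeholder address
stub = test_example_pb2_grpc.TestCaseServiceStub(channel)
reply = stub.Increment(test_example_pb2.TestCase(values=[1, 2, 3]))
print(list(reply.values))  # [2, 3, 4] per the servicer above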
diff --git a/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py b/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py
index 178328619f..4073b390fc 100644
--- a/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py
+++ b/tensorflow/contrib/seq2seq/python/kernel_tests/beam_search_decoder_test.py
@@ -132,6 +132,48 @@ class TestGatherTree(test.TestCase):
def test_gather_tree_from_array_2d(self):
self._test_gather_tree_from_array(depth_ndims=2)
+ def test_gather_tree_from_array_complex_trajectory(self):
+ # Max. time = 7, batch = 1, beam = 5.
+ array = np.expand_dims(np.array(
+ [[[25, 12, 114, 89, 97]],
+ [[9, 91, 64, 11, 162]],
+ [[34, 34, 34, 34, 34]],
+ [[2, 4, 2, 2, 4]],
+ [[2, 3, 6, 2, 2]],
+ [[2, 2, 2, 3, 2]],
+ [[2, 2, 2, 2, 2]]]), -1)
+ parent_ids = np.array(
+ [[[0, 0, 0, 0, 0]],
+ [[0, 0, 0, 0, 0]],
+ [[0, 1, 2, 3, 4]],
+ [[0, 0, 1, 2, 1]],
+ [[0, 1, 1, 2, 3]],
+ [[0, 1, 3, 1, 2]],
+ [[0, 1, 2, 3, 4]]])
+ expected_array = np.expand_dims(np.array(
+ [[[25, 25, 25, 25, 25]],
+ [[9, 9, 91, 9, 9]],
+ [[34, 34, 34, 34, 34]],
+ [[2, 4, 2, 4, 4]],
+ [[2, 3, 6, 3, 6]],
+ [[2, 2, 2, 3, 2]],
+ [[2, 2, 2, 2, 2]]]), -1)
+ sequence_length = [[4, 6, 4, 7, 6]]
+
+ array = ops.convert_to_tensor(
+ array, dtype=dtypes.float32)
+ parent_ids = ops.convert_to_tensor(
+ parent_ids, dtype=dtypes.int32)
+ expected_array = ops.convert_to_tensor(
+ expected_array, dtype=dtypes.float32)
+
+ sorted_array = beam_search_decoder.gather_tree_from_array(
+ array, parent_ids, sequence_length)
+
+ with self.test_session() as sess:
+ sorted_array, expected_array = sess.run([sorted_array, expected_array])
+ self.assertAllEqual(expected_array, sorted_array)
+
class TestArrayShapeChecks(test.TestCase):
diff --git a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
index c7fbeea310..f17dbb0fe3 100644
--- a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
+++ b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
@@ -145,24 +145,20 @@ def gather_tree_from_array(t, parent_ids, sequence_length):
array_ops.expand_dims(math_ops.range(beam_width), 0), 0)
beam_ids = array_ops.tile(beam_ids, [max_time, batch_size, 1])
- mask = array_ops.sequence_mask(
- sequence_length, maxlen=max_time, dtype=dtypes.int32)
- mask = array_ops.transpose(mask, perm=[2, 0, 1])
-
- # Use beam_width + 1 to mark the end of beam.
- masked_beam_ids = (beam_ids * mask) + (1 - mask) * (beam_width + 1)
-
max_sequence_lengths = math_ops.to_int32(
math_ops.reduce_max(sequence_length, axis=1))
sorted_beam_ids = beam_search_ops.gather_tree(
- step_ids=masked_beam_ids,
+ step_ids=beam_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=beam_width + 1)
# For out of range steps, simply copy the same beam.
+ in_bound_steps = array_ops.transpose(
+ array_ops.sequence_mask(sequence_length, maxlen=max_time),
+ perm=[2, 0, 1])
sorted_beam_ids = array_ops.where(
- math_ops.cast(mask, dtypes.bool), x=sorted_beam_ids, y=beam_ids)
+ in_bound_steps, x=sorted_beam_ids, y=beam_ids)
# Generate indices for gather_nd.
time_ind = array_ops.tile(array_ops.reshape(
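The rewrite above stops masking step_ids before gather_tree and instead applies the in-bounds mask only when choosing between the sorted and the original beam ids. A small numpy sketch of that selection, with assumed shapes ([max_time, batch, beam] after the transpose) and made-up values:

import numpy as np

max_time, batch, beam = 3, 1, 2
beam_ids = np.tile(np.arange(beam), (max_time, batch, 1))   # original beam of each slot
sorted_beam_ids = np.array([[[1, 0]], [[0, 1]], [[1, 0]]])  # stand-in for gather_tree output
sequence_length = np.array([[2, 2]])                        # [batch, beam]

# Equivalent of sequence_mask(...) followed by transpose(perm=[2, 0, 1]).
steps = np.arange(max_time).reshape(max_time, 1, 1)
in_bound_steps = steps < sequence_length[np.newaxis, :, :]

# In-range steps take the sorted beam; out-of-range steps simply keep their beam.
result = np.where(in_bound_steps, sorted_beam_ids, beam_ids)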
diff --git a/tensorflow/contrib/signal/python/kernel_tests/mel_ops_test.py b/tensorflow/contrib/signal/python/kernel_tests/mel_ops_test.py
index 345eb6cfaa..f4348e80ea 100644
--- a/tensorflow/contrib/signal/python/kernel_tests/mel_ops_test.py
+++ b/tensorflow/contrib/signal/python/kernel_tests/mel_ops_test.py
@@ -53,7 +53,8 @@ def spectrogram_to_mel_matrix(num_mel_bins=20,
num_spectrogram_bins=129,
audio_sample_rate=8000,
lower_edge_hertz=125.0,
- upper_edge_hertz=3800.0):
+ upper_edge_hertz=3800.0,
+ unused_dtype=None):
"""Return a matrix that can post-multiply spectrogram rows to make mel.
Copied from
@@ -132,9 +133,9 @@ class LinearToMelTest(test.TestCase):
# lower_edge_hertz, upper_edge_hertz) to test.
configs = [
# Defaults.
- (20, 129, 8000.0, 125.0, 3800.0),
+ (20, 129, 8000.0, 125.0, 3800.0, dtypes.float64),
# Settings used by Tacotron (https://arxiv.org/abs/1703.10135).
- (80, 1025, 24000.0, 80.0, 12000.0)
+ (80, 1025, 24000.0, 80.0, 12000.0, dtypes.float64)
]
with self.test_session(use_gpu=True):
for config in configs:
@@ -143,7 +144,8 @@ class LinearToMelTest(test.TestCase):
self.assertAllClose(mel_matrix_np, mel_matrix.eval(), atol=3e-6)
def test_dtypes(self):
- for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
+ # LinSpace is not supported for tf.float16.
+ for dtype in (dtypes.bfloat16, dtypes.float32, dtypes.float64):
self.assertEqual(dtype,
mel_ops.linear_to_mel_weight_matrix(dtype=dtype).dtype)
@@ -167,7 +169,8 @@ class LinearToMelTest(test.TestCase):
def test_constant_folding(self):
"""Mel functions should be constant foldable."""
- for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
+    # TODO(rjryan): tf.bfloat16 cannot be constant folded by Grappler.
+ for dtype in (dtypes.float32, dtypes.float64):
g = ops.Graph()
with g.as_default():
mel_matrix = mel_ops.linear_to_mel_weight_matrix(dtype=dtype)
diff --git a/tensorflow/contrib/signal/python/ops/mel_ops.py b/tensorflow/contrib/signal/python/ops/mel_ops.py
index 1e84006116..062d84aea1 100644
--- a/tensorflow/contrib/signal/python/ops/mel_ops.py
+++ b/tensorflow/contrib/signal/python/ops/mel_ops.py
@@ -151,22 +151,21 @@ def linear_to_mel_weight_matrix(num_mel_bins=20,
_validate_arguments(num_mel_bins, sample_rate,
lower_edge_hertz, upper_edge_hertz, dtype)
- # To preserve accuracy, we compute the matrix at float64 precision and then
- # cast to `dtype` at the end. This function can be constant folded by graph
- # optimization since there are no Tensor inputs.
+ # This function can be constant folded by graph optimization since there are
+ # no Tensor inputs.
sample_rate = ops.convert_to_tensor(
- sample_rate, dtypes.float64, name='sample_rate')
+ sample_rate, dtype, name='sample_rate')
lower_edge_hertz = ops.convert_to_tensor(
- lower_edge_hertz, dtypes.float64, name='lower_edge_hertz')
+ lower_edge_hertz, dtype, name='lower_edge_hertz')
upper_edge_hertz = ops.convert_to_tensor(
- upper_edge_hertz, dtypes.float64, name='upper_edge_hertz')
- zero_float64 = ops.convert_to_tensor(0.0, dtypes.float64)
+ upper_edge_hertz, dtype, name='upper_edge_hertz')
+ zero = ops.convert_to_tensor(0.0, dtype)
# HTK excludes the spectrogram DC bin.
bands_to_zero = 1
nyquist_hertz = sample_rate / 2.0
linear_frequencies = math_ops.linspace(
- zero_float64, nyquist_hertz, num_spectrogram_bins)[bands_to_zero:]
+ zero, nyquist_hertz, num_spectrogram_bins)[bands_to_zero:]
spectrogram_bins_mel = array_ops.expand_dims(
_hertz_to_mel(linear_frequencies), 1)
@@ -193,11 +192,8 @@ def linear_to_mel_weight_matrix(num_mel_bins=20,
# Intersect the line segments with each other and zero.
mel_weights_matrix = math_ops.maximum(
- zero_float64, math_ops.minimum(lower_slopes, upper_slopes))
+ zero, math_ops.minimum(lower_slopes, upper_slopes))
# Re-add the zeroed lower bins we sliced out above.
- mel_weights_matrix = array_ops.pad(
- mel_weights_matrix, [[bands_to_zero, 0], [0, 0]])
-
- # Cast to the desired type.
- return math_ops.cast(mel_weights_matrix, dtype, name=name)
+ return array_ops.pad(
+ mel_weights_matrix, [[bands_to_zero, 0], [0, 0]], name=name)
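With this change the weight matrix is built in the requested dtype end to end, rather than in float64 followed by a cast. A minimal usage sketch against the signature visible in this diff; the argument values are the defaults listed in the test file above (20, 129, 8000, 125.0, 3800.0), and the import path simply mirrors this file's location:

import tensorflow as tf
from tensorflow.contrib.signal.python.ops import mel_ops

m = mel_ops.linear_to_mel_weight_matrix(
    num_mel_bins=20,
    num_spectrogram_bins=129,
    sample_rate=8000,
    lower_edge_hertz=125.0,
    upper_edge_hertz=3800.0,
    dtype=tf.float32)
print(m.dtype)   # float32: no trailing cast node anymore
print(m.shape)   # (129, 20): one row per spectrogram bin, one column per mel bin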
diff --git a/tensorflow/contrib/summary/summary_ops_test.py b/tensorflow/contrib/summary/summary_ops_test.py
index 3e41e3d0b4..4d1807130c 100644
--- a/tensorflow/contrib/summary/summary_ops_test.py
+++ b/tensorflow/contrib/summary/summary_ops_test.py
@@ -20,6 +20,8 @@ import os
import tempfile
import time
+import sqlite3
+
import numpy as np
import six
@@ -275,6 +277,22 @@ class EagerFileTest(test_util.TensorFlowTestCase):
class EagerDbTest(summary_test_util.SummaryDbTest):
+ def testDbURIOpen(self):
+ tmpdb_path = os.path.join(self.get_temp_dir(), 'tmpDbURITest.sqlite')
+ tmpdb_uri = six.moves.urllib_parse.urljoin("file:", tmpdb_path)
+ tmpdb_writer = summary_ops.create_db_writer(
+ tmpdb_uri,
+ "experimentA",
+ "run1",
+ "user1")
+ with summary_ops.always_record_summaries():
+ with tmpdb_writer.as_default():
+ summary_ops.scalar('t1', 2.0)
+ tmpdb = sqlite3.connect(tmpdb_path)
+ num = get_one(tmpdb, 'SELECT count(*) FROM Tags WHERE tag_name = "t1"')
+ self.assertEqual(num, 1)
+ tmpdb.close()
+
def testIntegerSummaries(self):
step = training_util.create_global_step()
writer = self.create_db_writer()
diff --git a/tensorflow/contrib/tensor_forest/BUILD b/tensorflow/contrib/tensor_forest/BUILD
index 136856c015..164f3e58e6 100644
--- a/tensorflow/contrib/tensor_forest/BUILD
+++ b/tensorflow/contrib/tensor_forest/BUILD
@@ -223,7 +223,6 @@ tf_kernel_library(
":model_ops_lib",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
- "//tensorflow/core:lib_internal",
],
alwayslink = 1,
)
@@ -319,7 +318,6 @@ tf_kernel_library(
":stats_ops_lib",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
- "//tensorflow/core:lib_internal",
],
alwayslink = 1,
)
diff --git a/tensorflow/contrib/tensorrt/BUILD b/tensorflow/contrib/tensorrt/BUILD
index adda0b758b..2fe1f2c242 100644
--- a/tensorflow/contrib/tensorrt/BUILD
+++ b/tensorflow/contrib/tensorrt/BUILD
@@ -11,7 +11,6 @@ exports_files(["LICENSE"])
load(
"//tensorflow:tensorflow.bzl",
- "py_test",
"tf_cc_test",
"tf_copts",
"tf_cuda_library",
@@ -20,6 +19,7 @@ load(
"tf_gen_op_libs",
"tf_gen_op_wrapper_py",
)
+load("//tensorflow:tensorflow.bzl", "cuda_py_tests")
load("//tensorflow:tensorflow.bzl", "tf_cuda_cc_test")
load("//tensorflow:tensorflow.bzl", "tf_custom_op_py_library")
load("//tensorflow:tensorflow.bzl", "tf_py_wrap_cc")
@@ -33,8 +33,8 @@ tf_cuda_cc_test(
size = "small",
srcs = ["tensorrt_test.cc"],
tags = [
- "manual",
- "notap",
+ "no_windows",
+ "nomac",
],
deps = [
"//tensorflow/core:lib",
@@ -83,6 +83,7 @@ cc_library(
copts = tf_copts(),
visibility = ["//visibility:public"],
deps = [
+ ":trt_allocator",
":trt_logging",
":trt_plugins",
":trt_resources",
@@ -156,6 +157,7 @@ py_library(
],
srcs_version = "PY2AND3",
deps = [
+ ":tf_trt_integration_test_base",
":trt_convert_py",
":trt_ops_py",
"//tensorflow/python:errors",
@@ -185,6 +187,9 @@ tf_py_wrap_cc(
name = "wrap_conversion",
srcs = ["trt_conversion.i"],
copts = tf_copts(),
+ swig_includes = [
+ "//tensorflow/python:platform/base.i",
+ ],
deps = [
":trt_conversion",
":trt_engine_op_kernel",
@@ -195,17 +200,16 @@ tf_py_wrap_cc(
tf_cuda_library(
name = "trt_resources",
srcs = [
- "resources/trt_allocator.cc",
"resources/trt_int8_calibrator.cc",
"resources/trt_resource_manager.cc",
],
hdrs = [
- "resources/trt_allocator.h",
"resources/trt_int8_calibrator.h",
"resources/trt_resource_manager.h",
"resources/trt_resources.h",
],
deps = [
+ ":trt_allocator",
":trt_logging",
":utils",
"//tensorflow/core:framework_headers_lib",
@@ -216,6 +220,34 @@ tf_cuda_library(
]),
)
+tf_cuda_library(
+ name = "trt_allocator",
+ srcs = ["resources/trt_allocator.cc"],
+ hdrs = ["resources/trt_allocator.h"],
+ deps = [
+ "//tensorflow/core:framework_headers_lib",
+ "//tensorflow/core:framework_lite",
+ "//tensorflow/core:lib_proto_parsing",
+ ] + if_tensorrt([
+ "@local_config_tensorrt//:nv_infer",
+ ]),
+)
+
+tf_cc_test(
+ name = "trt_allocator_test",
+ size = "small",
+ srcs = ["resources/trt_allocator_test.cc"],
+ tags = [
+ "no_windows",
+ "nomac",
+ ],
+ deps = [
+ ":trt_allocator",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ ],
+)
+
# Library for the node-level conversion portion of TensorRT operation creation
tf_cuda_library(
name = "trt_conversion",
@@ -231,6 +263,7 @@ tf_cuda_library(
],
deps = [
":segment",
+ ":trt_allocator",
":trt_plugins",
":trt_logging",
":trt_resources",
@@ -275,13 +308,21 @@ tf_cc_test(
name = "segment_test",
size = "small",
srcs = ["segment/segment_test.cc"],
+ tags = [
+ "no_windows",
+ "nomac",
+ ],
deps = [
":segment",
- "//tensorflow/c:c_api",
+ "//tensorflow/cc:cc_ops",
+ "//tensorflow/cc:scope",
+ "//tensorflow/core:core_cpu",
"//tensorflow/core:lib",
+ "//tensorflow/core:ops",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core:test",
"//tensorflow/core:test_main",
+ "//tensorflow/core:testlib",
],
)
@@ -311,8 +352,9 @@ tf_cuda_cc_test(
size = "small",
srcs = ["plugin/trt_plugin_factory_test.cc"],
tags = [
- "manual",
- "notap",
+ "no_cuda_on_cpu_tap",
+ "no_windows",
+ "nomac",
],
deps = [
":trt_plugins",
@@ -325,23 +367,47 @@ tf_cuda_cc_test(
]),
)
-py_test(
+py_library(
+ name = "tf_trt_integration_test_base",
+ srcs = ["test/tf_trt_integration_test_base.py"],
+ deps = [
+ ":trt_convert_py",
+ ":trt_ops_py",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_test_lib",
+ ],
+)
+
+cuda_py_tests(
name = "tf_trt_integration_test",
- srcs = ["test/tf_trt_integration_test.py"],
- main = "test/tf_trt_integration_test.py",
- srcs_version = "PY2AND3",
- tags = [
- "manual",
- "notap",
+ srcs = [
+ "test/base_test.py",
+ # "test/batch_matmul_test.py",
+ # "test/biasadd_matmul_test.py",
+ "test/binary_tensor_weight_broadcast_test.py",
+ "test/concatenation_test.py",
+ "test/const_broadcast_test.py",
+ "test/multi_connection_neighbor_engine_test.py",
+ "test/neighboring_engine_test.py",
+ "test/unary_test.py",
+ # "test/vgg_block_nchw_test.py",
+ # "test/vgg_block_test.py",
],
- deps = [
- ":init_py",
+ additional_deps = [
+ ":tf_trt_integration_test_base",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework_test_lib",
],
+ tags = [
+ "no_cuda_on_cpu_tap",
+ "no_windows",
+ "nomac",
+ ],
)
cc_library(
name = "utils",
+ srcs = ["convert/utils.cc"],
hdrs = ["convert/utils.h"],
+ copts = tf_copts(),
)
diff --git a/tensorflow/contrib/tensorrt/convert/convert_graph.cc b/tensorflow/contrib/tensorrt/convert/convert_graph.cc
index 5bfc7f9109..3383f6bc9b 100644
--- a/tensorflow/contrib/tensorrt/convert/convert_graph.cc
+++ b/tensorflow/contrib/tensorrt/convert/convert_graph.cc
@@ -86,27 +86,48 @@ bool IsTensorRTCandidate(const tensorflow::Node* node) {
   // TODO(jie): Segmentation shouldn't be associated with op name.
// Split it into a registration for each kernel.
static const std::set<string> candidate_ops = {
- "Identity",
- "Snapshot",
- "Const",
- "Conv2D",
- "MaxPool",
- "BiasAdd",
- "Relu",
- "Add",
- "Mul",
- "Sub",
- "Rsqrt",
- "Pad",
- "Mean",
- "AvgPool",
- "ConcatV2",
- "DepthwiseConv2dNative",
- "FusedBatchNorm",
- "FusedBatchNormV2",
- // TODO(ben,jie): ...
+ "Identity",
+ "Snapshot",
+ "Const",
+ "Conv2D",
+ "MaxPool",
+ "BiasAdd",
+ "Relu",
+ "Add",
+ "Mul",
+ "Sub",
+ "Rsqrt",
+ "Pad",
+ "Mean",
+ "AvgPool",
+ "ConcatV2",
+ "DepthwiseConv2dNative",
+ "FusedBatchNorm",
+ "FusedBatchNormV2",
+ "Div",
+ "RealDiv",
+ "Rsqrt",
+ "Reciprocal",
+ "Exp",
+ "Log",
+ "Sqrt",
+ "Abs",
+ "Neg",
+#if NV_TENSORRT_MAJOR > 3
+ "MatMul",
+ "BatchMatMul",
+ "Softmax",
+ "Minimum",
+ "Maximum",
+ "TopKV2",
+ "Sum",
+ "Prod",
+ "Max",
+ "Min",
+#endif
+ // TODO(ben,jie): ...
};
- // LINT.ThenChange(//tensorflow/contrib/tensorrt/convert/convert_nodes.h)
+ // LINT.ThenChange(//tensorflow/contrib/tensorrt/convert/convert_nodes.cc)
return (candidate_ops.count(node->type_string()) ||
PluginFactoryTensorRT::GetInstance()->IsPlugin(node->type_string()));
}
@@ -248,6 +269,7 @@ tensorflow::Status GetEngineInfo(
const std::vector<tensorflow::Node*>& reverse_topo_order,
EngineInfo* info) {
std::vector<int> subgraph_node_ids;
+ std::set<int> added_const_node_ids; // Used to prevent double insertion.
std::set<string> segment_devices;
int input_port = 0;
int output_port = 0;
@@ -257,6 +279,7 @@ tensorflow::Status GetEngineInfo(
// edge, thus there must not be any duplicates since source nodes of
// input/output edges must be in different split of the graph.
// TODO(aaroey): consider using node id and port instead.
+  // TODO(aaroey): use topo order instead of reversing the reverse topo order.
std::unordered_map<string, int> created_edges;
for (auto it = reverse_topo_order.rbegin(); it != reverse_topo_order.rend();
++it) {
@@ -275,19 +298,22 @@ tensorflow::Status GetEngineInfo(
<< " neither have requested device nor assigned device";
}
}
- int node_id = node->id();
- subgraph_node_ids.push_back(node_id);
+ const int node_id = node->id();
for (const auto edge : node->in_edges()) {
auto input_node = edge->src();
- if (segment_nodes.count(input_node->name()) == 0) {
+ if (segment_nodes.count(input_node->name()) == 0 &&
+ !edge->IsControlEdge() && !input_node->IsSource()) {
// Add constant input node into the segment. We don't care if it has
// other output edges going into other engines or TF nodes. Since we add
// it only to the subsegment node list, not the subsegment itself, it
// won't be removed from the graph. If it doesn't have any edges, TF
// will prune it out.
if (input_node->type_string() == "Const") {
- subgraph_node_ids.push_back(input_node->id());
- } else if (!edge->IsControlEdge() && !input_node->IsSource()) {
+ if (added_const_node_ids.count(input_node->id()) == 0) {
+ added_const_node_ids.insert(input_node->id());
+ subgraph_node_ids.push_back(input_node->id());
+ }
+ } else {
string s(input_node->name());
StrAppend(&s, ":", edge->src_output());
VLOG(1) << "Input edge = " << s;
@@ -304,6 +330,9 @@ tensorflow::Status GetEngineInfo(
}
}
}
+ // We need to add possible const input nodes before adding this node in
+ // order to keep the topological order.
+ subgraph_node_ids.push_back(node_id);
for (const auto edge : node->out_edges()) {
auto output_node = edge->dst();
if (segment_nodes.count(output_node->name()) == 0 &&
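The comment above is the reason the node id is now appended only after any constant inputs it pulls into the segment: subgraph_node_ids must stay topologically ordered. A hedged Python paraphrase of that ordering logic, using hypothetical stand-in node/edge objects (the handling of non-const inputs as engine connections is omitted):

def collect_subgraph_node_ids(topo_ordered_nodes, segment_names):
    subgraph_node_ids = []
    added_const_node_ids = set()   # prevents double insertion of shared consts
    for node in topo_ordered_nodes:
        for edge in node.in_edges:
            src = edge.src
            if src.name in segment_names or edge.is_control_edge or src.is_source:
                continue
            if src.type == 'Const' and src.id not in added_const_node_ids:
                added_const_node_ids.add(src.id)
                subgraph_node_ids.append(src.id)  # const precedes its consumer
        subgraph_node_ids.append(node.id)         # node added after its const inputs
    return subgraph_node_ids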
@@ -350,9 +379,9 @@ tensorflow::Status CreateTRTNode(tensorflow::Graph* graph,
nvinfer1::IGpuAllocator* alloc,
int max_batch_size) {
const auto& info = infos.at(pos);
- std::vector<tensorflow::TensorShapeProto> out_shapes;
- std::vector<tensorflow::TensorShapeProto> input_shapes;
- std::vector<tensorflow::PartialTensorShape> shapes;
+ std::vector<tensorflow::TensorShapeProto> output_shape_protos;
+ std::vector<tensorflow::TensorShapeProto> input_shape_protos;
+ std::vector<tensorflow::PartialTensorShape> input_shapes;
std::vector<tensorflow::NodeDefBuilder::NodeOut> inputs;
std::vector<tensorflow::DataType> out_types;
VLOG(1) << "Processing " << info.engine_name;
@@ -365,11 +394,11 @@ tensorflow::Status CreateTRTNode(tensorflow::Graph* graph,
tensorflow::TensorShapeProto out_shape;
// shape of the output node inside segment
conn.inside_shape.AsProto(&out_shape);
- if (out_shapes.size() <= conn.port_number) {
- out_shapes.resize(conn.port_number + 1);
+ if (output_shape_protos.size() <= conn.port_number) {
+ output_shape_protos.resize(conn.port_number + 1);
out_types.resize(conn.port_number + 1);
}
- out_shapes.at(conn.port_number) = out_shape;
+ output_shape_protos.at(conn.port_number) = out_shape;
out_types.at(conn.port_number) = conn.connection_type;
continue;
}
@@ -377,12 +406,12 @@ tensorflow::Status CreateTRTNode(tensorflow::Graph* graph,
// Set the shapes and data types of input edge.
tensorflow::TensorShapeProto in_shape;
conn.outside_shape.AsProto(&in_shape);
- if (input_shapes.size() <= conn.port_number) {
+ if (input_shape_protos.size() <= conn.port_number) {
+ input_shape_protos.resize(conn.port_number + 1);
input_shapes.resize(conn.port_number + 1);
- shapes.resize(conn.port_number + 1);
}
- input_shapes.at(conn.port_number) = in_shape;
- shapes.at(conn.port_number) = conn.outside_shape;
+ input_shape_protos.at(conn.port_number) = in_shape;
+ input_shapes.at(conn.port_number) = conn.outside_shape;
string input_node = conn.outside_node_name;
int input_port = conn.outside_port;
@@ -410,6 +439,8 @@ tensorflow::Status CreateTRTNode(tensorflow::Graph* graph,
VLOG(1) << "Engine Input " << input_node << ":" << input_port << " -> "
<< info.engine_name << ":" << inputs.size();
// Skip duplicate inputs.
+      // TODO(aaroey): use std::find instead. GetEngineInfo already removes
+      // duplicate connections, so here we should never find any duplicates?
bool new_input = true;
for (const auto& inp : inputs) {
if (inp.node == input_node && inp.index == input_port) {
@@ -437,8 +468,8 @@ tensorflow::Status CreateTRTNode(tensorflow::Graph* graph,
TF_RETURN_IF_ERROR(ConvertGraphDefToEngine(
info.segment_graph_def,
info.precision_mode == INT8MODE ? FP32MODE : info.precision_mode,
- max_batch_size, info.max_workspace_size_bytes, shapes, &trt_logger,
- alloc, /*calibrator=*/nullptr, &engine,
+ max_batch_size, info.max_workspace_size_bytes, input_shapes,
+ &trt_logger, alloc, /*calibrator=*/nullptr, &engine,
/*convert_successfully=*/nullptr));
TrtUniquePtrType<nvinfer1::IHostMemory> engine_data(engine->serialize());
segment_string =
@@ -486,8 +517,8 @@ tensorflow::Status CreateTRTNode(tensorflow::Graph* graph,
}
tensorflow::NodeDef trt_node;
tensorflow::Status status =
- node_builder.Attr("input_shapes", input_shapes)
- .Attr("output_shapes", out_shapes)
+ node_builder.Attr("input_shapes", input_shape_protos)
+ .Attr("output_shapes", output_shape_protos)
.Attr("static_engine",
info.engine_type == EngineInfo::EngineType::TRTStatic)
.Attr("segment_funcdef_name",
@@ -596,7 +627,9 @@ tensorflow::Status RegisterSegmentFunctionToFunctionLibrary(
edge->src()->output_type(edge->src_output()));
VLOG(1) << " input " << nout.node << ":" << nout.index
<< " dtype=" << tensorflow::DataTypeString(nout.data_type);
- node_builder.Input({nout});
+ // nvcc complains that Input(<brace-enclosed initializer list>) is
+ // ambiguous, so do not use Input({nout}).
+ node_builder.Input(nout);
TF_RETURN_IF_ERROR(node_builder.Attr("T", node->output_type(0))
.Attr("index", i)
.Finalize(&nd));
@@ -704,6 +737,7 @@ std::pair<int, tensorflow::Allocator*> GetDeviceAndAllocator(
}
// Entry function from optimization pass.
+// TODO(aaroey): the parameter should use a pointer type.
tensorflow::Status ConvertAfterShapes(ConversionParams& params) {
// Convert graphdef to graph.
tensorflow::FunctionLibraryDefinition flib(tensorflow::OpRegistry::Global(),
@@ -721,7 +755,8 @@ tensorflow::Status ConvertAfterShapes(ConversionParams& params) {
segment_options.minimum_segment_size = params.minimum_segment_size;
tensorflow::tensorrt::segment::SegmentNodesVector initial_segments;
TF_RETURN_IF_ERROR(tensorrt::segment::SegmentGraph(
- &graph, IsTensorRTCandidate, segment_options, &initial_segments));
+ &graph, IsTensorRTCandidate, InputEdgeValidator(*params.graph_properties),
+ OutputEdgeValidator(), segment_options, &initial_segments));
if (initial_segments.size() > 1) {
VLOG(0) << "MULTIPLE tensorrt candidate conversion: "
<< initial_segments.size();
@@ -801,7 +836,7 @@ tensorflow::Status ConvertAfterShapes(ConversionParams& params) {
// The allocator is used to build the engine. The build and the built engine
// will be destroyed after we get the serialized engine string, so it's fine
// to use unique_ptr here.
- std::unique_ptr<nvinfer1::IGpuAllocator> alloc;
+ std::unique_ptr<TRTBaseAllocator> alloc;
auto device_alloc = GetDeviceAndAllocator(params, engine);
int cuda_device_id = 0;
if (device_alloc.first >= 0) {
diff --git a/tensorflow/contrib/tensorrt/convert/convert_nodes.cc b/tensorflow/contrib/tensorrt/convert/convert_nodes.cc
index 146b9c7344..451d6fe698 100644
--- a/tensorflow/contrib/tensorrt/convert/convert_nodes.cc
+++ b/tensorflow/contrib/tensorrt/convert/convert_nodes.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/contrib/tensorrt/convert/convert_nodes.h"
#include <algorithm>
+#include <cstring>
#include <list>
#include <map>
#include <memory>
@@ -49,15 +50,34 @@ limitations under the License.
#if GOOGLE_TENSORRT
#include "tensorrt/include/NvInfer.h"
-// Check if the types are equal. Cast to int first so that failure log message
-// would work!
-#define CHECK_EQ_TYPE(val1, val2) CHECK_EQ((int)val1, (int)val2)
+// Check if the types are equal. Cast to int first so that the failure log
+// message works.
+#define TFTRT_CHECK_EQ_TYPE(val1, val2) CHECK_EQ((int)val1, (int)val2)
+
+#define TFTRT_INTERNAL_ERROR_AT_NODE(node) \
+ do { \
+ return tensorflow::errors::Internal( \
+ "TFTRT::", __FUNCTION__, "failed to add TRT layer, at: ", node); \
+ } while (0)
+
+#define TFTRT_RETURN_ERROR_IF_FALSE(status, node) \
+ do { \
+ if (status == false) { \
+ TFTRT_INTERNAL_ERROR_AT_NODE(node); \
+ } \
+ } while (0)
+
+#define TFTRT_RETURN_ERROR_IF_NULLPTR(ptr, node) \
+ do { \
+ if (ptr == nullptr) { \
+ TFTRT_INTERNAL_ERROR_AT_NODE(node); \
+ } \
+ } while (0)
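For illustration, the error macros above are meant to wrap TensorRT builder calls so that a nullptr result becomes an early tensorflow::Status return. A hedged sketch only; AddReluOrFail is a hypothetical helper and not part of this change:

// Hypothetical usage sketch of TFTRT_RETURN_ERROR_IF_NULLPTR; not in the diff.
tensorflow::Status AddReluOrFail(nvinfer1::INetworkDefinition* net,
                                 nvinfer1::ITensor* input,
                                 const std::string& node_name,
                                 nvinfer1::ITensor** output) {
  nvinfer1::IActivationLayer* layer =
      net->addActivation(*input, nvinfer1::ActivationType::kRELU);
  // If TRT fails to add the layer it returns nullptr; convert that into an
  // Internal error that names the offending node.
  TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_name);
  *output = layer->getOutput(0);
  return tensorflow::Status::OK();
}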
namespace tensorflow {
namespace tensorrt {
namespace convert {
using ::tensorflow::str_util::Split;
-
using ::tensorflow::strings::StrAppend;
using ::tensorflow::strings::StrCat;
@@ -75,13 +95,163 @@ inline tensorflow::Status ConvertDType(tensorflow::DataType tf_dtype,
case tensorflow::DataType::DT_HALF:
*trt_dtype = nvinfer1::DataType::kHALF;
break;
+#if NV_TENSORRT_MAJOR > 3
+ case tensorflow::DataType::DT_INT32:
+ *trt_dtype = nvinfer1::DataType::kINT32;
+ break;
+#endif
default:
return tensorflow::errors::InvalidArgument(
- "Unsupported data type " + tensorflow::DataTypeString(tf_dtype));
+ "Unsupported data type ", tensorflow::DataTypeString(tf_dtype));
}
return tensorflow::Status::OK();
}
+void GetInputProperties(const grappler::GraphProperties& graph_properties,
+ const Node* outside_node, const int out_port,
+ PartialTensorShape* shape,
+ tensorflow::DataType* dtype) {
+ if (graph_properties.HasOutputProperties(outside_node->name())) {
+ auto output_params =
+ graph_properties.GetOutputProperties(outside_node->name());
+ auto out_shape = output_params.at(out_port);
+ *dtype = out_shape.dtype();
+ *shape = out_shape.shape();
+ } else {
+ VLOG(0) << "Unknown output shape" << outside_node->name();
+ *dtype = outside_node->output_type(out_port);
+ }
+}
+
+void GetOutputProperties(const grappler::GraphProperties& graph_properties,
+ const Node* outside_node, const int in_port,
+ PartialTensorShape* shape,
+ tensorflow::DataType* dtype) {
+ if (graph_properties.HasInputProperties(outside_node->name())) {
+ auto input_params =
+ graph_properties.GetInputProperties(outside_node->name());
+ auto in_shape = input_params.at(in_port);
+ *dtype = in_shape.dtype();
+ *shape = in_shape.shape();
+ } else {
+ *dtype = outside_node->input_type(in_port);
+ }
+}
+
+tensorflow::Status ValidateInputProperties(const PartialTensorShape& shape,
+ const tensorflow::DataType dtype,
+ nvinfer1::DataType* trt_dtype) {
+ // TODO(aaroey): some of these checks also apply to IsTensorRTCandidate(), so
+ // put them there instead.
+ TF_RETURN_IF_ERROR(ConvertDType(dtype, trt_dtype));
+ if (shape.dims() < 0) {
+ return tensorflow::errors::InvalidArgument("Input tensor rank is unknown.");
+ }
+ if (shape.dims() > 9) {
+ return tensorflow::errors::OutOfRange(
+ "Input tensor rank is greater than 8.");
+ }
+ for (int d = 1; d < shape.dims(); ++d) {
+ if (shape.dim_size(d) < 0) {
+ return tensorflow::errors::InvalidArgument(
+ "Input tensor has a unknown non-batch dimemension at dim ", d);
+ }
+ }
+ return Status::OK();
+}
+
+// Returns whether or not the broadcast is feasible.
+bool TensorRTGetBroadcastShape(const nvinfer1::Dims& operand_l,
+ const bool operand_l_is_tensor,
+ const nvinfer1::Dims& operand_r,
+ const bool operand_r_is_tensor,
+ nvinfer1::Dims* operand_l_new_shape,
+ nvinfer1::Dims* operand_r_new_shape) {
+ // ***************************************************************************
+ // The TensorRT Elementwise op supports broadcast but requires both tensors
+ // to be of identical rank.
+ //
+ // We consider case of:
+ // 1. operand_l to be a Tensor & operand_r to be a Const;
+ // 2. operand_l to be a Tensor & operand_r to be a Tensor;
+ // note: const op const (constant folding) should fall back to TensorFlow
+ //
+ // broadcast scheme:
+ // T: 1 3 5 (tensor would not have batch dimension)
+ // W: 1 1 3 1 (weight would have all explicit dimensions)
+ // i. fill in explicit dimensions
+ // -> T: -1 1 3 5 (we put a -1 for batch dimension)
+ // -> W: 1 1 3 1
+ // ii. compare broadcast feasibility
+ //
+ // We cannot support the following, since TensorRT does not allow
+ // manipulation of the batch dimension and we cannot generate output with the
+ // proper shape:
+ // T: 3 5 1
+ // W: 1 1 1 1 3 5 1
+ // -> T: 1 1 1 -1 3 5 1
+ // -> W: 1 1 1 1 3 5 1
+ // ***************************************************************************
+ const int max_nb_dims = nvinfer1::Dims::MAX_DIMS + 1;
+ const size_t element_size = sizeof(operand_l.d[0]);
+
+ // fill in dimensions
+ int l_s[max_nb_dims];
+ std::fill(l_s, l_s + max_nb_dims, 1);
+ int l_d = operand_l_is_tensor ? operand_l.nbDims + 1 : operand_l.nbDims;
+ int r_s[max_nb_dims];
+ std::fill(r_s, r_s + max_nb_dims, 1);
+ int r_d = operand_r_is_tensor ? operand_r.nbDims + 1 : operand_r.nbDims;
+
+ int max_d = std::max(l_d, r_d);
+ std::memcpy(l_s + max_d - operand_l.nbDims, operand_l.d,
+ operand_l.nbDims * element_size);
+ std::memcpy(r_s + max_d - operand_r.nbDims, operand_r.d,
+ operand_r.nbDims * element_size);
+
+ // set -1 for batch dimension, since batch size is not supposed to be
+ // broadcasted
+ if (operand_l_is_tensor) {
+ if (max_d != l_d) { // if broadcast beyond batch dimension, fail
+ return false;
+ }
+ l_s[0] = -1;
+ }
+ if (operand_r_is_tensor) {
+ if (max_d != r_d) { // if broadcast beyond batch dimension, fail
+ return false;
+ }
+ r_s[0] = -1;
+ }
+
+ // compare broadcast feasibility
+ for (int i = max_d - 1; i >= 0; i--) {
+ if ((l_s[i] != r_s[i]) && (l_s[i] != 1) && (r_s[i] != 1)) {
+ return false;
+ }
+ }
+
+ // output new TensorRT Dimension (stripping the batch dimension)
+ operand_l_new_shape->nbDims = max_d - 1;
+ std::memcpy(operand_l_new_shape->d, l_s + 1, (max_d - 1) * element_size);
+ operand_r_new_shape->nbDims = max_d - 1;
+ std::memcpy(operand_r_new_shape->d, r_s + 1, (max_d - 1) * element_size);
+
+ return true;
+}
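For intuition, the feasibility rule above can be sketched on plain integer vectors, independent of nvinfer1::Dims and the implicit batch dimension. The helper below is illustrative only and not part of this change:

#include <algorithm>
#include <vector>

// Right-align two shapes by padding the shorter one with leading 1s, then
// require every dimension pair to match or contain a 1 -- the same check the
// loop above performs after filling in explicit dimensions.
bool BroadcastCompatible(std::vector<int> a, std::vector<int> b) {
  const size_t rank = std::max(a.size(), b.size());
  a.insert(a.begin(), rank - a.size(), 1);
  b.insert(b.begin(), rank - b.size(), 1);
  for (size_t i = 0; i < rank; ++i) {
    if (a[i] != b[i] && a[i] != 1 && b[i] != 1) return false;
  }
  return true;
}
// e.g. BroadcastCompatible({1, 3, 5}, {1, 1, 3, 1}) is true, matching the
// "T: 1 3 5 / W: 1 1 3 1" example in the comment block above.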
+
+inline bool DimsEqual(const nvinfer1::Dims& dim_l,
+ const nvinfer1::Dims& dim_r) {
+ if (dim_l.nbDims != dim_r.nbDims) {
+ return false;
+ }
+ for (int i = 0; i < dim_l.nbDims; i++) {
+ if (dim_l.d[i] != dim_r.d[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
inline nvinfer1::Dims GetTensorShape(const tensorflow::Tensor& tensor) {
nvinfer1::Dims dims;
dims.nbDims = tensor.dims();
@@ -91,7 +261,7 @@ inline nvinfer1::Dims GetTensorShape(const tensorflow::Tensor& tensor) {
return dims;
}
-inline int64_t GetShapeSize(nvinfer1::Dims shape) {
+inline int64_t GetShapeSize(const nvinfer1::Dims& shape) {
// Returns total number of elements in shape
int64_t count = 1;
for (int d = 0; d < shape.nbDims; ++d) {
@@ -104,7 +274,7 @@ static std::vector<std::pair<int, int>> CreateSamePadding(
const nvinfer1::DimsHW& stride, const nvinfer1::DimsHW& kernel,
const std::vector<int64_t>& input_dims) {
std::vector<std::pair<int, int>> padding(input_dims.size());
- CHECK_EQ((size_t)stride.nbDims, input_dims.size()); // TODO(jie): N+C? NC+?
+ CHECK_EQ(stride.nbDims, input_dims.size()); // TODO(jie): N+C? NC+?
for (size_t i = 0; i < input_dims.size(); ++i) {
// Formula to calculate the padding
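The hunk cuts off before the padding formula itself; for reference, TensorFlow-style SAME padding for a single spatial dimension is conventionally computed as in this standalone sketch (SamePadding1D is my own name, not a function in this file):

#include <algorithm>
#include <utility>

// SAME padding: pad so that output size == ceil(input / stride), splitting the
// total padding as evenly as possible, extra pixel on the trailing side.
std::pair<int, int> SamePadding1D(int input, int kernel, int stride) {
  const int output = (input + stride - 1) / stride;  // ceil division
  const int total = std::max((output - 1) * stride + kernel - input, 0);
  return {total / 2, total - total / 2};             // {before, after}
}
// e.g. SamePadding1D(/*input=*/7, /*kernel=*/3, /*stride=*/2) returns {1, 1}.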
@@ -134,6 +304,7 @@ string GetCommonNameScope(const string& op_name_a, const string& op_name_b) {
return op_name_a.substr(0, last_scope_separator);
}
+// Class to convert TF weights to TRT weights.
class TRT_ShapedWeights {
public:
TRT_ShapedWeights(tensorflow::DataType type, const void* values,
@@ -145,12 +316,14 @@ class TRT_ShapedWeights {
explicit TRT_ShapedWeights(tensorflow::DataType type)
: shape_(), type_(type), values_(nullptr), empty_weight_flag_(true) {}
+ // TODO(aaroey): use rvalue reference.
TRT_ShapedWeights(const TRT_ShapedWeights& rhs)
: shape_(rhs.shape_),
type_(rhs.type_),
values_(rhs.values_),
empty_weight_flag_(rhs.empty_weight_flag_) {}
+ // TODO(aaroey): use GetShapeSize() instead.
int64_t count() const {
int64_t c = 1;
for (int i = 0; i < shape_.nbDims; i++) c *= shape_.d[i];
@@ -168,6 +341,7 @@ class TRT_ShapedWeights {
const void* GetValues() const { return values_; }
+ // TODO(aaroey): get rid of this method.
void SetValues(const void* values) { values_ = values; }
size_t size_bytes() const {
@@ -178,10 +352,12 @@ class TRT_ShapedWeights {
// Default converter
operator nvinfer1::Weights() const { return GetWeightsForTRT(); }
+ // TODO(aaroey): make these private.
nvinfer1::Dims shape_;
tensorflow::DataType type_;
private:
+ // TODO(aaroey): this should not be const as it's always from TRTWeightStore.
const void* values_;
bool empty_weight_flag_;
};
@@ -192,6 +368,7 @@ class TRT_TensorOrWeights {
: tensor_(tensor), weights_(DT_FLOAT), variant_(TRT_NODE_TENSOR) {}
explicit TRT_TensorOrWeights(const TRT_ShapedWeights& weights)
: tensor_(nullptr), weights_(weights), variant_(TRT_NODE_WEIGHTS) {}
+ // TODO(aaroey): use rvalue reference.
TRT_TensorOrWeights(const TRT_TensorOrWeights& rhs)
: tensor_(rhs.tensor_), weights_(rhs.weights_), variant_(rhs.variant_) {}
~TRT_TensorOrWeights() {}
@@ -200,19 +377,19 @@ class TRT_TensorOrWeights {
bool is_weights() const { return variant_ == TRT_NODE_WEIGHTS; }
nvinfer1::ITensor* tensor() {
- CHECK_EQ(is_tensor(), true);
+ CHECK(is_tensor());
return tensor_;
}
const nvinfer1::ITensor* tensor() const {
- CHECK_EQ(is_tensor(), true);
+ CHECK(is_tensor());
return tensor_;
}
TRT_ShapedWeights& weights() {
- CHECK_EQ(is_weights(), true);
+ CHECK(is_weights());
return weights_;
}
const TRT_ShapedWeights& weights() const {
- CHECK_EQ(is_weights(), true);
+ CHECK(is_weights());
return weights_;
}
nvinfer1::Dims shape() const {
@@ -236,21 +413,25 @@ class TFAttrs {
attrs_.insert({attr.first, &attr.second});
}
}
- bool count(string key) const { return attrs_.count(key); }
- tensorflow::AttrValue const* at(string key) const {
+
+ bool count(const string& key) const { return attrs_.count(key); }
+
+ tensorflow::AttrValue const* at(const string& key) const {
if (!attrs_.count(key)) {
LOG(FATAL) << "Attribute not found: " << key;
}
return attrs_.at(key);
}
+
template <typename T>
T get(const string& key) const;
+
template <typename T>
T get(const string& key, const T& default_value) const {
return attrs_.count(key) ? this->get<T>(key) : default_value;
}
- std::vector<string> GetAllAttrKey() {
+ std::vector<string> GetAllAttrKeys() const {
std::vector<string> attr_list;
for (const auto& attr_item : attrs_) {
attr_list.emplace_back(attr_item.first);
@@ -285,15 +466,6 @@ std::vector<string> TFAttrs::get<std::vector<string>>(const string& key) const {
auto attr = this->at(key)->list().s();
return std::vector<string>(attr.begin(), attr.end());
}
-template <>
-nvinfer1::Dims TFAttrs::get<nvinfer1::Dims>(const string& key) const {
- auto values = this->get<std::vector<int>>(key);
- nvinfer1::Dims dims;
- dims.nbDims = values.size();
- std::copy(values.begin(), values.end(), dims.d);
- // Note: No dimension type information is included
- return dims;
-}
template <>
nvinfer1::DataType TFAttrs::get<nvinfer1::DataType>(const string& key) const {
@@ -319,10 +491,11 @@ bool TFAttrs::get<bool>(const string& key) const {
}
// TODO(jie): reorder4 & reorder2 should be merged?
+// TODO(aaroey): fix the order of parameters.
template <typename T>
-void Reorder4(nvinfer1::DimsNCHW shape, const T* idata,
- nvinfer1::DimsNCHW istrides, T* odata,
- nvinfer1::DimsNCHW ostrides) {
+void Reorder4(const nvinfer1::DimsNCHW& shape, const T* idata,
+ const nvinfer1::DimsNCHW& istrides, T* odata,
+ const nvinfer1::DimsNCHW& ostrides) {
for (int n = 0; n < shape.n(); ++n) {
for (int c = 0; c < shape.c(); ++c) {
for (int h = 0; h < shape.h(); ++h) {
@@ -337,12 +510,13 @@ void Reorder4(nvinfer1::DimsNCHW shape, const T* idata,
}
template <typename T>
-void Reorder2(nvinfer1::DimsHW shape, const T* idata, nvinfer1::DimsHW istrides,
- T* odata, nvinfer1::DimsHW ostrides) {
+void Reorder2(const nvinfer1::DimsHW& shape, const T* idata,
+ const nvinfer1::DimsHW& istrides, T* odata,
+ const nvinfer1::DimsHW& ostrides) {
for (int h = 0; h < shape.h(); ++h) {
for (int w = 0; w < shape.w(); ++w) {
odata[h * ostrides.h() + w * ostrides.w()] =
- idata[h * ostrides.h() + w * ostrides.w()];
+ idata[h * istrides.h() + w * istrides.w()];
}
}
}
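To make the stride bookkeeping concrete: ReorderCKtoKC below calls Reorder2 with shape {k, c}, istrides {1, k} and ostrides {c, 1}, which amounts to transposing a row-major C x K matrix into a row-major K x C one. A self-contained sketch with plain arrays (values and names are my own, purely illustrative):

#include <cassert>
#include <vector>

int main() {
  const int C = 2, K = 3;
  // A C x K "CK" weight matrix stored row-major: rows are input channels.
  const std::vector<float> ck = {0, 1, 2,   // c = 0
                                 3, 4, 5};  // c = 1
  std::vector<float> kc(C * K);
  // Same index arithmetic as Reorder2 with istrides {1, K} / ostrides {C, 1}:
  // read ck[c * K + k], write kc[k * C + c].
  for (int k = 0; k < K; ++k) {
    for (int c = 0; c < C; ++c) {
      kc[k * C + c] = ck[c * K + k];
    }
  }
  // kc is now the K x C transpose: {0, 3, 1, 4, 2, 5}.
  assert(kc[1] == 3 && kc[2] == 1 && kc[5] == 5);
  return 0;
}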
@@ -350,16 +524,17 @@ void Reorder2(nvinfer1::DimsHW shape, const T* idata, nvinfer1::DimsHW istrides,
// TODO(jie): fallback to tensorflow!!
void ReorderCKtoKC(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights) {
- int c = iweights.shape_.d[0];
- int k = iweights.shape_.d[1];
+ const int c = iweights.shape_.d[0];
+ const int k = iweights.shape_.d[1];
oweights->shape_.d[0] = k;
oweights->shape_.d[1] = c;
- nvinfer1::DimsHW istrides = {1, k};
- nvinfer1::DimsHW ostrides = {c, 1};
+ const nvinfer1::DimsHW istrides = {1, k};
+ const nvinfer1::DimsHW ostrides = {c, 1};
switch (iweights.type_) {
case tensorflow::DataType::DT_FLOAT: {
Reorder2({k, c}, static_cast<float const*>(iweights.GetValues()),
istrides,
+ // TODO(aaroey): get rid of all the const_cast like this.
static_cast<float*>(const_cast<void*>(oweights->GetValues())),
ostrides);
break;
@@ -382,21 +557,24 @@ void ReorderRSCKToKCRS(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights, int num_groups) {
CHECK_EQ(iweights.type_, oweights->type_);
CHECK_EQ(iweights.size_bytes(), oweights->size_bytes());
- int r = iweights.shape_.d[0];
- int s = iweights.shape_.d[1];
- // TRT requires GKcRS, while TF depthwise has RSCK
- // where c=1, C=G
+ // K indexes over output channels, C over input channels, and R and S over the
+ // height and width of the convolution
+ const int r = iweights.shape_.d[0];
+ const int s = iweights.shape_.d[1];
+ // TRT requires GKcRS, while TF depthwise has RSCK where c=1, C=G
VLOG(2) << "num_groups: " << num_groups;
- int c = iweights.shape_.d[2] / num_groups;
+ const int c = iweights.shape_.d[2] / num_groups;
VLOG(2) << "c" << iweights.shape_.d[2] << " then " << c;
- int k = iweights.shape_.d[3] * num_groups;
+ const int k = iweights.shape_.d[3] * num_groups;
VLOG(2) << "k" << iweights.shape_.d[3] << " then " << k;
+ VLOG(2) << "r" << iweights.shape_.d[0] << " then " << r;
+ VLOG(2) << "s" << iweights.shape_.d[1] << " then " << s;
oweights->shape_.d[0] = k / num_groups;
oweights->shape_.d[1] = c * num_groups;
oweights->shape_.d[2] = r;
oweights->shape_.d[3] = s;
- nvinfer1::DimsNCHW istrides = {1, k, s * k * c, c * k};
- nvinfer1::DimsNCHW ostrides = {c * r * s, r * s, s, 1};
+ const nvinfer1::DimsNCHW istrides = {1, k, s * k * c, c * k};
+ const nvinfer1::DimsNCHW ostrides = {c * r * s, r * s, s, 1};
switch (iweights.type_) {
case tensorflow::DataType::DT_FLOAT: {
Reorder4({k, c, r, s}, static_cast<float const*>(iweights.GetValues()),
@@ -428,11 +606,14 @@ using OpConverter =
std::vector<TRT_TensorOrWeights>*)>;
class Converter {
+ // TODO(aaroey): fix the order of members.
std::unordered_map<string, TRT_TensorOrWeights> trt_tensors_;
std::unordered_map<string, OpConverter> op_registry_;
OpConverter plugin_converter_;
nvinfer1::INetworkDefinition* trt_network_;
std::list<std::vector<uint8_t>> temp_bufs_;
+ // TODO(aaroey): inline the definition of TRTWeightStore here, and add APIs
+ // to operate on the stored weights instead of manipulating them directly.
TRTWeightStore* weight_store_;
bool fp16_;
void register_op_converters();
@@ -440,7 +621,7 @@ class Converter {
std::vector<TRT_TensorOrWeights>* inputs) {
for (auto const& input_name : node_def.input()) {
/*************************************************************************
- * TODO(jie) handle case 1) here
+ * TODO(jie): handle case 1) here.
* Normalizes the inputs and extracts associated metadata:
* 1) Inputs can contain a colon followed by a suffix of characters.
* That suffix may be a single number (e.g. inputName:1) or several
@@ -454,6 +635,7 @@ class Converter {
if (input_name[0] == '^') continue;
string name = input_name;
auto first = name.find_first_of(':');
+ // TODO(aaroey): why remove the colon but not the zero? A bug?
if (first != string::npos && first + 2 == name.size() &&
name[first + 1] == '0')
name.erase(first);
@@ -462,12 +644,13 @@ class Converter {
if (trt_tensors_.count(name)) {
inputs->push_back(trt_tensors_.at(name));
} else {
- string str("Node ");
- StrAppend(&str, node_def.name(), " should have an input named '", name,
+ // TODO(aaroey): this should not happen, make it a CHECK.
+ // TODO(aaroey): use StrCat for pattern like this.
+ string msg("Node ");
+ StrAppend(&msg, node_def.name(), " should have an input named '", name,
"' but it is not available");
- LOG(WARNING) << "input: " << name << " not available for node at "
- << node_def.name();
- return tensorflow::errors::InvalidArgument(str);
+ LOG(ERROR) << msg;
+ return tensorflow::errors::InvalidArgument(msg);
}
}
return tensorflow::Status::OK();
@@ -488,6 +671,7 @@ class Converter {
weights.SetValues(weight_store_->store_.back().data());
return weights;
}
+ // TODO(aaroey): fix all the namings.
bool isFP16() { return fp16_; }
TRT_ShapedWeights get_temp_weights_like(const TRT_ShapedWeights& weights) {
return this->get_temp_weights(weights.type_, weights.shape_);
@@ -496,9 +680,10 @@ class Converter {
tensorflow::Status convert_node(const tensorflow::NodeDef& node_def) {
std::vector<TRT_TensorOrWeights> inputs;
TF_RETURN_IF_ERROR(this->get_inputs(node_def, &inputs));
- string op = node_def.op();
+ const string& op = node_def.op();
std::vector<TRT_TensorOrWeights> outputs;
if (PluginFactoryTensorRT::GetInstance()->IsPlugin(op)) {
+ // TODO(aaroey): plugin_converter_ is not set, fix it.
TF_RETURN_IF_ERROR(plugin_converter_(*this, node_def, inputs, &outputs));
} else {
if (!op_registry_.count(op)) {
@@ -509,7 +694,7 @@ class Converter {
TF_RETURN_IF_ERROR(op_converter(*this, node_def, inputs, &outputs));
}
for (size_t i = 0; i < outputs.size(); ++i) {
- TRT_TensorOrWeights output = outputs.at(i);
+ TRT_TensorOrWeights& output = outputs[i];
// TODO(jie): tf protobuf seems to be omitting the :0 suffix
string output_name = node_def.name();
if (i != 0) output_name = StrCat(output_name, ":", i);
@@ -527,26 +712,29 @@ class Converter {
nvinfer1::INetworkDefinition* network() { return trt_network_; }
- TRT_TensorOrWeights get_tensor(string name) {
+ TRT_TensorOrWeights get_tensor(const string& name) {
if (!trt_tensors_.count(name)) {
return TRT_TensorOrWeights(nullptr);
}
return trt_tensors_.at(name);
}
- bool insert_input_tensor(string name, nvinfer1::ITensor* tensor) {
+ bool insert_input_tensor(const string& name, nvinfer1::ITensor* tensor) {
return trt_tensors_.insert({name, TRT_TensorOrWeights(tensor)}).second;
}
nvinfer1::ITensor* TransposeTensor(nvinfer1::ITensor* input_tensor,
- std::vector<int> order) {
- auto dims = input_tensor->getDimensions();
+ const std::vector<int>& order) {
+ const auto dims = input_tensor->getDimensions();
// TODO(jie): change the return to status and properly exit
if (order.size() - 1 != size_t(dims.nbDims))
LOG(ERROR) << "Dimension does not match, fail gracefully";
nvinfer1::IShuffleLayer* layer = this->network()->addShuffle(*input_tensor);
+ if (layer == nullptr) {
+ return nullptr;
+ }
nvinfer1::Permutation permutation;
for (int32_t i = 0; i < dims.nbDims; ++i) {
permutation.order[i] = order[i + 1] - 1;
@@ -577,13 +765,14 @@ TRT_ShapedWeights ConvertFP32ToFP16(Converter& ctx,
}
return weights;
}
+
// ****************************************************************************
// Constant folding functions
// TODO(jie): once optimizer kicks in, we should have done constant folding
// there.
-//*****************************************************************************/
+// *****************************************************************************
struct LambdaFactory {
- enum class OP_CATEGORY : int { RSQRT = 0, NEG, ADD, MUL, SUB };
+ enum class OP_CATEGORY : int { RSQRT = 0, NEG, ADD, MUL, SUB, RECIP };
OP_CATEGORY op;
template <typename T>
@@ -595,6 +784,8 @@ struct LambdaFactory {
}
case OP_CATEGORY::NEG:
return [](T t) -> T { return -t; };
+ case OP_CATEGORY::RECIP:
+ return [](T t) -> T { return 1.0 / t; };
default:
VLOG(2) << "Not supported op for unary: " << static_cast<int>(op);
return nullptr;
@@ -628,7 +819,6 @@ struct LambdaFactory {
VLOG(2) << "LAMBDA VAL : " << val;
return l + val;
};
- // Return [val](T l)-> T {return l+val;};
case OP_CATEGORY::SUB:
return [val](T l) -> T {
VLOG(2) << "LAMBDA VAL : " << val;
@@ -688,11 +878,13 @@ std::function<Eigen::half(Eigen::half)> LambdaFactory::unary<Eigen::half>() {
}
case OP_CATEGORY::NEG:
return [](Eigen::half t) -> Eigen::half { return -t; };
+ // TODO(aaroey): can we support RECIP?
default:
VLOG(2) << "Not supported op for unary: " << static_cast<int>(op);
return nullptr;
}
}
+
tensorflow::Status UnaryCompute(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights,
LambdaFactory unary_op) {
@@ -738,6 +930,7 @@ tensorflow::Status BinaryCompute(const TRT_ShapedWeights& iweights_l,
if (iweights_l.count() != iweights_r.count()) {
// We only support broadcast of RankZero
if (iweights_l.count() == 1) {
+ // TODO(aaroey): Remove logging like this.
VLOG(2) << "I bet it is not working!" << (*inp_l);
std::transform(inp_r, inp_r + iweights_r.count(), oup,
binary_op.broadcast_l<float>(*inp_l));
@@ -790,117 +983,21 @@ tensorflow::Status BinaryCompute(const TRT_ShapedWeights& iweights_l,
return tensorflow::Status::OK();
}
-tensorflow::Status ConstantFoldUnary(
- Converter& ctx, const tensorflow::NodeDef& node_def,
- const std::vector<TRT_TensorOrWeights>& inputs,
- std::vector<TRT_TensorOrWeights>* outputs) {
- TRT_ShapedWeights weights_input = inputs.at(0).weights();
-
- // Allocate output weights
- TRT_ShapedWeights weights_output = ctx.get_temp_weights_like(weights_input);
-
- // FIXME assume type matches input weights
- // Get trt type & shape
- // Maybe this part has to be moved into the block of rsqrt later
- // Check type consistency
- CHECK_EQ(weights_input.type_,
- TFAttrs(node_def).get<tensorflow::DataType>("T"));
-
- LambdaFactory unary_op;
- if (node_def.op() == "Rsqrt") {
- // Compute rsqrt
- unary_op.op = LambdaFactory::OP_CATEGORY::RSQRT;
- auto ret = UnaryCompute(weights_input, &weights_output, unary_op);
- // Pass the output
- if (ret == tensorflow::Status::OK()) {
- outputs->push_back(TRT_TensorOrWeights(weights_output));
- }
- return ret;
- } else {
- return tensorflow::errors::Unimplemented("Binary op not supported: " +
- node_def.op());
- }
-}
-
-// TODO(jie,ben) broadcast is needed yet not implemented
-// Let's get the simple stuff working first. Maybe we should fall back to TF
-// approach for constant folding
-tensorflow::Status ConstantFoldBinary(
- Converter& ctx, const tensorflow::NodeDef& node_def,
- const std::vector<TRT_TensorOrWeights>& inputs,
- std::vector<TRT_TensorOrWeights>* outputs) {
- TRT_ShapedWeights weights_input_l = inputs.at(0).weights();
- TRT_ShapedWeights weights_input_r = inputs.at(1).weights();
-
- // Check type consistency
- CHECK_EQ(weights_input_l.type_, weights_input_r.type_);
-
- if (weights_input_l.shape_.nbDims != weights_input_r.shape_.nbDims)
- return tensorflow::errors::Unimplemented(
- "Binary op implicit broadcast not supported: " + node_def.op());
-
- // TODO(jie): constant fold should really fall back to TF.
- int num_dims = weights_input_l.shape_.nbDims;
- nvinfer1::Dims output_shape;
- output_shape.nbDims = num_dims;
- VLOG(2) << "nb_dims: " << num_dims
- << ", the other: " << weights_input_r.shape_.nbDims;
- for (int i = 0; i < num_dims; i++) {
- if (weights_input_l.shape_.d[i] == weights_input_r.shape_.d[i]) {
- output_shape.d[i] = weights_input_l.shape_.d[i];
- } else if (weights_input_l.shape_.d[i] == 1 ||
- weights_input_r.shape_.d[i] == 1) {
- output_shape.d[i] =
- std::max(weights_input_l.shape_.d[i], weights_input_r.shape_.d[i]);
- } else {
- return tensorflow::errors::Unimplemented(
- "Binary op with incompatible shape at, " + node_def.op());
- }
- VLOG(2) << "left: " << weights_input_l.shape_.d[i]
- << "right: " << weights_input_r.shape_.d[i]
- << "output: " << output_shape.d[i];
- }
-
- // FIXME assume type matches input weights
- // Get trt type & shape
- TFAttrs attrs(node_def);
- // Maybe this part has to be moved into the block of rsqrt later
- tensorflow::DataType dtype = attrs.get<tensorflow::DataType>("T");
-
- // Allocate output weights
- TRT_ShapedWeights weights_output = ctx.get_temp_weights(dtype, output_shape);
-
- LambdaFactory binary_op;
- if (node_def.op() == "Sub") {
- binary_op.op = LambdaFactory::OP_CATEGORY::SUB;
- } else if (node_def.op() == "Mul") {
- binary_op.op = LambdaFactory::OP_CATEGORY::MUL;
- } else if (node_def.op() == "Add") {
- binary_op.op = LambdaFactory::OP_CATEGORY::ADD;
- } else {
- return tensorflow::errors::Unimplemented("Binary op not supported: " +
- node_def.op());
- }
- auto ret = BinaryCompute(weights_input_l, weights_input_r, &weights_output,
- binary_op);
-
- // Pass the output
- if (ret == tensorflow::Status::OK()) {
- outputs->push_back(TRT_TensorOrWeights(weights_output));
- }
-
- return ret;
-}
-
// TODO(jie): broadcast is needed yet not implemented.
// Only implemented channel wise for the time being
tensorflow::Status BinaryTensorOpWeight(
Converter& ctx, const tensorflow::NodeDef& node_def,
const nvinfer1::ITensor* tensor, TRT_ShapedWeights weights,
- std::vector<TRT_TensorOrWeights>* outputs) {
- // FIXME assume type matches input weights
- // Get trt type & shape
- // Maybe this part has to be moved into the block of rsqrt later
+ bool swapped_inputs, std::vector<TRT_TensorOrWeights>* outputs) {
+ // tensor is the left operand and weights is the right operand; when
+ // swapped_inputs is set to true, the two are swapped.
+ // TODO(aaroey): use a set.
+ if (node_def.op() != "Sub" && node_def.op() != "Add" &&
+ node_def.op() != "Mul" && node_def.op() != "Div" &&
+ node_def.op() != "RealDiv") {
+ return tensorflow::errors::Unimplemented(
+ "op not supported: " + node_def.op() + ", at: " + node_def.name());
+ }
// Check type consistency
nvinfer1::DataType ttype;
@@ -910,6 +1007,12 @@ tensorflow::Status BinaryTensorOpWeight(
auto dims_w = weights.shape_;
auto dims_t = tensor->getDimensions();
+ // TODO(jie): addScale checks for input tensor dimension
+ if (dims_t.nbDims != 3) {
+ return tensorflow::errors::InvalidArgument(
+ "addScale requires tensor with rank 3, " + node_def.name());
+ }
+
// default to element-wise
auto scale_mode = nvinfer1::ScaleMode::kELEMENTWISE;
@@ -980,6 +1083,7 @@ tensorflow::Status BinaryTensorOpWeight(
permutation[dims_t.nbDims] = 1;
tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
permutation);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
} else {
return tensorflow::errors::InvalidArgument(
"Transpose cannot be applied, " + node_def.name());
@@ -997,11 +1101,35 @@ tensorflow::Status BinaryTensorOpWeight(
// Maybe I should do a switch
if (node_def.op() == "Sub") {
- TRT_ShapedWeights neg_weights = ctx.get_temp_weights_like(weights);
- LambdaFactory unary_op;
- unary_op.op = LambdaFactory::OP_CATEGORY::NEG;
- TF_RETURN_IF_ERROR(UnaryCompute(weights, &neg_weights, unary_op));
- shift_weights = neg_weights;
+ if (swapped_inputs) {
+ shift_weights = weights;
+ nvinfer1::IUnaryLayer* layer =
+ ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+ nvinfer1::UnaryOperation::kNEG);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+ tensor = layer->getOutput(0);
+ } else {
+ TRT_ShapedWeights neg_weights = ctx.get_temp_weights_like(weights);
+ LambdaFactory unary_op;
+ unary_op.op = LambdaFactory::OP_CATEGORY::NEG;
+ TF_RETURN_IF_ERROR(UnaryCompute(weights, &neg_weights, unary_op));
+ shift_weights = neg_weights;
+ }
+ } else if (node_def.op() == "Div" || node_def.op() == "RealDiv") {
+ if (swapped_inputs) {
+ scale_weights = weights;
+ nvinfer1::IUnaryLayer* layer =
+ ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+ nvinfer1::UnaryOperation::kRECIP);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+ tensor = layer->getOutput(0);
+ } else {
+ TRT_ShapedWeights recip_weights = ctx.get_temp_weights_like(weights);
+ LambdaFactory unary_op;
+ unary_op.op = LambdaFactory::OP_CATEGORY::RECIP;
+ TF_RETURN_IF_ERROR(UnaryCompute(weights, &recip_weights, unary_op));
+ scale_weights = recip_weights;
+ }
} else if (node_def.op() == "Mul") {
scale_weights = weights;
} else if (node_def.op() == "Add") {
@@ -1014,11 +1142,13 @@ tensorflow::Status BinaryTensorOpWeight(
nvinfer1::IScaleLayer* layer = ctx.network()->addScale(
*const_cast<nvinfer1::ITensor*>(tensor), scale_mode, shift_weights,
scale_weights, power_weights);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
// transpose back dimension
if (permutation_flag) {
output_tensor = ctx.TransposeTensor(output_tensor, permutation);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
}
// Pass the output
@@ -1042,20 +1172,31 @@ tensorflow::Status ConvertConv2DHelper(
if (data_format == "NHWC") {
tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
{0, 3, 1, 2});
+ TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
h_index = 1;
w_index = 2;
// TODO(jie): transpose it
}
// tensor after transpose (NCHW)
- auto tensor_dim = tensor->getDimensions();
+ const auto tensor_dim = tensor->getDimensions();
int num_groups = group;
- if (num_groups == 0) // depthwise convolution
- num_groups = tensor_dim.d[0];
+ if (num_groups == 0) num_groups = tensor_dim.d[0]; // depthwise convolution
VLOG(2) << "groups count: " << num_groups;
TRT_ShapedWeights weights_rsck = inputs.at(1).weights();
+
+ VLOG(2) << "weight shape: " << weights_rsck.shape_.nbDims;
+ for (int i = 0; i < weights_rsck.shape_.nbDims; i++) {
+ VLOG(2) << weights_rsck.shape_.d[i];
+ }
+
+ if (weights_rsck.shape_.nbDims != 4) {
+ return tensorflow::errors::Internal(
+ "Conv2D expects kernel of dimension 4, at: " + node_def.name());
+ }
+
if (ctx.isFP16()) {
weights_rsck = ConvertFP32ToFP16(ctx, inputs.at(1).weights());
}
@@ -1063,18 +1204,22 @@ tensorflow::Status ConvertConv2DHelper(
TRT_ShapedWeights weights = ctx.get_temp_weights_like(weights_rsck);
ReorderRSCKToKCRS(weights_rsck, &weights, num_groups);
TRT_ShapedWeights biases(weights.type_);
- int noutput = weights.shape_.d[0] * num_groups;
+ const int noutput = weights.shape_.d[0] * num_groups;
nvinfer1::DimsHW kernel_size;
kernel_size.h() = weights.shape_.d[2];
kernel_size.w() = weights.shape_.d[3];
+ VLOG(2) << "RSCK: ";
+ for (int i = 0; i < 4; i++) {
+ VLOG(2) << " " << weights.shape_.d[i];
+ }
VLOG(2) << "kernel size: " << kernel_size.h() << ", " << kernel_size.w();
// TODO(jie): stride. (NHWC/NCHW)
- auto tf_stride = attrs.get<std::vector<int>>("strides");
+ const auto tf_stride = attrs.get<std::vector<int>>("strides");
VLOG(2) << "h_INDEX" << h_index << ", w_index " << w_index;
VLOG(2) << "stride!!!: " << tf_stride[0] << tf_stride[1] << tf_stride[2]
<< tf_stride[3];
- nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
+ const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
std::vector<std::pair<int, int>> padding;
// TODO(jie): padding.
@@ -1102,6 +1247,7 @@ tensorflow::Status ConvertConv2DHelper(
*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::DimsHW(padding[0].first, padding[1].first),
nvinfer1::DimsHW(padding[0].second, padding[1].second));
+ TFTRT_RETURN_ERROR_IF_NULLPTR(pad_layer, node_def.name());
padding = {{0, 0}, {0, 0}};
tensor = pad_layer->getOutput(0);
auto dim_after = tensor->getDimensions();
@@ -1112,6 +1258,7 @@ tensorflow::Status ConvertConv2DHelper(
nvinfer1::IConvolutionLayer* layer =
ctx.network()->addConvolution(*const_cast<nvinfer1::ITensor*>(tensor),
noutput, kernel_size, weights, biases);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStride(stride);
layer->setPadding({padding[0].first, padding[1].first});
@@ -1126,6 +1273,7 @@ tensorflow::Status ConvertConv2DHelper(
if (data_format == "NHWC") {
// TODO(jie): transpose it back!
output_tensor = ctx.TransposeTensor(output_tensor, {0, 2, 3, 1});
+ TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
} else {
VLOG(2) << "NCHW !!!!";
}
@@ -1147,35 +1295,91 @@ tensorflow::Status ConvertConv2DHelper(
node_def.name());
}
+// Helper function that converts the input into a tensor with the shape
+// specified by dims.
+bool PrepareTensorForShape(Converter& ctx, const TRT_TensorOrWeights& input,
+ const nvinfer1::Dims& dims,
+ const nvinfer1::ITensor** tensor) {
+ if (input.is_tensor()) {
+ if (DimsEqual(input.shape(), dims)) {
+ *tensor = input.tensor();
+ } else {
+ nvinfer1::IShuffleLayer* layer = ctx.network()->addShuffle(
+ *const_cast<nvinfer1::ITensor*>(input.tensor()));
+ if (layer != nullptr) {
+ layer->setReshapeDimensions(dims);
+ *tensor = layer->getOutput(0);
+ } else {
+ return false;
+ }
+ }
+ } else {
+#if NV_TENSORRT_MAJOR > 3
+ nvinfer1::IConstantLayer* layer =
+ ctx.network()->addConstant(dims, input.weights());
+ if (layer != nullptr) {
+ *tensor = layer->getOutput(0);
+ } else {
+ return false;
+ }
+#else
+ return false;
+#endif
+ }
+ return true;
+}
+
tensorflow::Status BinaryTensorOpTensor(
Converter& ctx, const tensorflow::NodeDef& node_def,
- const nvinfer1::ITensor* tensor_l, const nvinfer1::ITensor* tensor_r,
+ const TRT_TensorOrWeights& operand_l, const TRT_TensorOrWeights& operand_r,
std::vector<TRT_TensorOrWeights>* outputs) {
static const std::unordered_map<string, nvinfer1::ElementWiseOperation> ops{
{"Add", nvinfer1::ElementWiseOperation::kSUM},
{"Mul", nvinfer1::ElementWiseOperation::kPROD},
{"Sub", nvinfer1::ElementWiseOperation::kSUB},
{"Div", nvinfer1::ElementWiseOperation::kDIV},
+ {"RealDiv", nvinfer1::ElementWiseOperation::kDIV},
+ {"Minimum", nvinfer1::ElementWiseOperation::kMIN},
+ {"Maximum", nvinfer1::ElementWiseOperation::kMAX},
};
- // FIXME assume type matches input weights
+ const nvinfer1::ITensor* tensor_l;
+ const nvinfer1::ITensor* tensor_r;
+
+ nvinfer1::Dims dim_l;
+ nvinfer1::Dims dim_r;
+
+ if (!TensorRTGetBroadcastShape(operand_l.shape(), operand_l.is_tensor(),
+ operand_r.shape(), operand_r.is_tensor(),
+ &dim_l, &dim_r)) {
+ return tensorflow::errors::InvalidArgument(
+ "Binary op broadcast scheme not supported by TensorRT op: " +
+ node_def.op() + ", at: " + node_def.name());
+ }
+
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, operand_l, dim_l, &tensor_l), node_def.name());
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, operand_r, dim_r, &tensor_r), node_def.name());
+
// get trt type & shape
TFAttrs attrs(node_def);
// maybe this part has to be moved into the block of rsqrt later
nvinfer1::DataType dtype = attrs.get<nvinfer1::DataType>("T");
// check type consistency
- CHECK_EQ_TYPE(tensor_l->getType(), dtype);
- CHECK_EQ_TYPE(tensor_r->getType(), dtype);
+ TFTRT_CHECK_EQ_TYPE(tensor_l->getType(), dtype);
+ TFTRT_CHECK_EQ_TYPE(tensor_r->getType(), dtype);
auto op_pair = ops.find(node_def.op());
- if (op_pair == ops.end())
+ if (op_pair == ops.end()) {
return tensorflow::errors::Unimplemented(
- "binary op: " + node_def.op() +
- " not supported at: " + node_def.name());
+ "binary op: ", node_def.op(), " not supported at: ", node_def.name());
+ }
nvinfer1::IElementWiseLayer* layer = ctx.network()->addElementWise(
+ // TODO(aaroey): will tensor_l/tensor_r get modified?
*const_cast<nvinfer1::ITensor*>(tensor_l),
*const_cast<nvinfer1::ITensor*>(tensor_r), op_pair->second);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
@@ -1202,7 +1406,7 @@ tensorflow::Status ConvertPlugin(Converter& ctx,
// passing attributes
// TODO(jie): support more general attribute
TFAttrs attrs(node_def);
- auto attr_key_vector = attrs.GetAllAttrKey();
+ auto attr_key_vector = attrs.GetAllAttrKeys();
for (auto attr_key : attr_key_vector) {
// TODO(jie): support only list of float for toy example here.
auto data = attrs.get<std::vector<float>>(attr_key);
@@ -1223,29 +1427,6 @@ tensorflow::Status ConvertPlugin(Converter& ctx,
return tensorflow::Status::OK();
}
-tensorflow::Status ConvertPlaceholder(
- Converter& ctx, const tensorflow::NodeDef& node_def,
- const std::vector<TRT_TensorOrWeights>& inputs,
- std::vector<TRT_TensorOrWeights>* outputs) {
- VLOG(2) << "Placeholder should have been replace already";
- return tensorflow::errors::Unimplemented("cannot convert Placeholder op");
- // OK this make sense since we are supposed to replace it with input
- TFAttrs attrs(node_def);
- nvinfer1::DataType dtype = attrs.get<nvinfer1::DataType>("dtype");
- nvinfer1::Dims dims = attrs.get<nvinfer1::Dims>("shape");
-
- dims.nbDims--;
- for (int i = 0; i < dims.nbDims; i++) dims.d[i] = dims.d[i + 1];
-
- nvinfer1::ITensor* output =
- ctx.network()->addInput(node_def.name().c_str(), dtype, dims);
- if (!output) {
- return tensorflow::errors::InvalidArgument("Failed to create Input layer");
- }
- outputs->push_back(TRT_TensorOrWeights(output));
- return tensorflow::Status::OK();
-}
-
tensorflow::Status ConvertConv2D(Converter& ctx,
const tensorflow::NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
@@ -1271,65 +1452,64 @@ tensorflow::Status ConvertPool(Converter& ctx,
int h_index = 2;
int w_index = 3;
- auto data_format = attrs.get<string>("data_format");
+ const auto data_format = attrs.get<string>("data_format");
if (data_format == "NHWC") {
h_index = 1;
w_index = 2;
tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
{0, 3, 1, 2});
- } else {
- VLOG(2) << "NCHW !!!!";
+ TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
}
+
nvinfer1::PoolingType type;
- // TODO(jie): support other pooling type
- if (node_def.op() == "MaxPool")
+ if (node_def.op() == "MaxPool") {
type = nvinfer1::PoolingType::kMAX;
- else if (node_def.op() == "AvgPool")
+ } else if (node_def.op() == "AvgPool") {
type = nvinfer1::PoolingType::kAVERAGE;
- else
- return tensorflow::errors::Unimplemented("Only supports Max pool");
+ } else {
+ return tensorflow::errors::Unimplemented("Unsupported pool type: ",
+ node_def.op());
+ }
- // TODO(jie): NCHW
- auto tf_stride = attrs.get<std::vector<int>>("strides");
- nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
+ const auto tf_stride = attrs.get<std::vector<int>>("strides");
+ const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
- auto tf_kernel = attrs.get<std::vector<int>>("ksize");
- nvinfer1::DimsHW ksize(tf_kernel[h_index], tf_kernel[w_index]);
+ const auto tf_kernel = attrs.get<std::vector<int>>("ksize");
+ const nvinfer1::DimsHW ksize(tf_kernel[h_index], tf_kernel[w_index]);
auto tensor_dim = tensor->getDimensions();
std::vector<std::pair<int, int>> padding;
- // TODO(jie): padding.
- if (attrs.get<string>("padding") == "SAME") {
+ const string padding_type = attrs.get<string>("padding");
+ if (padding_type == "SAME") {
// This is NCHW tensor with no batch dimension.
// 1 -> h
// 2 -> w
padding = CreateSamePadding(
stride, ksize,
{static_cast<int>(tensor_dim.d[1]), static_cast<int>(tensor_dim.d[2])});
- } else if (attrs.get<string>("padding") == "VALID") {
- // No padding for valid padding here
- VLOG(2) << "No padding added for VALID padding in pool" << node_def.name();
+ } else if (padding_type == "VALID") {
padding = {{0, 0}, {0, 0}};
} else {
- return tensorflow::errors::Unimplemented(
- "Current MaxPool cannot support padding other than SAME");
+ return tensorflow::errors::Unimplemented("Unsupported padding type: ",
+ padding_type);
}
if (padding[0].first != padding[0].second ||
padding[1].first != padding[1].second) {
- // TODO(jie): handle asymmetric padding
VLOG(2) << "Padding!!!: " << padding[0].first << padding[0].second
<< padding[1].first << padding[1].second;
auto pad_layer = ctx.network()->addPadding(
*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::DimsHW(padding[0].first, padding[1].first),
nvinfer1::DimsHW(padding[0].second, padding[1].second));
+ TFTRT_RETURN_ERROR_IF_NULLPTR(pad_layer, node_def.name());
padding = {{0, 0}, {0, 0}};
tensor = pad_layer->getOutput(0);
}
nvinfer1::IPoolingLayer* layer = ctx.network()->addPooling(
*const_cast<nvinfer1::ITensor*>(tensor), type, ksize);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStride(stride);
layer->setPadding({padding[0].first, padding[1].first});
@@ -1337,10 +1517,8 @@ tensorflow::Status ConvertPool(Converter& ctx,
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
if (data_format == "NHWC") {
- // TODO(jie): transpose it back!
output_tensor = ctx.TransposeTensor(output_tensor, {0, 2, 3, 1});
- } else {
- VLOG(2) << "NCHW !!!!";
+ TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
}
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
@@ -1353,6 +1531,7 @@ tensorflow::Status ConvertActivation(
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
nvinfer1::IActivationLayer* layer = ctx.network()->addActivation(
*const_cast<nvinfer1::ITensor*>(tensor), nvinfer1::ActivationType::kRELU);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
@@ -1363,40 +1542,61 @@ tensorflow::Status ConvertScale(Converter& ctx,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs) {
if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
- !inputs.at(1).is_weights())
+ !inputs.at(1).is_weights()) {
return tensorflow::errors::Unimplemented(
- "Only supports tensor op weight for now, at " + node_def.name());
- // Implement tensor binaryOp weight [channel wise] for now;
- const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
+ "ConvertScale only supports tensor<op>weight: ", node_def.name());
+ }
+ const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
TRT_ShapedWeights weights = inputs.at(1).weights();
if (ctx.isFP16()) {
weights = ConvertFP32ToFP16(ctx, inputs.at(1).weights());
}
TRT_ShapedWeights empty_weights(weights.type_);
-
TFAttrs attrs(node_def);
- // Transpose NHWC
- auto data_format = attrs.get<string>("data_format");
+ const auto data_format = attrs.get<string>("data_format");
+ int channel_index;
+ const auto dims = tensor->getDimensions();
if (data_format == "NHWC") {
- tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
- {0, 3, 1, 2});
- // TODO(jie): transpose it
+ // 1). NHWC is really N+C
+ channel_index = dims.nbDims - 1; // batch dimension is implicit here!
} else {
- VLOG(2) << "NCHW !!!!";
+ // 2). NCHW is really N+CHW
+ channel_index = dims.nbDims - 3; // batch dimension is implicit here!
}
- auto dims = tensor->getDimensions();
- VLOG(2) << "tensor dimensions: " << dims.nbDims;
- for (int i = 0; i < dims.nbDims; i++) {
- VLOG(2) << "i: " << dims.d[i];
+ nvinfer1::Permutation permutation;
+ for (int32_t i = 0; i < dims.nbDims; ++i) {
+ permutation.order[i] = i;
}
- dims = weights.shape_;
- VLOG(2) << "tensor dimensions: " << dims.nbDims;
- for (int i = 0; i < dims.nbDims; i++) {
- VLOG(2) << "i: " << dims.d[i];
+
+ if (channel_index >= 0) {
+ permutation.order[0] = channel_index;
+ permutation.order[channel_index] = 0;
+ } else {
+ return tensorflow::errors::Unimplemented(
+ "TFTRT::BiasAdd cannot apply on batch dimension, at ", node_def.name());
+ }
+
+ // TensorRT addScale requires the input to be of rank 3, so we need to apply
+ // a transpose as well as a reshape.
+ if (channel_index != 0 || dims.nbDims != 3) {
+ nvinfer1::IShuffleLayer* shuffle_layer =
+ ctx.network()->addShuffle(*const_cast<nvinfer1::ITensor*>(tensor));
+ TFTRT_RETURN_ERROR_IF_NULLPTR(shuffle_layer, node_def.name());
+ nvinfer1::Dims reshape_dims;
+ reshape_dims.nbDims = 3;
+ reshape_dims.d[0] = 0; // 0 copy from the input
+ reshape_dims.d[1] = dims.nbDims >= 2 ? 0 : 1; // 0 copy from the input
+ reshape_dims.d[2] = dims.nbDims >= 3 ? -1 : 1; // -1 infer from the rest
+ if (channel_index != 0) {
+ // Maybe we do not need this check; it is kept out of concern for TRT
+ // optimization.
+ shuffle_layer->setFirstTranspose(permutation);
+ }
+ shuffle_layer->setReshapeDimensions(reshape_dims);
+ tensor = shuffle_layer->getOutput(0);
}
nvinfer1::ScaleMode mode = nvinfer1::ScaleMode::kCHANNEL;
@@ -1407,14 +1607,26 @@ tensorflow::Status ConvertScale(Converter& ctx,
nvinfer1::IScaleLayer* layer =
ctx.network()->addScale(*const_cast<nvinfer1::ITensor*>(tensor), mode,
weights, empty_weights, empty_weights);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
- if (data_format == "NHWC") {
- // TODO(jie): transpose it back!
- output_tensor = ctx.TransposeTensor(output_tensor, {0, 2, 3, 1});
- } else {
- VLOG(2) << "NCHW !!!!";
+
+ // restore transpose & reshape
+ if (channel_index != 0 || dims.nbDims != 3) {
+ nvinfer1::IShuffleLayer* shuffle_layer = ctx.network()->addShuffle(
+ *const_cast<nvinfer1::ITensor*>(output_tensor));
+ TFTRT_RETURN_ERROR_IF_NULLPTR(shuffle_layer, node_def.name());
+ nvinfer1::Dims reshape_dims = dims;
+ int tmp = reshape_dims.d[channel_index];
+ reshape_dims.d[channel_index] = reshape_dims.d[0];
+ reshape_dims.d[0] = tmp;
+ shuffle_layer->setReshapeDimensions(reshape_dims);
+ if (channel_index != 0) {
+ shuffle_layer->setSecondTranspose(permutation);
+ }
+ output_tensor = shuffle_layer->getOutput(0);
}
+
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
@@ -1431,11 +1643,13 @@ tensorflow::Status ConvertConst(Converter& ctx,
// Create shaped weights as output
tensorflow::Tensor tensor;
- if (!tensor.FromProto(weights_tensor))
- return tensorflow::errors::Internal("Cannot parse weight tensor proto: " +
+ if (!tensor.FromProto(weights_tensor)) {
+ return tensorflow::errors::Internal("Cannot parse weight tensor proto: ",
node_def.name());
+ }
TRT_ShapedWeights weights(dtype);
+ // TODO(aaroey): we should choose the array using dtype and shape.
if (!weights_tensor.float_val().empty()) {
VLOG(2) << "SCALAR!!!" << node_def.name();
nvinfer1::Dims scalar_shape;
@@ -1443,22 +1657,16 @@ tensorflow::Status ConvertConst(Converter& ctx,
VLOG(2) << "dimensions: " << tensor.dims();
VLOG(2) << "size: " << weights_tensor.float_val_size();
scalar_shape = GetTensorShape(tensor);
+ VLOG(2) << "details: ";
for (int i = 0; i < scalar_shape.nbDims; i++)
VLOG(2) << scalar_shape.d[i];
- if (GetShapeSize(scalar_shape) != weights_tensor.float_val_size()) {
- if (weights_tensor.float_val_size() == 1 ||
- scalar_shape.d[0] == weights_tensor.float_val_size()) {
- scalar_shape.nbDims = 1;
- // no dimension provided. flatten it
- scalar_shape.d[0] = weights_tensor.float_val_size();
- scalar_shape.type[0] = nvinfer1::DimensionType::kSPATIAL;
- } else {
- LOG(WARNING) << "Broadcast on weights only supports kCHANNEL and"
- << " kUNIFORM, at: " << node_def.name();
- string err_str("Broadcast method is not supported for '");
- StrAppend(&err_str, node_def.name(), "' of type ", node_def.op());
- return tensorflow::errors::InvalidArgument(err_str);
- }
+ if (GetShapeSize(scalar_shape) != weights_tensor.float_val_size() &&
+ weights_tensor.float_val_size() != 1) {
+ LOG(ERROR) << "Broadcast on weights only supports kCHANNEL and"
+ << " kUNIFORM, at: " << node_def.name();
+ string err_str("Broadcast method is not supported for '");
+ StrAppend(&err_str, node_def.name(), "' of type ", node_def.op());
+ return tensorflow::errors::InvalidArgument(err_str);
}
} else {
VLOG(2) << "Dimensions: " << tensor.dims();
@@ -1468,39 +1676,42 @@ tensorflow::Status ConvertConst(Converter& ctx,
scalar_shape.type[0] = nvinfer1::DimensionType::kSPATIAL;
for (int i = 1; i < nvinfer1::Dims::MAX_DIMS; i++) {
scalar_shape.d[i] = 0;
- scalar_shape.type[i] = nvinfer1::DimensionType::kSPATIAL;
}
}
+ // TODO(aaroey): use GetShapeSize().
size_t len_data = tensorflow::DataTypeSize(dtype);
for (int i = 0; i < scalar_shape.nbDims; i++) len_data *= scalar_shape.d[i];
ctx.weight_store()->store_.push_back(std::vector<uint8_t>(len_data));
void* dst = static_cast<void*>(&(ctx.weight_store()->store_.back()[0]));
- std::vector<float> tensor_data(
- weights_tensor.float_val().begin(),
- weights_tensor.float_val()
- .end()); // make a local copy first to flatten
- memcpy(dst, tensor_data.data(), len_data); // store into weight store
+ if (weights_tensor.float_val_size() == 1) {
+ std::fill_n((float*)dst, GetShapeSize(scalar_shape),
+ *weights_tensor.float_val().begin());
+ } else {
+ // TODO(aaroey): get rid of this copy; RepeatedField is always contiguous,
+ // so the local copy made here just to flatten it should not be needed.
+ std::vector<float> tensor_data(weights_tensor.float_val().begin(),
+ weights_tensor.float_val().end());
+ memcpy(dst, tensor_data.data(), len_data); // store into weight store
+ }
+ VLOG(2) << "create shape details: ";
+ for (int i = 0; i < scalar_shape.nbDims; i++) VLOG(2) << scalar_shape.d[i];
weights = TRT_ShapedWeights(dtype, dst, scalar_shape);
} else if (!weights_tensor.int_val().empty()) {
+ // TODO(aaroey): this is very similar to the above code for float, merge
+ // them.
VLOG(2) << "int!!!" << node_def.name();
nvinfer1::Dims scalar_shape;
if (tensor.dims() > 0) {
VLOG(2) << "dimensions: " << tensor.dims();
scalar_shape = GetTensorShape(tensor);
- if (GetShapeSize(scalar_shape) != weights_tensor.int_val_size()) {
- if (weights_tensor.int_val_size() == 1 ||
- scalar_shape.d[0] == weights_tensor.int_val_size()) {
- scalar_shape.nbDims = 1;
- // no dimension provided. flatten it
- scalar_shape.d[0] = weights_tensor.int_val_size();
- scalar_shape.type[0] = nvinfer1::DimensionType::kSPATIAL;
- } else {
- LOG(WARNING) << "Broadcast on weights only supports kCHANNEL and"
- << " kUNIFORM, at: " << node_def.name();
- string err_str("Broadcast method is not supported for '");
- StrAppend(&err_str, node_def.name(), "' of type ", node_def.op());
- return tensorflow::errors::InvalidArgument(err_str);
- }
+ if (GetShapeSize(scalar_shape) != weights_tensor.int_val_size() &&
+ weights_tensor.int_val_size() != 1) {
+ LOG(WARNING) << "Broadcast on weights only supports kCHANNEL and"
+ << " kUNIFORM, at: " << node_def.name();
+ string err_str("Broadcast method is not supported for '");
+ StrAppend(&err_str, node_def.name(), "' of type ", node_def.op());
+ return tensorflow::errors::InvalidArgument(err_str);
}
} else {
VLOG(2) << "dimensions: " << tensor.dims();
@@ -1513,23 +1724,30 @@ tensorflow::Status ConvertConst(Converter& ctx,
scalar_shape.type[i] = nvinfer1::DimensionType::kSPATIAL;
}
}
- // we should not have converted //if (ctx.isFP16()) {
+ // we should not have converted
size_t len_data = tensorflow::DataTypeSize(dtype);
for (int i = 0; i < scalar_shape.nbDims; i++) len_data *= scalar_shape.d[i];
size_t len_tensor = weights_tensor.int_val_size() * sizeof(int32);
len_data = std::max(len_data, len_tensor);
ctx.weight_store()->store_.push_back(std::vector<uint8_t>(len_data));
void* dst = static_cast<void*>(&(ctx.weight_store()->store_.back()[0]));
- std::vector<int32> tensor_data(
- weights_tensor.int_val().begin(),
- weights_tensor.int_val().end()); // make a local copy first to flatten
- // doesn't have to be contigous
- memcpy(dst, tensor_data.data(), len_tensor); // store into weight store
+ if (weights_tensor.int_val_size() == 1) {
+ std::fill_n((int*)dst, GetShapeSize(scalar_shape),
+ *weights_tensor.int_val().begin());
+ } else {
+ // TODO(aaroey): get rid of this copy; RepeatedField is always contiguous,
+ // so the local copy made here just to flatten it should not be needed.
+ std::vector<int32> tensor_data(weights_tensor.int_val().begin(),
+ weights_tensor.int_val().end());
+ memcpy(dst, tensor_data.data(), len_tensor); // store into weight store
+ }
weights = TRT_ShapedWeights(dtype, dst, scalar_shape);
} else if (!weights_tensor.tensor_content().empty()) {
- // obsolete method.
- // After optimization path, we do not see weights in this format.
- // fp16 conversion technically should be needed here.
+ // obsolete method.
+ // After optimization path, we do not see weights in this format.
+ // TODO(aaroey): why?
+ // fp16 conversion technically should be needed here.
VLOG(2) << "TENSOR!!!" << node_def.name();
const auto& content = weights_tensor.tensor_content();
@@ -1543,8 +1761,8 @@ tensorflow::Status ConvertConst(Converter& ctx,
content, static_cast<char*>(const_cast<void*>(weights.GetValues())));
}
} else {
- return tensorflow::errors::Unimplemented(
- "Not supported constant type, at " + node_def.name());
+ return tensorflow::errors::Unimplemented("Not supported constant type, at ",
+ node_def.name());
}
// Pass the output
outputs->push_back(TRT_TensorOrWeights(weights));
@@ -1563,96 +1781,144 @@ tensorflow::Status ConvertBinary(Converter& ctx,
const tensorflow::NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs) {
- if (inputs.size() != 2)
+ if (inputs.size() != 2) {
return tensorflow::errors::FailedPrecondition(
- "Binary ops require two tensor input, at " + node_def.name());
-
- if (inputs.at(0).is_weights() && inputs.at(1).is_weights())
- return ConstantFoldBinary(ctx, node_def, inputs, outputs);
-
- if (inputs.at(0).is_tensor() && inputs.at(1).is_weights())
- return BinaryTensorOpWeight(ctx, node_def, inputs.at(0).tensor(),
- inputs.at(1).weights(), outputs);
+ "Binary ops require two tensor input, at ", node_def.name());
+ }
- if (inputs.at(0).is_weights() && inputs.at(1).is_tensor())
- return BinaryTensorOpWeight(ctx, node_def, inputs.at(1).tensor(),
- inputs.at(0).weights(), outputs);
+ // Constant folding should have been done by TensorFlow
- if (inputs.at(0).is_tensor() && inputs.at(1).is_tensor())
- return BinaryTensorOpTensor(ctx, node_def, inputs.at(0).tensor(),
- inputs.at(1).tensor(), outputs);
+ if (inputs.at(0).is_weights() && inputs.at(1).is_weights()) {
+ return tensorflow::errors::Unimplemented(
+ "Constant folding is falled back to TensorFlow, binary op received "
+ "both input as constant at: ",
+ node_def.name());
+ }
- return tensorflow::errors::Unknown("Binary op input error, at " +
- node_def.name());
+ // Try to convert into a Scale layer first (for better performance).
+ // Since the Scale layer supports only a restricted broadcast policy and set
+ // of op types, we allow it to fail and then fall back to the Elementwise op
+ // (BinaryTensorOpTensor).
+ Status status = tensorflow::Status::OK();
+ if (inputs.at(0).is_tensor() && inputs.at(1).is_weights()) {
+ status = BinaryTensorOpWeight(ctx, node_def, inputs.at(0).tensor(),
+ inputs.at(1).weights(), false, outputs);
+ } else if (inputs.at(0).is_weights() && inputs.at(1).is_tensor()) {
+ status = BinaryTensorOpWeight(ctx, node_def, inputs.at(1).tensor(),
+ inputs.at(0).weights(), true, outputs);
+#if NV_TENSORRT_MAJOR == 3
+ } else {
+#else
+ }
+ if ((inputs.at(0).is_tensor() && inputs.at(1).is_tensor()) || !status.ok()) {
+#endif
+ status = BinaryTensorOpTensor(ctx, node_def, inputs.at(0), inputs.at(1),
+ outputs);
+ }
+ return status;
}
tensorflow::Status ConvertUnary(Converter& ctx,
const tensorflow::NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs) {
- if (inputs.size() != 1)
+ static const std::unordered_map<string, nvinfer1::UnaryOperation> ops{
+ {"Neg", nvinfer1::UnaryOperation::kNEG},
+ {"Exp", nvinfer1::UnaryOperation::kEXP},
+ {"Log", nvinfer1::UnaryOperation::kLOG},
+ {"Sqrt", nvinfer1::UnaryOperation::kSQRT},
+ {"Abs", nvinfer1::UnaryOperation::kABS},
+ {"Reciprocal", nvinfer1::UnaryOperation::kRECIP},
+ };
+
+ if (inputs.size() != 1) {
return tensorflow::errors::FailedPrecondition(
- "Unary ops require single tensor input, at " + node_def.name());
+ "Unary ops require single tensor input, at ", node_def.name());
+ }
- if (inputs.at(0).is_weights())
- return ConstantFoldUnary(ctx, node_def, inputs, outputs);
- else if (inputs.at(0).is_tensor())
+#if NV_TENSORRT_MAJOR == 3
+ if (inputs.at(0).is_weights()) {
return tensorflow::errors::Unimplemented(
- "Unary op for tensor not supported, at " + node_def.name());
+ "Constant folding for unary op is not supported, at ", node_def.name());
+ }
+#endif
- return tensorflow::errors::Unknown("Binary op input error, at " +
- node_def.name());
+ // TODO(jie): check type
+ const nvinfer1::ITensor* tensor;
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, inputs.at(0), inputs.at(0).shape(), &tensor),
+ node_def.name());
+
+ nvinfer1::IUnaryLayer* layer;
+ if (node_def.op() == "Rsqrt") {
+ layer = ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+ nvinfer1::UnaryOperation::kSQRT);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+ tensor = layer->getOutput(0);
+ layer = ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+ nvinfer1::UnaryOperation::kRECIP);
+ } else if (ops.count(node_def.op()) != 0) {
+ layer = ctx.network()->addUnary(*const_cast<nvinfer1::ITensor*>(tensor),
+ ops.at(node_def.op()));
+ } else {
+ return tensorflow::errors::InvalidArgument(
+ "Unary op: ", node_def.op(), " not supported, at ", node_def.name());
+ }
+
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+ nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+ outputs->push_back(TRT_TensorOrWeights(output_tensor));
+ return tensorflow::Status::OK();
}
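A small, self-contained sketch of the dispatch above, using plain floats instead of TRT layers: each supported TF unary op maps to one table entry, and Rsqrt, which has no single TRT UnaryOperation, is built by chaining kSQRT and kRECIP (here, sqrt followed by reciprocal). Names below are illustrative only.

#include <cmath>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

int main() {
  // Mirror of the static op table in ConvertUnary, but with float lambdas.
  const std::unordered_map<std::string, std::function<float(float)>> ops{
      {"Neg", [](float x) { return -x; }},
      {"Exp", [](float x) { return std::exp(x); }},
      {"Log", [](float x) { return std::log(x); }},
      {"Sqrt", [](float x) { return std::sqrt(x); }},
      {"Abs", [](float x) { return std::fabs(x); }},
      {"Reciprocal", [](float x) { return 1.0f / x; }},
  };
  const float x = 4.0f;
  // Rsqrt is not in the table; it is composed from two entries, mirroring the
  // two chained addUnary() calls in the converter.
  const float rsqrt = ops.at("Reciprocal")(ops.at("Sqrt")(x));
  std::cout << "Rsqrt(4) = " << rsqrt << std::endl;  // prints 0.5
  return 0;
}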
-tensorflow::Status ConvertReduce(Converter& ctx,
- const tensorflow::NodeDef& node_def,
- const std::vector<TRT_TensorOrWeights>& inputs,
- std::vector<TRT_TensorOrWeights>* outputs) {
+#if NV_TENSORRT_MAJOR == 3
+tensorflow::Status ConvertReducePool(
+ Converter& ctx, const tensorflow::NodeDef& node_def,
+ const std::vector<TRT_TensorOrWeights>& inputs,
+ std::vector<TRT_TensorOrWeights>* outputs) {
if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
- !inputs.at(1).is_weights())
+ !inputs.at(1).is_weights()) {
return tensorflow::errors::InvalidArgument(
- "Input expects tensor and weights, at" + node_def.name());
+ "Input expects tensor and weights, at ", node_def.name());
+ }
// Implement tensor binaryOp weight [channel wise] for now;
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
- auto dims = tensor->getDimensions();
+ const auto dims = tensor->getDimensions();
// Restore implicit batch dimension
- int nb_dims = dims.nbDims + 1;
+ const int nb_dims = dims.nbDims + 1;
TRT_ShapedWeights index_list = inputs.at(1).weights();
-
TFAttrs attrs(node_def);
- // TODO(jie): handle data type.
- // Index type here is done through TF type, so I can leverage their
- // EnumToDataType for my cast
auto index_type = attrs.get<tensorflow::DataType>("Tidx");
// Only expect to handle INT32 as attributes for now
- if (index_type != tensorflow::DataType::DT_INT32)
+ if (index_type != tensorflow::DataType::DT_INT32) {
return tensorflow::errors::Unimplemented("Tidx supports only DT_INT32");
- auto index_list_data =
+ }
+ const auto index_list_data =
static_cast<int*>(const_cast<void*>(index_list.GetValues()));
- // Hack warning: have to fall back to pool layer since reduce is not in public
- // TRT yet.
- if (nb_dims != 4)
+ if (nb_dims != 4) {
return tensorflow::errors::InvalidArgument(
- "TRT only support reduce on 4 dimensional tensors, at" +
+ "TRT only supports reduce on 4 dimensional tensors, at ",
node_def.name());
- if (index_list.count() > 2)
+ }
+ if (index_list.count() > 2) {
return tensorflow::errors::InvalidArgument(
- "TRT cannot support reduce on more than 2 dimensions, at" +
+ "TRT cannot support reduce on more than 2 dimensions, at ",
node_def.name());
+ }
std::set<int> idx_set;
// We cannot operate on Channel. permutation flag used to transpose tensor
int permuted_index = -1;
for (int i = 0; i < index_list.count(); i++) {
- if (index_list_data[i] == 0)
- return tensorflow::errors::InvalidArgument("TRT cannot reduce at 0, at" +
+ if (index_list_data[i] == 0) {
+ return tensorflow::errors::InvalidArgument("TRT cannot reduce at 0, at ",
node_def.name());
+ }
if (index_list_data[i] == 1) permuted_index = 1;
-
idx_set.emplace(index_list_data[i]);
}
@@ -1673,6 +1939,7 @@ tensorflow::Status ConvertReduce(Converter& ctx,
// Apply permutation before extracting dimension for pool_kernel
tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
permutation_order);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
}
// Apply permutation before extracting dimension for pool_kernel
@@ -1685,34 +1952,104 @@ tensorflow::Status ConvertReduce(Converter& ctx,
nvinfer1::IPoolingLayer* layer =
ctx.network()->addPooling(*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::PoolingType::kAVERAGE, pool_kernel);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
output_tensor = layer->getOutput(0);
} else {
- return tensorflow::errors::Unimplemented(
- "Op not supported " + node_def.op() + " , at " + node_def.name());
+ return tensorflow::errors::Unimplemented("Op not supported ", node_def.op(),
+ " , at ", node_def.name());
}
if (permuted_index != -1) {
// Apply permutation before extracting dimension for pool_kernel
output_tensor = ctx.TransposeTensor(
const_cast<nvinfer1::ITensor*>(output_tensor), permutation_order);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
}
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
+#elif NV_TENSORRT_MAJOR > 3
+tensorflow::Status ConvertReduce(Converter& ctx,
+ const tensorflow::NodeDef& node_def,
+ const std::vector<TRT_TensorOrWeights>& inputs,
+ std::vector<TRT_TensorOrWeights>* outputs) {
+ if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
+ !inputs.at(1).is_weights()) {
+ return tensorflow::errors::InvalidArgument(
+ "Input expects tensor and weights, at ", node_def.name());
+ }
+
+ const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
+ TRT_ShapedWeights index_list = inputs.at(1).weights();
+
+ TFAttrs attrs(node_def);
+ auto index_type = attrs.get<tensorflow::DataType>("Tidx");
+
+ // Only expect to handle INT32 as attributes for now
+ if (index_type != tensorflow::DataType::DT_INT32) {
+ return tensorflow::errors::Unimplemented("Tidx supports only DT_INT32");
+ }
+
+ const auto keep_dims = attrs.get<bool>("keep_dims");
+ auto index_list_data =
+ static_cast<int*>(const_cast<void*>(index_list.GetValues()));
+
+ int axes = 0;
+ if (index_list.count() == 0) {
+ return tensorflow::errors::InvalidArgument(
+ "TRT cannot support reduce on all (batch) dimensions, at ",
+ node_def.name());
+ } else {
+ for (int i = 0; i < index_list.count(); i++) {
+ if (index_list_data[i] == 0) {
+ return tensorflow::errors::InvalidArgument(
+ "TRT cannot reduce at batch dimension, at ", node_def.name());
+ }
+ axes |= (1 << (index_list_data[i] - 1));
+ }
+ }
+
+ nvinfer1::ReduceOperation reduce_operation;
+ if (node_def.op() == "Sum") {
+ reduce_operation = nvinfer1::ReduceOperation::kSUM;
+ } else if (node_def.op() == "Prod") {
+ reduce_operation = nvinfer1::ReduceOperation::kPROD;
+ } else if (node_def.op() == "Max") {
+ reduce_operation = nvinfer1::ReduceOperation::kMAX;
+ } else if (node_def.op() == "Min") {
+ reduce_operation = nvinfer1::ReduceOperation::kMIN;
+ } else if (node_def.op() == "Mean") {
+ reduce_operation = nvinfer1::ReduceOperation::kAVG;
+ } else {
+ return tensorflow::errors::Unimplemented("Op not supported ", node_def.op(),
+ " , at ", node_def.name());
+ }
+
+ nvinfer1::ILayer* layer =
+ ctx.network()->addReduce(*const_cast<nvinfer1::ITensor*>(tensor),
+ reduce_operation, axes, keep_dims);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+
+ outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
+ return tensorflow::Status::OK();
+}
+#endif
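The axes bitmask computed above drops the implicit batch dimension: TF reduction axis i (with i > 0) becomes bit i - 1 in the mask passed to addReduce(), and axis 0 is rejected. A standalone sketch of just that arithmetic:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // TF reduction axes include the batch dimension (axis 0), which TRT cannot
  // reduce over; the remaining axes are shifted down by one and packed into a
  // bitmask, exactly as in the axes computation above.
  const std::vector<int> tf_axes = {1, 3};  // e.g. reduce over two inner dims
  uint32_t trt_axes = 0;
  for (int axis : tf_axes) {
    if (axis == 0) {
      std::cerr << "cannot reduce over the batch dimension" << std::endl;
      return 1;
    }
    trt_axes |= 1u << (axis - 1);
  }
  std::cout << "TRT axes bitmask: 0x" << std::hex << trt_axes << std::endl;  // 0x5
  return 0;
}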
tensorflow::Status ConvertPad(Converter& ctx,
const tensorflow::NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs) {
+ // TODO(aaroey): make a routine for this check and reuse it.
if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
- !inputs.at(1).is_weights())
+ !inputs.at(1).is_weights()) {
return tensorflow::errors::InvalidArgument(
- "Input expects tensor and weights, at" + node_def.name());
+ "Input expects tensor and weights, at ", node_def.name());
+ }
// Implement tensor binaryOp weight [channel wise] for now;
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
- auto dims = tensor->getDimensions();
+ const auto dims = tensor->getDimensions();
// Restore implicit batch dimension
- int nb_dims = dims.nbDims + 1;
+ const int nb_dims = dims.nbDims + 1;
TRT_ShapedWeights pads = inputs.at(1).weights();
@@ -1722,21 +2059,24 @@ tensorflow::Status ConvertPad(Converter& ctx,
auto padding_type = attrs.get<tensorflow::DataType>("Tpaddings");
// TODO(jie): handle data type conversion for TRT?
- if (pads.shape_.d[0] != nb_dims || pads.shape_.d[1] != 2)
+ if (pads.shape_.d[0] != nb_dims || pads.shape_.d[1] != 2) {
return tensorflow::errors::InvalidArgument(
- "Pad only supports explicit padding on 4 dimensional tensor, at " +
+ "Pad only supports explicit padding on 4 dimensional tensor, at ",
node_def.name());
+ }
// Only expect to handle INT32 as attributes for now
- if (padding_type != tensorflow::DataType::DT_INT32)
+ if (padding_type != tensorflow::DataType::DT_INT32) {
return tensorflow::errors::Unimplemented(
"Tpaddings supports only DT_INT32");
+ }
auto pad_data = static_cast<int*>(const_cast<void*>(pads.GetValues()));
std::vector<int32_t> pad_index;
for (int i = 0; i < nb_dims; i++) {
- if (pad_data[2 * i] != 0 || pad_data[2 * i + 1] != 0)
+ if (pad_data[2 * i] != 0 || pad_data[2 * i + 1] != 0) {
pad_index.push_back(i);
+ }
}
// No padding at all, we should exit
@@ -1746,20 +2086,23 @@ tensorflow::Status ConvertPad(Converter& ctx,
}
// Only supports padding on less than 2 axis GIE-2579
- if (pad_index.size() > 2)
+ if (pad_index.size() > 2) {
return tensorflow::errors::InvalidArgument(
"Padding layer does not support padding on > 2");
+ }
// Padding on batch dimension is not supported
- if (pad_index[0] == 0)
+ if (pad_index[0] == 0) {
return tensorflow::errors::InvalidArgument(
"Padding layer does not support padding on batch dimension");
+ }
// Not doing the legit thing here. ignoring padding on dim 1 and 3;
// TODO(jie): implement pad as uff parser
- if (pad_index.size() == 2 && pad_index[0] == 0 && pad_index[1] == 3)
+ if (pad_index.size() == 2 && pad_index[0] == 0 && pad_index[1] == 3) {
return tensorflow::errors::Unimplemented(
"Padding layer does not support padding on dimension 1 and 3 yet");
+ }
bool legit_pad = true;
nvinfer1::DimsHW pre_padding(0, 0);
@@ -1770,6 +2113,7 @@ tensorflow::Status ConvertPad(Converter& ctx,
legit_pad = false;
tensor = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor),
{0, 3, 2, 1});
+ TFTRT_RETURN_ERROR_IF_NULLPTR(tensor, node_def.name());
permuted_pad_index[0] = 3;
}
@@ -1786,11 +2130,14 @@ tensorflow::Status ConvertPad(Converter& ctx,
nvinfer1::IPaddingLayer* layer = ctx.network()->addPadding(
*const_cast<nvinfer1::ITensor*>(tensor), pre_padding, post_padding);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
- if (!legit_pad)
+ if (!legit_pad) {
output_tensor = ctx.TransposeTensor(
const_cast<nvinfer1::ITensor*>(output_tensor), {0, 3, 2, 1});
+ TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
+ }
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
@@ -1803,9 +2150,10 @@ tensorflow::Status ConvertConcat(Converter& ctx,
// not including the last input (axis) here
int input_size = static_cast<int>(inputs.size()) - 1;
- if (!inputs.at(0).is_tensor())
+ if (!inputs.at(0).is_tensor()) {
return tensorflow::errors::InvalidArgument(
- "Concat in TRT support only Tensor input, at " + node_def.name());
+ "Concat in TRT supports only tensor input, at ", node_def.name());
+ }
// We are retrieving the axis
TRT_ShapedWeights axis = inputs.at(input_size).weights();
@@ -1816,8 +2164,8 @@ tensorflow::Status ConvertConcat(Converter& ctx,
// TODO(jie): handle data type
// Only expect to handle INT32 as index attributes for now
if (index_type != tensorflow::DataType::DT_INT32)
- return tensorflow::errors::Unimplemented(
- "Tidx supports only DT_INT32, at " + node_def.name());
+ return tensorflow::errors::Unimplemented("Tidx supports only DT_INT32, at ",
+ node_def.name());
int index = *(static_cast<int*>(const_cast<void*>(axis.GetValues())));
@@ -1825,23 +2173,29 @@ tensorflow::Status ConvertConcat(Converter& ctx,
auto dim = inputs.at(0).tensor()->getDimensions();
// dimension check
- if (index > dim.nbDims + 1)
+ if (index > dim.nbDims + 1) {
return tensorflow::errors::InvalidArgument(
- "Concatenate on axis out of dimension range, at " + node_def.name());
-
- if (index == 0)
+ "Concatenate on axis out of dimension range, at ", node_def.name());
+ }
+ if (index == 0) {
return tensorflow::errors::InvalidArgument(
- "Concatenate on batch dimension not supported, at " + node_def.name());
+ "Concatenate on batch dimension not supported, at ", node_def.name());
+ }
+ if (index < 0) {
+ index = dim.nbDims + index + 1;
+ }
+#if NV_TENSORRT_MAJOR == 3
// incase we need permutation;
std::vector<int> permutation_order(dim.nbDims + 1);
for (int i = 0; i < dim.nbDims + 1; i++) permutation_order[i] = i;
if (index != 1) {
- permutation_order[1] = index - 1;
- permutation_order[index - 1] = 1;
+ permutation_order[1] = index;
+ permutation_order[index] = 1;
}
+#endif
std::vector<nvinfer1::ITensor const*> inputs_vec;
// Shape check (all input tensors should have the same shape)
@@ -1849,24 +2203,28 @@ tensorflow::Status ConvertConcat(Converter& ctx,
for (int i = 0; i < input_size; i++) {
auto tensor_i = inputs.at(i).tensor();
auto dim_i = tensor_i->getDimensions();
- if (dim_i.nbDims != dim.nbDims)
+ if (dim_i.nbDims != dim.nbDims) {
return tensorflow::errors::InvalidArgument(
- "Concatenate receives inputs with inconsistent dimensions, at " +
+ "Concatenate receives inputs with inconsistent dimensions, at ",
node_def.name());
-
+ }
for (int j = 0; j < dim.nbDims; j++) {
// check dimension consistency on non-concatenate axis
- if (j != index - 1 && dim_i.d[j] != dim.d[j])
+ if (j != index - 1 && dim_i.d[j] != dim.d[j]) {
return tensorflow::errors::InvalidArgument(
- "Concatenate receives inputs with inconsistent shape, at" +
+ "Concatenate receives inputs with inconsistent shape, at ",
node_def.name());
+ }
}
- // TRT does concatenation only on channel!
- if (index != 1)
+#if NV_TENSORRT_MAJOR == 3
+ // TRT3 does concatenation only on channel!
+ if (index != 1) {
tensor_i = ctx.TransposeTensor(const_cast<nvinfer1::ITensor*>(tensor_i),
permutation_order);
-
+ TFTRT_RETURN_ERROR_IF_NULLPTR(tensor_i, node_def.name());
+ }
+#endif
inputs_vec.push_back(tensor_i);
}
@@ -1874,11 +2232,18 @@ tensorflow::Status ConvertConcat(Converter& ctx,
nvinfer1::IConcatenationLayer* layer = ctx.network()->addConcatenation(
const_cast<nvinfer1::ITensor* const*>(inputs_vec.data()),
inputs_vec.size());
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+#if NV_TENSORRT_MAJOR > 3
+ layer->setAxis(index - 1);
+#endif
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+#if NV_TENSORRT_MAJOR == 3
if (index != 1) {
output_tensor = ctx.TransposeTensor(output_tensor, permutation_order);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(output_tensor, node_def.name());
}
+#endif
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
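Two details above are worth illustrating in isolation: a negative TF concat axis is normalized with nbDims + index + 1 (TRT dimensions exclude the batch dimension), and on TRT 3 a transpose swaps the concat axis with dimension 1, since that version concatenates only on the channel axis. A minimal sketch, with hard-coded dimensions chosen only for illustration:

#include <iostream>
#include <utility>
#include <vector>

int main() {
  // Assume the input tensors have 3 non-batch dimensions and the TF concat
  // axis attribute is -1 (the last axis, counting the batch dimension).
  const int nb_dims = 3;
  int index = -1;
  if (index < 0) index = nb_dims + index + 1;  // -1 -> 3

  // TRT 3 concatenates only on the channel axis, so the concat axis is swapped
  // with dimension 1 by a transpose whose order is built exactly like this.
  std::vector<int> permutation_order(nb_dims + 1);
  for (int i = 0; i < nb_dims + 1; ++i) permutation_order[i] = i;
  if (index != 1) std::swap(permutation_order[1], permutation_order[index]);

  for (int p : permutation_order) std::cout << p << ' ';  // prints: 0 3 2 1
  std::cout << std::endl;
  return 0;
}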
@@ -1997,112 +2362,243 @@ tensorflow::Status ConvertFusedBatchNorm(
combined_offset_weights.GetWeightsForTRT(),
combined_scale_weights.GetWeightsForTRT(),
dummy_power_weights.GetWeightsForTRT());
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
-tensorflow::Status ConvertMatMul(Converter& ctx,
- const tensorflow::NodeDef& node_def,
- const std::vector<TRT_TensorOrWeights>& inputs,
- std::vector<TRT_TensorOrWeights>* outputs) {
- const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
-
- // TODO(jie): transpose!
- TFAttrs attrs(node_def);
+#if NV_TENSORRT_MAJOR > 3
+tensorflow::Status ConvertMatMulHelper(
+ Converter& ctx, TRT_TensorOrWeights tensor_input,
+ TRT_ShapedWeights weights_raw, bool transpose_weight, string node_name,
+ std::vector<TRT_TensorOrWeights>* outputs) {
+ nvinfer1::ITensor* output_tensor;
+ if (!tensor_input.is_tensor()) {
+ return tensorflow::errors::InvalidArgument("Input 0 expects tensor");
+ }
+ const nvinfer1::ITensor* tensor = tensor_input.tensor();
- TRT_ShapedWeights weights_ck = inputs.at(1).weights();
- TRT_ShapedWeights weights = ctx.get_temp_weights_like(weights_ck);
- ReorderCKtoKC(weights_ck, &weights);
+ TRT_ShapedWeights weights(weights_raw.type_);
+ if (transpose_weight) {
+ weights = weights_raw;
+ } else {
+ TRT_ShapedWeights weights_ck = weights_raw;
+ weights = ctx.get_temp_weights_like(weights_ck);
+ ReorderCKtoKC(weights_raw, &weights);
+ }
TRT_ShapedWeights biases(weights.type_);
int noutput = weights.shape_.d[0];
+ auto input_dim = tensor->getDimensions();
+ while (input_dim.nbDims != 3) {
+ input_dim.d[input_dim.nbDims++] = 1;
+ }
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, tensor_input, input_dim, &tensor), node_name);
+
nvinfer1::IFullyConnectedLayer* layer = ctx.network()->addFullyConnected(
*const_cast<nvinfer1::ITensor*>(tensor), noutput, weights, biases);
-
- nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_name);
+ output_tensor = layer->getOutput(0);
+
+ const nvinfer1::ITensor* temp_tensor;
+ auto output_dim = output_tensor->getDimensions();
+ output_dim.nbDims = 1;
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, TRT_TensorOrWeights(output_tensor), output_dim,
+ &temp_tensor),
+ node_name);
+ output_tensor = const_cast<nvinfer1::ITensor*>(temp_tensor);
outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
-tensorflow::Status ConvertReshape(
+// inputs are both two dimensional (tensorflow::ops::MatMul)
+tensorflow::Status ConvertMatMul(Converter& ctx,
+ const tensorflow::NodeDef& node_def,
+ const std::vector<TRT_TensorOrWeights>& inputs,
+ std::vector<TRT_TensorOrWeights>* outputs) {
+ if (!inputs.at(0).is_tensor()) {
+ return tensorflow::errors::InvalidArgument("Input 0 expects tensor, at " +
+ node_def.name());
+ }
+
+ TFAttrs attrs(node_def);
+ // TODO(jie): INT32 should be converted?
+ tensorflow::DataType tf_dtype = attrs.get<tensorflow::DataType>("T");
+ if (tf_dtype != tensorflow::DataType::DT_FLOAT &&
+ tf_dtype != tensorflow::DataType::DT_HALF) {
+ return tensorflow::errors::Unimplemented(
+ "Data type is not supported for node " + node_def.name() + ", got " +
+ tensorflow::DataTypeString(tf_dtype));
+ }
+ bool transpose_a = attrs.get<bool>("transpose_a");
+ bool transpose_b = attrs.get<bool>("transpose_b");
+
+ // FullyConnected:
+ if (transpose_a) {
+ return tensorflow::errors::Internal(
+ "Transpose_a is not supported for TensorRT FullyConnected (op: " +
+ node_def.op() + "), at: " + node_def.name());
+ }
+ if (inputs.at(1).is_tensor()) {
+ return tensorflow::errors::Internal(
+ "Operand 1 must be constant for TensorRT FullyConnected (op: " +
+ node_def.op() + "), at: " + node_def.name());
+ }
+ return ConvertMatMulHelper(ctx, inputs.at(0), inputs.at(1).weights(),
+ transpose_b, node_def.name(), outputs);
+}
+
+tensorflow::Status ConvertBatchMatMul(
Converter& ctx, const tensorflow::NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs) {
- if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
- !inputs.at(1).is_weights())
- return tensorflow::errors::InvalidArgument(
- "Input expects tensor and weights, at" + node_def.name());
+ TFAttrs attrs(node_def);
- // implement tensor binaryOp weight [channel wise] for now;
- const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
- auto dims = tensor->getDimensions();
- // restore implicit batch dimension
+ // TODO(jie): INT32 should be converted?
+ tensorflow::DataType tf_dtype = attrs.get<tensorflow::DataType>("T");
+ if (tf_dtype != tensorflow::DataType::DT_FLOAT &&
+ tf_dtype != tensorflow::DataType::DT_HALF) {
+ return tensorflow::errors::Unimplemented(
+ "Data type is not supported for node " + node_def.name() + ", got " +
+ tensorflow::DataTypeString(tf_dtype));
+ }
- TRT_ShapedWeights shape = inputs.at(1).weights();
+ bool transpose_a = attrs.get<bool>("adj_x");
+ bool transpose_b = attrs.get<bool>("adj_y");
- TFAttrs attrs(node_def);
+ auto dims = inputs.at(0).shape();
+ if (dims.nbDims == 1) { // NC * CK is only supported through fully connected
+ if (transpose_a == false && inputs.at(0).is_tensor() &&
+ inputs.at(1).is_weights()) {
+ return ConvertMatMulHelper(ctx, inputs.at(0), inputs.at(1).weights(),
+ transpose_b, node_def.name(), outputs);
+ } else {
+ return tensorflow::errors::InvalidArgument(
+ "Invalid configuration for MatMul, at: " + node_def.name());
+ }
+ }
- auto padding_type = attrs.get<tensorflow::DataType>("Tshape");
+ const nvinfer1::ITensor* tensor_l;
+ const nvinfer1::ITensor* tensor_r;
+ auto dims_l = inputs.at(0).shape();
+ auto dims_r = inputs.at(1).shape();
+ if (inputs.at(0).is_weights()) {
+ if (inputs.at(0).shape().d[0] != 1) {
+ return tensorflow::errors::InvalidArgument(
+ "Input 0 as weight assumes broadcast across batch for MatMul, at: " +
+ node_def.name());
+ } else {
+ for (int i = 0; i < dims_l.nbDims - 1; i++) {
+ dims_l.d[i] = dims_l.d[i + 1];
+ }
+ dims_l.nbDims--;
+ }
+ }
+ if (inputs.at(1).is_weights()) {
+ if (inputs.at(1).shape().d[0] != 1) {
+ return tensorflow::errors::InvalidArgument(
+ "Input 1 as weight assumes broadcast across batch for MatMul, at: " +
+ node_def.name());
+ } else {
+ for (int i = 0; i < dims_r.nbDims - 1; i++) {
+ dims_r.d[i] = dims_r.d[i + 1];
+ }
+ dims_r.nbDims--;
+ }
+ }
- if (shape.shape_.nbDims != 1)
- return tensorflow::errors::InvalidArgument(
- "reshape new shape is not 1 dimensional, at " + node_def.name());
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, inputs.at(0), dims_l, &tensor_l),
+ node_def.name());
+ TFTRT_RETURN_ERROR_IF_FALSE(
+ PrepareTensorForShape(ctx, inputs.at(1), dims_r, &tensor_r),
+ node_def.name());
- // Only expect to handle INT32 as attributes for now
- if (padding_type != tensorflow::DataType::DT_INT32)
- return tensorflow::errors::Unimplemented(
- "reshape new shape supports only DT_INT32, at " + node_def.name());
+ nvinfer1::IMatrixMultiplyLayer* layer = ctx.network()->addMatrixMultiply(
+ *const_cast<nvinfer1::ITensor*>(tensor_l), transpose_a,
+ *const_cast<nvinfer1::ITensor*>(tensor_r), transpose_b);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+ nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+ outputs->push_back(TRT_TensorOrWeights(output_tensor));
+ return tensorflow::Status::OK();
+}
+#endif
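When one BatchMatMul operand is a constant, the code above requires its leading batch dimension to be 1 and then squeezes it away so the weight broadcasts across the batch. A small sketch of that dimension shift; the helper name below is illustrative and not part of the patch:

#include <iostream>
#include <vector>

// Drop a leading batch dimension of 1 by shifting every dimension left, so the
// constant operand broadcasts across the batch, as in ConvertBatchMatMul above.
std::vector<int> SqueezeLeadingBroadcastDim(std::vector<int> dims) {
  if (dims.empty() || dims[0] != 1) return dims;  // caller rejects d[0] != 1
  for (size_t i = 0; i + 1 < dims.size(); ++i) dims[i] = dims[i + 1];
  dims.pop_back();
  return dims;
}

int main() {
  for (int d : SqueezeLeadingBroadcastDim({1, 4, 5})) std::cout << d << ' ';  // 4 5
  std::cout << std::endl;
  return 0;
}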
- auto shape_data = static_cast<int*>(const_cast<void*>(shape.GetValues()));
+#if NV_TENSORRT_MAJOR > 3
+tensorflow::Status ConvertSoftmax(
+ Converter& ctx, const tensorflow::NodeDef& node_def,
+ const std::vector<TRT_TensorOrWeights>& inputs,
+ std::vector<TRT_TensorOrWeights>* outputs) {
+ const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
- if (shape_data[0] != -1)
+ int nbDims = tensor->getDimensions().nbDims;
+ if (nbDims == 0) {
return tensorflow::errors::InvalidArgument(
- "reshape new shape first dimension is not -1, at " + node_def.name());
+ "TensorRT Softmax cannot be applied to the batch dimension, at " +
+ node_def.name());
+ }
+ nvinfer1::ISoftMaxLayer* layer =
+ ctx.network()->addSoftMax(*const_cast<nvinfer1::ITensor*>(tensor));
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+ // Tensorflow SoftMax assumes applying softmax on the last dimension.
+ // TensorFlow's Softmax op applies softmax along the last dimension.
- auto shape_num_dims = shape.shape_.d[0];
- VLOG(2) << "shape dimensions: " << shape_num_dims;
- int volume_w = 1;
- for (int i = 1; i < shape.shape_.d[0]; i++) volume_w *= shape_data[i];
+ nvinfer1::ITensor* output_tensor = layer->getOutput(0);
+ outputs->push_back(TRT_TensorOrWeights(output_tensor));
+ return tensorflow::Status::OK();
+}
+#endif
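setAxes() takes a bitmask over the non-batch dimensions, so applying softmax over the last dimension, as TensorFlow does, sets only bit nbDims - 1. A one-line computation worth spelling out (the rank value is illustrative):

#include <cstdint>
#include <iostream>

int main() {
  // Rank of the tensor after stripping the implicit batch dimension.
  const int nbDims = 3;
  const uint32_t axes = 1u << (nbDims - 1);  // only the last dimension is set
  std::cout << "softmax axes bitmask: " << axes << std::endl;  // prints 4
  return 0;
}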
- int volume_t = 1;
- for (int i = 0; i < dims.nbDims; i++) volume_t *= dims.d[i];
+#if NV_TENSORRT_MAJOR > 3
+tensorflow::Status ConvertTopK(Converter& ctx,
+ const tensorflow::NodeDef& node_def,
+ const std::vector<TRT_TensorOrWeights>& inputs,
+ std::vector<TRT_TensorOrWeights>* outputs) {
+ const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
- VLOG(2) << "volume: " << volume_t << " volume weights: " << volume_w;
- if (volume_w != volume_t)
+ int nbDims = tensor->getDimensions().nbDims;
+ if (nbDims == 0) {
return tensorflow::errors::InvalidArgument(
- "volume does not agree between tensor and new shape, at " +
- node_def.name());
+ "TensorRT TopK cannot be applied to the batch dimension, at " + node_def.name());
+ }
- nvinfer1::IShuffleLayer* layer =
- ctx.network()->addShuffle(*const_cast<nvinfer1::ITensor*>(tensor));
+ TRT_ShapedWeights k_w = inputs.at(1).weights();
+ int k = *(static_cast<int*>(const_cast<void*>(k_w.GetValues())));
- nvinfer1::Dims reshape_dims;
- VLOG(2) << "new dimension: " << shape_num_dims - 1;
- reshape_dims.nbDims = shape_num_dims - 1;
- for (int32_t i = 0; i < reshape_dims.nbDims; ++i) {
- reshape_dims.d[i] = shape_data[i + 1];
+ nvinfer1::TopKOperation op;
+ uint32_t reducedAxes = 0;
+ if (node_def.op() == "TopKV2") {
+ op = nvinfer1::TopKOperation::kMAX;
+ reducedAxes |= 1 << (nbDims - 1);
+ } else {
+ return tensorflow::errors::Unimplemented(
+ "Operation: " + node_def.op() +
+ " not implemented, at: " + node_def.name());
}
- layer->setReshapeDimensions(reshape_dims);
- VLOG(2) << "new dimension: " << shape_num_dims - 1;
- nvinfer1::ITensor* output_tensor = layer->getOutput(0);
- auto dims_output = output_tensor->getDimensions();
- VLOG(2) << "output tensor dimension:" << dims_output.nbDims;
- outputs->push_back(TRT_TensorOrWeights(output_tensor));
+ nvinfer1::ITopKLayer* layer = ctx.network()->addTopK(
+ *const_cast<nvinfer1::ITensor*>(tensor), op, k, reducedAxes);
+ TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
+
+ nvinfer1::ITensor* output_value_tensor = layer->getOutput(0);
+ nvinfer1::ITensor* output_indices_tensor = layer->getOutput(1);
+ outputs->push_back(TRT_TensorOrWeights(output_value_tensor));
+ outputs->push_back(TRT_TensorOrWeights(output_indices_tensor));
return tensorflow::Status::OK();
}
+#endif
void Converter::register_op_converters() {
// vgg_16 slim implementation
- op_registry_["Placeholder"] = ConvertPlaceholder;
op_registry_["Conv2D"] = ConvertConv2D;
op_registry_["DepthwiseConv2dNative"] = ConvertConv2DDepthwise;
op_registry_["Relu"] = ConvertActivation;
op_registry_["MaxPool"] = ConvertPool;
op_registry_["AvgPool"] = ConvertPool;
- // This could be really handled as ConvertBinary
op_registry_["BiasAdd"] = ConvertScale;
op_registry_["Const"] = ConvertConst;
// TODO(ben,jie): this is a temp hack.
@@ -2113,17 +2609,39 @@ void Converter::register_op_converters() {
op_registry_["Add"] = ConvertBinary;
op_registry_["Mul"] = ConvertBinary;
op_registry_["Sub"] = ConvertBinary;
- op_registry_["Rsqrt"] = ConvertUnary;
- op_registry_["Mean"] = ConvertReduce;
op_registry_["Pad"] = ConvertPad;
- // TODO(ben,jie): Add more ops
op_registry_["ConcatV2"] = ConvertConcat;
- op_registry_["MatMul"] = ConvertMatMul;
- op_registry_["Reshape"] = ConvertReshape;
op_registry_["FusedBatchNorm"] = ConvertFusedBatchNorm;
op_registry_["FusedBatchNormV2"] = ConvertFusedBatchNorm;
+ op_registry_["Div"] = ConvertBinary;
+ op_registry_["RealDiv"] = ConvertBinary;
+
+ op_registry_["Rsqrt"] = ConvertUnary;
+ op_registry_["Reciprocal"] = ConvertUnary;
+ op_registry_["Exp"] = ConvertUnary;
+ op_registry_["Log"] = ConvertUnary;
+ op_registry_["Sqrt"] = ConvertUnary;
+ op_registry_["Abs"] = ConvertUnary;
+ op_registry_["Neg"] = ConvertUnary;
+#if NV_TENSORRT_MAJOR == 3
+ op_registry_["Mean"] = ConvertReducePool;
+#endif
+#if NV_TENSORRT_MAJOR > 3
+ op_registry_["Sum"] = ConvertReduce;
+ op_registry_["Prod"] = ConvertReduce;
+ op_registry_["Max"] = ConvertReduce;
+ op_registry_["Min"] = ConvertReduce;
+ op_registry_["Mean"] = ConvertReduce;
+ op_registry_["Maximum"] = ConvertBinary;
+ op_registry_["Minimum"] = ConvertBinary;
+ op_registry_["Softmax"] = ConvertSoftmax;
+ op_registry_["MatMul"] = ConvertMatMul;
+ op_registry_["BatchMatMul"] = ConvertBatchMatMul;
+ op_registry_["TopKV2"] = ConvertTopK;
+#endif
+
plugin_converter_ = ConvertPlugin;
}
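A stripped-down sketch of the registry pattern used by register_op_converters(); the names below are illustrative, not the real Converter API. Op names map to converter callables, lookup failure yields an error, and the real code additionally routes unknown ops through plugin_converter_.

#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

// Hypothetical converter signature: returns true on success.
using OpConverter = std::function<bool(const std::string& node_name)>;

int main() {
  std::unordered_map<std::string, OpConverter> op_registry;
  op_registry["Relu"] = [](const std::string& n) {
    std::cout << "converted activation " << n << std::endl;
    return true;
  };
  op_registry["ConcatV2"] = [](const std::string& n) {
    std::cout << "converted concat " << n << std::endl;
    return true;
  };

  const std::string op = "Relu";
  auto it = op_registry.find(op);
  if (it == op_registry.end()) {
    std::cerr << "no converter registered for " << op << std::endl;
    return 1;
  }
  return it->second("my_relu") ? 0 : 1;
}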
@@ -2177,25 +2695,22 @@ tensorflow::Status ConvertGraphDefToEngine(
(node_def.op() == "Placeholder")) {
nvinfer1::DimsCHW input_dim_pseudo_chw;
for (int i = 0; i < 8; i++) input_dim_pseudo_chw.d[i] = 0;
- nvinfer1::DataType dtype(nvinfer1::DataType::kFLOAT);
- auto type_status =
- ConvertDType(node_def.attr().at("dtype").type(), &dtype);
- if (type_status != tensorflow::Status::OK()) {
- LOG(WARNING) << "Type conversion failed for " << node_name;
- return type_status;
- }
int32 slot_number = -1;
- if (!tensorflow::strings::safe_strto32(node_name.c_str() + 8,
- &slot_number)) {
- LOG(ERROR) << "Failed to parse slot number from " << node_name
- << " +8= " << node_name.c_str() + 8;
+ if (!tensorflow::strings::safe_strto32(
+ node_name.c_str() + strlen(kInputPHName), &slot_number)) {
+ return tensorflow::errors::InvalidArgument(
+ "Failed to parse slot number from ", node_name);
}
+ nvinfer1::DataType dtype;
auto shape = input_shapes.at(slot_number);
- if (shape.dims() > 8) {
- LOG(ERROR) << "Tensor rank is greater than 8 for " << node_name
- << " at input slot " << slot_number;
- return tensorflow::errors::OutOfRange(
- "Input tensor rank is greater than 8");
+ auto status = ValidateInputProperties(
+ shape, node_def.attr().at("dtype").type(), &dtype);
+ if (!status.ok()) {
+ const string error_message =
+ StrCat("Validation failed for ", node_name, " and input slot ",
+ slot_number, ": ", status.error_message());
+ LOG(WARNING) << error_message;
+ return Status(status.code(), error_message);
}
if (VLOG_IS_ON(1)) {
string dim_str("dims=");
@@ -2226,10 +2741,10 @@ tensorflow::Status ConvertGraphDefToEngine(
} else if (tensorflow::str_util::StartsWith(node_name, kOutputPHName) &&
(node_def.op() == "Identity")) {
int32 slot_number = -1;
- if (!tensorflow::strings::safe_strto32(node_name.c_str() + 9,
- &slot_number)) {
- LOG(ERROR) << "Failed to parse slot number from " << node_name
- << " +9=" << node_name.c_str() + 9;
+ if (!tensorflow::strings::safe_strto32(
+ node_name.c_str() + strlen(kOutputPHName), &slot_number)) {
+ return tensorflow::errors::InvalidArgument(
+ "Failed to parse slot number from ", node_name);
}
if (output_tensors.size() <= slot_number) {
output_tensors.resize(slot_number + 1);
@@ -2288,38 +2803,20 @@ tensorflow::Status ConvertSegmentToGraphDef(
"Cannot find node with id ", connection.outside_id, " in the graph.");
}
// Updates the shape and data types of input/output connections.
- tensorflow::DataType input_type = tensorflow::DT_FLOAT;
+ tensorflow::DataType dtype;
tensorflow::PartialTensorShape partial_shape;
if (connection.is_input_edge) {
- if (graph_properties.HasOutputProperties(connection.outside_node_name)) {
- auto output_params =
- graph_properties.GetOutputProperties(connection.outside_node_name);
- auto out_shape = output_params.at(connection.outside_port);
- input_type = out_shape.dtype();
- std::vector<tensorflow::int64> dims;
- partial_shape = out_shape.shape();
- connection.outside_shape = partial_shape;
- } else {
- VLOG(0) << "Unknown output shape" << outside_node->name();
- input_type = graph->FindNodeId(connection.outside_id)
- ->output_type(connection.outside_port);
- }
- connection.connection_type = input_type;
-
- } else { // output edge
- if (graph_properties.HasInputProperties(connection.outside_node_name)) {
- auto input_params =
- graph_properties.GetInputProperties(connection.outside_node_name);
- auto in_shape = input_params.at(connection.outside_port);
- input_type = in_shape.dtype();
- partial_shape = in_shape.shape();
- connection.inside_shape = partial_shape;
- } else {
- input_type = graph->FindNodeId(connection.inside_id)
- ->output_type(connection.outside_port);
- }
- connection.connection_type = input_type;
+ GetInputProperties(graph_properties,
+ graph->FindNodeId(connection.outside_id),
+ connection.outside_port, &partial_shape, &dtype);
+
+ } else {
+ GetOutputProperties(graph_properties,
+ graph->FindNodeId(connection.outside_id),
+ connection.outside_port, &partial_shape, &dtype);
}
+ connection.outside_shape = partial_shape;
+ connection.connection_type = dtype;
// Add dummy input/output nodes to the segment graphdef.
if (connection.is_input_edge) {
@@ -2335,7 +2832,7 @@ tensorflow::Status ConvertSegmentToGraphDef(
auto seg_node = segment_def->add_node();
tensorflow::NodeDefBuilder builder(node_name, "Placeholder");
auto status = builder.Attr("shape", partial_shape)
- .Attr("dtype", input_type)
+ .Attr("dtype", dtype)
.Finalize(seg_node);
VLOG(1) << "Constructing input " << node_name << " for the edge "
<< connection.outside_node_name << ":" << connection.outside_port
@@ -2353,7 +2850,7 @@ tensorflow::Status ConvertSegmentToGraphDef(
marker_nodes.insert(node_name);
auto seg_node = segment_def->add_node();
tensorflow::NodeDefBuilder builder(node_name, "Identity");
- auto status = builder.Input(connection.inside_node_name, 0, input_type)
+ auto status = builder.Input(connection.inside_node_name, 0, dtype)
.Finalize(seg_node);
VLOG(1) << "Constructing output " << node_name << " for the edge "
<< connection.inside_node_name << ":" << connection.inside_port
@@ -2391,6 +2888,38 @@ tensorflow::Status ConvertSegmentToGraphDef(
return tensorflow::Status::OK();
}
+bool InputEdgeValidator::operator()(const tensorflow::Edge* in_edge) const {
+ if (in_edge->IsControlEdge()) return true;
+ PartialTensorShape shape;
+ tensorflow::DataType dtype;
+ GetInputProperties(graph_properties_, in_edge->src(), in_edge->src_output(),
+ &shape, &dtype);
+ nvinfer1::DataType trt_dtype;
+ Status status = ValidateInputProperties(shape, dtype, &trt_dtype);
+ if (!status.ok()) {
+ VLOG(2) << "--> Need to remove input node " << in_edge->dst()->name()
+ << ": " << status;
+ return false;
+ }
+ if (shape.dims() < 3 && in_edge->src()->type_string() != "Const") {
+ VLOG(2) << "--> Need to remove input node " << in_edge->dst()->name()
+ << " which has an input at port " << in_edge->dst_input()
+ << " with #dim<3 and is not a const: " << shape;
+ return false;
+ }
+ return true;
+}
+
+bool OutputEdgeValidator::operator()(const tensorflow::Edge* out_edge) const {
+ if (out_edge->IsControlEdge()) return true;
+ if (out_edge->src()->type_string() == "Const") {
+ VLOG(2) << "--> Need to remove output node " << out_edge->src()->name()
+ << " which is a Const.";
+ return false;
+ }
+ return true;
+}
+
} // namespace convert
} // namespace tensorrt
} // namespace tensorflow
diff --git a/tensorflow/contrib/tensorrt/convert/convert_nodes.h b/tensorflow/contrib/tensorrt/convert/convert_nodes.h
index 1a4c0e755d..6ae60ec352 100644
--- a/tensorflow/contrib/tensorrt/convert/convert_nodes.h
+++ b/tensorflow/contrib/tensorrt/convert/convert_nodes.h
@@ -23,6 +23,7 @@ limitations under the License.
#include <vector>
#include "tensorflow/contrib/tensorrt/convert/utils.h"
+#include "tensorflow/contrib/tensorrt/log/trt_logger.h"
#include "tensorflow/contrib/tensorrt/resources/trt_allocator.h"
#include "tensorflow/contrib/tensorrt/resources/trt_int8_calibrator.h"
#include "tensorflow/core/framework/graph.pb.h"
@@ -104,6 +105,8 @@ struct EngineInfo {
// topological order.
// - segment_def: the output GraphDef, whose non-input/output nodedefs will be
// sorted in topological order.
+//
+// TODO(aaroey): add tests to validate these properties.
tensorflow::Status ConvertSegmentToGraphDef(
const tensorflow::Graph* graph,
const tensorflow::grappler::GraphProperties& graph_properties,
@@ -128,6 +131,30 @@ tensorflow::Status ConvertGraphDefToEngine(
TrtUniquePtrType<nvinfer1::ICudaEngine>* engine,
bool* convert_successfully);
+// Helper class for the segmenter to determine whether an input edge to the TRT
+// segment is valid.
+class InputEdgeValidator {
+ public:
+ InputEdgeValidator(const grappler::GraphProperties& graph_properties)
+ : graph_properties_(graph_properties) {}
+
+ // Return true if the specified edge is eligible to be an input edge of the
+ // TRT segment.
+ bool operator()(const tensorflow::Edge* in_edge) const;
+
+ private:
+ const grappler::GraphProperties& graph_properties_;
+};
+
+// Helper class for the segmenter to determine whether an output edge from the
+// TRT segment is valid.
+class OutputEdgeValidator {
+ public:
+ // Return true if the specified edge is eligible to be an output edge of the
+ // TRT segment.
+ bool operator()(const tensorflow::Edge* out_edge) const;
+};
+
} // namespace convert
} // namespace tensorrt
} // namespace tensorflow
diff --git a/tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc b/tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc
index ec9dbfa13b..044c736c03 100644
--- a/tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc
+++ b/tensorflow/contrib/tensorrt/convert/trt_optimization_pass.cc
@@ -17,6 +17,7 @@ limitations under the License.
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
+#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
@@ -232,8 +233,25 @@ tensorflow::Status TRTOptimizationPass::Optimize(
tensorflow::grappler::GraphProperties static_graph_properties(item);
TF_RETURN_IF_ERROR(static_graph_properties.InferStatically(true));
tensorflow::tensorrt::convert::ConversionParams cp;
+
+ std::vector<string> nodes_to_preserve;
+ for (const auto& n : item.NodesToPreserve()) {
+ auto tokens = str_util::Split(n, ":");
+ string s = tokens.at(0);
+ for (int i = 1; i < tokens.size() - 1; ++i) {
+ StrAppend(&s, ":", tokens.at(i));
+ }
+ int dummy_port = -1;
+ // If the last token is not an integer, it must be part of the name.
+ // Otherwise it is a port number.
+ if (tokens.size() > 1 &&
+ !strings::safe_strto32(tokens.back(), &dummy_port)) {
+ StrAppend(&s, ":", tokens.back());
+ }
+ nodes_to_preserve.push_back(s);
+ }
cp.input_graph_def = &item.graph;
- cp.output_names = &item.fetch;
+ cp.output_names = &nodes_to_preserve;
cp.max_batch_size = maximum_batch_size_;
cp.max_workspace_size_bytes = maximum_workspace_size_;
cp.output_graph_def = optimized_graph;
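The NodesToPreserve() loop above strips a trailing ":<port>" from each fetch name while keeping colons that are part of the node name itself. A standalone sketch of that parsing rule; StripPort is an illustrative helper and only handles unsigned digit suffixes, unlike safe_strto32:

#include <iostream>
#include <string>

// Drop a trailing ":<integer>" (a port number); keep any other ':' segments,
// which are part of the node name.
std::string StripPort(const std::string& fetch) {
  const size_t pos = fetch.rfind(':');
  if (pos == std::string::npos) return fetch;
  const std::string last = fetch.substr(pos + 1);
  const bool is_port =
      !last.empty() && last.find_first_not_of("0123456789") == std::string::npos;
  return is_port ? fetch.substr(0, pos) : fetch;
}

int main() {
  std::cout << StripPort("scope/output:0") << std::endl;    // scope/output
  std::cout << StripPort("name:with:colons") << std::endl;  // name:with:colons
  return 0;
}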
diff --git a/tensorflow/contrib/tensorrt/convert/utils.cc b/tensorflow/contrib/tensorrt/convert/utils.cc
new file mode 100644
index 0000000000..24591cf84b
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/convert/utils.cc
@@ -0,0 +1,35 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/tensorrt/convert/utils.h"
+
+namespace tensorflow {
+namespace tensorrt {
+
+bool IsGoogleTensorRTEnabled() {
+ // TODO(laigd): consider also checking if tensorrt shared libraries are
+ // accessible. We can then direct users to this function to make sure they can
+ // safely write code that uses tensorrt conditionally. E.g. if it does not
+ // check for tensorrt, and the user mistakenly uses tensorrt, they will just
+ // crash and burn.
+#ifdef GOOGLE_TENSORRT
+ return true;
+#else
+ return false;
+#endif
+}
+
+} // namespace tensorrt
+} // namespace tensorflow
diff --git a/tensorflow/contrib/tensorrt/convert/utils.h b/tensorflow/contrib/tensorrt/convert/utils.h
index f601c06701..8b5f4d614a 100644
--- a/tensorflow/contrib/tensorrt/convert/utils.h
+++ b/tensorflow/contrib/tensorrt/convert/utils.h
@@ -31,6 +31,8 @@ struct TrtDestroyer {
template <typename T>
using TrtUniquePtrType = std::unique_ptr<T, TrtDestroyer<T>>;
+bool IsGoogleTensorRTEnabled();
+
} // namespace tensorrt
} // namespace tensorflow
diff --git a/tensorflow/contrib/tensorrt/custom_plugin_examples/BUILD b/tensorflow/contrib/tensorrt/custom_plugin_examples/BUILD
index a89cf3ab8b..69058c5826 100644
--- a/tensorflow/contrib/tensorrt/custom_plugin_examples/BUILD
+++ b/tensorflow/contrib/tensorrt/custom_plugin_examples/BUILD
@@ -112,7 +112,9 @@ cuda_py_test(
],
tags = [
"manual",
+ "no_windows",
"noguitar",
+ "nomac",
"notap",
],
)
diff --git a/tensorflow/contrib/tensorrt/custom_plugin_examples/inc_op_kernel.cu.cc b/tensorflow/contrib/tensorrt/custom_plugin_examples/inc_op_kernel.cu.cc
index 988b35f74f..2de7973750 100644
--- a/tensorflow/contrib/tensorrt/custom_plugin_examples/inc_op_kernel.cu.cc
+++ b/tensorflow/contrib/tensorrt/custom_plugin_examples/inc_op_kernel.cu.cc
@@ -65,7 +65,7 @@ class IncPluginTRT : public OpKernel {
reinterpret_cast<const cudaStream_t*>(context->op_device_context()
->stream()
->implementation()
- ->CudaStreamMemberHack()));
+ ->GpuStreamMemberHack()));
IncrementKernel(input_tensor.flat<float>().data(), inc_,
output_tensor->flat<float>().data(),
input_shape.num_elements(), *stream);
diff --git a/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc b/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc
index 8a17eb02f1..646d62483f 100644
--- a/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc
+++ b/tensorflow/contrib/tensorrt/kernels/trt_engine_op.cc
@@ -15,9 +15,11 @@ limitations under the License.
#include "tensorflow/contrib/tensorrt/kernels/trt_engine_op.h"
#include <algorithm>
+
#include "tensorflow/contrib/tensorrt/convert/convert_nodes.h"
#include "tensorflow/contrib/tensorrt/convert/utils.h"
#include "tensorflow/contrib/tensorrt/log/trt_logger.h"
+#include "tensorflow/contrib/tensorrt/plugin/trt_plugin_factory.h"
#include "tensorflow/contrib/tensorrt/resources/trt_resource_manager.h"
#include "tensorflow/contrib/tensorrt/resources/trt_resources.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
@@ -230,7 +232,7 @@ void TRTEngineOp::ExecuteCalibration(tensorflow::OpKernelContext* ctx,
reinterpret_cast<const cudaStream_t*>(ctx->op_device_context()
->stream()
->implementation()
- ->CudaStreamMemberHack()));
+ ->GpuStreamMemberHack()));
calib_res->calibrator_->setBatch(input_data, *stream);
VLOG(2) << "Passed calibration data";
ExecuteNativeSegment(ctx, helper);
@@ -316,6 +318,11 @@ void TRTEngineOp::ComputeAsync(tensorflow::OpKernelContext* ctx,
ctx->SetStatus(tensorflow::errors::InvalidArgument(
"INT8 inputs are not supported!"));
return;
+#if NV_TENSORRT_MAJOR > 3
+ case nvinfer1::DataType::kINT32:
+ buffers[binding_index] = (void*)(input_tensor.flat<int32>().data());
+ break;
+#endif
default:
LOG(ERROR) << "Unknown TRT data type: " << int(dtype);
ctx->SetStatus(tensorflow::errors::InvalidArgument(
@@ -368,6 +375,12 @@ void TRTEngineOp::ComputeAsync(tensorflow::OpKernelContext* ctx,
ctx->SetStatus(tensorflow::errors::InvalidArgument(
"INT8 outputs are not supported!"));
return;
+#if NV_TENSORRT_MAJOR > 3
+ case nvinfer1::DataType::kINT32:
+ buffers[binding_index] =
+ reinterpret_cast<void*>(output_tensor->flat<int32>().data());
+ break;
+#endif
default:
LOG(ERROR) << "Unknown TRT data type: " << static_cast<int>(dtype);
ctx->SetStatus(tensorflow::errors::InvalidArgument(
@@ -380,7 +393,7 @@ void TRTEngineOp::ComputeAsync(tensorflow::OpKernelContext* ctx,
reinterpret_cast<const cudaStream_t*>(ctx->op_device_context()
->stream()
->implementation()
- ->CudaStreamMemberHack()));
+ ->GpuStreamMemberHack()));
// TODO(jie): trt enqueue does not return error
auto& trt_execution_context_ptr = engine_ctx_pair.second;
@@ -446,7 +459,8 @@ TRTEngineOp::EngineCtxPair& TRTEngineOp::GetEngine(int batch_size,
#endif
TrtUniquePtrType<nvinfer1::ICudaEngine> static_engine(
infer->deserializeCudaEngine(serialized_segment_.c_str(),
- serialized_segment_.size(), nullptr));
+ serialized_segment_.size(),
+ PluginFactoryTensorRT::GetInstance()));
auto raw_static_engine = static_engine.get();
const auto max_batch_size = raw_static_engine->getMaxBatchSize();
engine_map_[max_batch_size] = {
diff --git a/tensorflow/contrib/tensorrt/kernels/trt_engine_op.h b/tensorflow/contrib/tensorrt/kernels/trt_engine_op.h
index 6fe318be6a..9265250605 100644
--- a/tensorflow/contrib/tensorrt/kernels/trt_engine_op.h
+++ b/tensorflow/contrib/tensorrt/kernels/trt_engine_op.h
@@ -81,7 +81,7 @@ class TRTEngineOp : public AsyncOpKernel {
std::vector<string> output_nodes_;
// keep device allocator for TRT.
- std::unique_ptr<TRTDeviceAllocator> allocator_;
+ std::unique_ptr<TRTBaseAllocator> allocator_;
// serialized protobuf segment or trt engine depending on static_engine_ flag.
string serialized_segment_;
diff --git a/tensorflow/contrib/tensorrt/ops/trt_engine_op.cc b/tensorflow/contrib/tensorrt/ops/trt_engine_op.cc
index 383635f428..e0c7b62723 100644
--- a/tensorflow/contrib/tensorrt/ops/trt_engine_op.cc
+++ b/tensorflow/contrib/tensorrt/ops/trt_engine_op.cc
@@ -42,8 +42,14 @@ REGISTER_OP("TRTEngineOp")
.Attr("precision_mode: {'FP32', 'FP16', 'INT8', 'INT8CALIB'}")
.Attr("calibration_data: string = ''")
.Input("in_tensor: InT")
- .Output("out_tensor: OutT")
- .SetShapeFn(shape_inference::TRTEngineOpShapeInference);
+ .Output("out_tensor: OutT");
+// TODO(jie): TF requires concrete output shape for concrete input shapes.
+// This is tricky for batch dimension, since we cannot ensure which input
+// would carry the correct batch dimension (for the current stage of the
+// implementation, we do require all input tensors to carry the same batch
+// size, but this could change in the future). Hence we disable the shape
+// inference function as a workaround.
+// .SetShapeFn(shape_inference::TRTEngineOpShapeInference);
} // namespace tensorflow
diff --git a/tensorflow/contrib/tensorrt/plugin/trt_plugin_factory.cc b/tensorflow/contrib/tensorrt/plugin/trt_plugin_factory.cc
index 2bc591484d..cccc912262 100644
--- a/tensorflow/contrib/tensorrt/plugin/trt_plugin_factory.cc
+++ b/tensorflow/contrib/tensorrt/plugin/trt_plugin_factory.cc
@@ -65,9 +65,6 @@ bool PluginFactoryTensorRT::RegisterPlugin(
void PluginFactoryTensorRT::DestroyPlugins() {
tensorflow::mutex_lock lock(instance_m_);
- for (auto& owned_plugin_ptr : owned_plugins_) {
- owned_plugin_ptr.release();
- }
owned_plugins_.clear();
}
diff --git a/tensorflow/contrib/tensorrt/python/__init__.py b/tensorflow/contrib/tensorrt/python/__init__.py
index 0b2321b5fc..fe4fa166a1 100644
--- a/tensorflow/contrib/tensorrt/python/__init__.py
+++ b/tensorflow/contrib/tensorrt/python/__init__.py
@@ -22,4 +22,5 @@ from __future__ import print_function
from tensorflow.contrib.tensorrt.python.ops import trt_engine_op
from tensorflow.contrib.tensorrt.python.trt_convert import calib_graph_to_infer_graph
from tensorflow.contrib.tensorrt.python.trt_convert import create_inference_graph
+from tensorflow.contrib.tensorrt.python.trt_convert import is_tensorrt_enabled
# pylint: enable=unused-import,line-too-long
diff --git a/tensorflow/contrib/tensorrt/python/trt_convert.py b/tensorflow/contrib/tensorrt/python/trt_convert.py
index 79f512dbcf..2b67931661 100644
--- a/tensorflow/contrib/tensorrt/python/trt_convert.py
+++ b/tensorflow/contrib/tensorrt/python/trt_convert.py
@@ -23,6 +23,7 @@ import six as _six
from tensorflow.contrib.tensorrt.wrap_conversion import calib_convert
from tensorflow.contrib.tensorrt.wrap_conversion import get_linked_tensorrt_version
from tensorflow.contrib.tensorrt.wrap_conversion import get_loaded_tensorrt_version
+from tensorflow.contrib.tensorrt.wrap_conversion import is_tensorrt_enabled
from tensorflow.contrib.tensorrt.wrap_conversion import trt_convert
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
diff --git a/tensorflow/contrib/tensorrt/resources/trt_allocator.cc b/tensorflow/contrib/tensorrt/resources/trt_allocator.cc
index 9f115990c3..d8f97bfbbc 100644
--- a/tensorflow/contrib/tensorrt/resources/trt_allocator.cc
+++ b/tensorflow/contrib/tensorrt/resources/trt_allocator.cc
@@ -19,12 +19,42 @@ limitations under the License.
#if GOOGLE_CUDA
#if GOOGLE_TENSORRT
+#include "cuda/include/cuda_runtime_api.h"
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
+
+namespace tensorflow {
+namespace tensorrt {
+
+// std::align is not supported, so this method mimics its behavior.
+void* Align(size_t alignment, size_t size, void*& ptr, size_t& space) {
+ QCHECK_GT(alignment, 0) << "alignment must be greater than 0.";
+ QCHECK_EQ(0, alignment & (alignment - 1)) << "Alignment must be power of 2.";
+ QCHECK_GT(size, 0) << "size must be greater than 0.";
+ QCHECK(ptr) << "ptr must not be nullptr.";
+ QCHECK_GT(space, 0) << "space must be greater than 0.";
+ const uintptr_t ptr_val = reinterpret_cast<uintptr_t>(ptr);
+ QCHECK_GE(ptr_val + space, ptr_val) << "Provided space overflows.";
+ if (size > space) return nullptr;
+ const uintptr_t aligned_ptr_val = ((ptr_val + alignment - 1) & -alignment);
+ if (aligned_ptr_val > ptr_val + space - size) return nullptr;
+ ptr = reinterpret_cast<void*>(aligned_ptr_val);
+ const uintptr_t diff = aligned_ptr_val - ptr_val;
+ space -= diff;
+ return ptr;
+}
+
+} // namespace tensorrt
+} // namespace tensorflow
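A self-contained illustration of the pointer arithmetic Align() performs, applied to a stack buffer. AlignSketch reimplements the same rounding; the expected numbers in the comments assume the buffer itself is 512-byte aligned, which alignas guarantees here.

#include <cstddef>
#include <cstdint>
#include <iostream>

// Round ptr up to the next multiple of alignment and shrink space accordingly,
// returning nullptr when the aligned block no longer fits.
void* AlignSketch(size_t alignment, size_t size, void*& ptr, size_t& space) {
  const uintptr_t ptr_val = reinterpret_cast<uintptr_t>(ptr);
  if (size > space) return nullptr;
  const uintptr_t aligned = (ptr_val + alignment - 1) & ~(alignment - 1);
  if (aligned > ptr_val + space - size) return nullptr;
  ptr = reinterpret_cast<void*>(aligned);
  space -= aligned - ptr_val;
  return ptr;
}

int main() {
  alignas(512) char buffer[1024];
  void* ptr = buffer + 3;  // deliberately misaligned
  size_t space = sizeof(buffer) - 3;
  if (AlignSketch(512, 100, ptr, space)) {
    std::cout << "aligned ptr offset: " << static_cast<char*>(ptr) - buffer
              << ", space left: " << space << std::endl;  // offset 512, space 512
  }
  return 0;
}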
+
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
#if NV_TENSORRT_MAJOR > 2
-#include "cuda/include/cuda_runtime_api.h"
namespace tensorflow {
namespace tensorrt {
+
void* TRTCudaAllocator::allocate(uint64_t size, uint64_t alignment,
uint32_t flags) {
assert((alignment & (alignment - 1)) == 0); // zero or a power of 2.
@@ -37,10 +67,23 @@ void TRTCudaAllocator::free(void* memory) { cudaFree(memory); }
void* TRTDeviceAllocator::allocate(uint64_t size, uint64_t alignment,
uint32_t flags) {
+ // WAR for allocator alignment requirement. Certain CUDA API calls require GPU
+ // memory with alignment to cudaDeviceProp::textureAlignment.
+ // See issue #20856
+ alignment = 512;
assert((alignment & (alignment - 1)) == 0); // zero or a power of 2.
- void* mem = allocator_->AllocateRaw(alignment, size);
- VLOG(2) << "Allocated " << size << " bytes with alignment " << alignment
- << " @ " << mem;
+ size_t total_size = size + alignment;
+ void* mem = allocator_->AllocateRaw(alignment, total_size);
+ if (!mem) return nullptr;
+
+ void* alloc_mem = mem;
+ QCHECK(Align(alignment, size, mem, total_size));
+ if (mem != alloc_mem) {
+ QCHECK(mem_map_.insert({mem, alloc_mem}).second);
+ }
+ VLOG(2) << "Allocated " << total_size << " bytes of memory @" << alloc_mem
+ << "; returning " << size << " bytes @" << mem << " with alignment "
+ << alignment;
return mem;
}
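The allocate/free pair above over-allocates by the alignment, hands out an aligned pointer, and keeps a map from the aligned pointer back to the original allocation so free() can release the real block. A minimal sketch of the same scheme, using plain malloc/free in place of the TF allocator:

#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <unordered_map>

// Map from the aligned pointer handed out to the original malloc'd pointer.
static std::unordered_map<void*, void*> g_mem_map;

void* AlignedAllocate(size_t size, size_t alignment) {
  void* raw = std::malloc(size + alignment);  // over-allocate by alignment
  if (raw == nullptr) return nullptr;
  const uintptr_t val = reinterpret_cast<uintptr_t>(raw);
  void* aligned =
      reinterpret_cast<void*>((val + alignment - 1) & ~(alignment - 1));
  if (aligned != raw) g_mem_map[aligned] = raw;  // remember the original pointer
  return aligned;
}

void AlignedFree(void* ptr) {
  if (ptr == nullptr) return;
  auto it = g_mem_map.find(ptr);
  if (it != g_mem_map.end()) {
    ptr = it->second;  // restore the original pointer before freeing
    g_mem_map.erase(it);
  }
  std::free(ptr);
}

int main() {
  void* p = AlignedAllocate(1000, 512);
  std::cout << "aligned to 512: "
            << (reinterpret_cast<uintptr_t>(p) % 512 == 0) << std::endl;  // 1
  AlignedFree(p);
  return 0;
}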
@@ -51,12 +94,20 @@ TRTDeviceAllocator::TRTDeviceAllocator(tensorflow::Allocator* allocator)
void TRTDeviceAllocator::free(void* memory) {
VLOG(2) << "Deallocating @ " << memory;
- allocator_->DeallocateRaw(memory);
+ // allocated memory adjusted for alignment, restore the original pointer
+ if (memory) {
+ auto alloc_mem = mem_map_.find(memory);
+ if (alloc_mem != mem_map_.end()) {
+ memory = alloc_mem->second;
+ mem_map_.erase(alloc_mem->first);
+ }
+ allocator_->DeallocateRaw(memory);
+ }
}
} // namespace tensorrt
} // namespace tensorflow
#endif
-#endif
-#endif
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
diff --git a/tensorflow/contrib/tensorrt/resources/trt_allocator.h b/tensorflow/contrib/tensorrt/resources/trt_allocator.h
index c5d2cec730..6f94492083 100644
--- a/tensorflow/contrib/tensorrt/resources/trt_allocator.h
+++ b/tensorflow/contrib/tensorrt/resources/trt_allocator.h
@@ -16,13 +16,25 @@ limitations under the License.
#ifndef TENSORFLOW_CONTRIB_TENSORRT_RESOURCES_TRT_ALLOCATOR_H_
#define TENSORFLOW_CONTRIB_TENSORRT_RESOURCES_TRT_ALLOCATOR_H_
-#include "tensorflow/contrib/tensorrt/log/trt_logger.h"
+#include <unordered_map>
+
#include "tensorflow/core/framework/allocator.h"
#if GOOGLE_CUDA
#if GOOGLE_TENSORRT
#include "tensorrt/include/NvInfer.h"
+#endif // GOOGLE_TENSORRT
+#endif // GOOGLE_CUDA
+
+namespace tensorflow {
+namespace tensorrt {
+// std::align is not supported, so this function mimic its behavior.
+// std::align is not supported, so this function mimics its behavior.
+} // namespace tensorrt
+} // namespace tensorflow
+#if GOOGLE_CUDA
+#if GOOGLE_TENSORRT
#if NV_TENSORRT_MAJOR == 3
// Define interface here temporarily until TRT 4.0 is released
namespace nvinfer1 {
@@ -37,7 +49,14 @@ class IGpuAllocator {
namespace tensorflow {
namespace tensorrt {
-class TRTCudaAllocator : public nvinfer1::IGpuAllocator {
+class TRTBaseAllocator : public nvinfer1::IGpuAllocator {
+ // Base allocator class so we can have a virtual destructor;
+ public:
+ // The Python wrapper does not seem to be happy with a pure virtual destructor;
+ virtual ~TRTBaseAllocator() = default;
+};
+
+class TRTCudaAllocator : public TRTBaseAllocator {
// Allocator implementation that is using cuda allocator instead of device
// allocator in case we can't get device allocator from TF.
public:
@@ -47,10 +66,13 @@ class TRTCudaAllocator : public nvinfer1::IGpuAllocator {
void free(void* memory) override;
};
-class TRTDeviceAllocator : public nvinfer1::IGpuAllocator {
+class TRTDeviceAllocator : public TRTBaseAllocator {
// Allocator implementation wrapping TF device allocators.
public:
TRTDeviceAllocator(tensorflow::Allocator* allocator);
+
+ // TODO(aaroey): base class doesn't have a virtual destructor, work with
+ // Nvidia to fix it.
virtual ~TRTDeviceAllocator() {
VLOG(1) << "Destroying allocator attached to " << allocator_->Name();
}
@@ -59,6 +81,9 @@ class TRTDeviceAllocator : public nvinfer1::IGpuAllocator {
private:
tensorflow::Allocator* allocator_;
+
+ // Supporting alignment of allocation requests requires a map so free() can restore the original pointer;
+ std::unordered_map<void*, void*> mem_map_;
};
} // namespace tensorrt
diff --git a/tensorflow/contrib/tensorrt/resources/trt_allocator_test.cc b/tensorflow/contrib/tensorrt/resources/trt_allocator_test.cc
new file mode 100644
index 0000000000..f515ed03f2
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/resources/trt_allocator_test.cc
@@ -0,0 +1,79 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/contrib/tensorrt/resources/trt_allocator.h"
+
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+namespace tensorrt {
+
+bool RunTest(const size_t alignment, const size_t size,
+ const intptr_t orig_ptr_val, const size_t orig_space) {
+ void* const orig_ptr = reinterpret_cast<void*>(orig_ptr_val);
+ void* ptr = orig_ptr;
+ size_t space = orig_space;
+ void* result = Align(alignment, size, ptr, space);
+ if (result == nullptr) {
+ EXPECT_EQ(orig_ptr, ptr);
+ EXPECT_EQ(orig_space, space);
+ return false;
+ } else {
+ EXPECT_EQ(result, ptr);
+ const intptr_t ptr_val = reinterpret_cast<intptr_t>(ptr);
+ EXPECT_EQ(0, ptr_val % alignment);
+ EXPECT_GE(ptr_val, orig_ptr_val);
+ EXPECT_GE(space, size);
+ EXPECT_LE(space, orig_space);
+ EXPECT_EQ(ptr_val + space, orig_ptr_val + orig_space);
+ return true;
+ }
+}
+
+TEST(TRTAllocatorTest, Align) {
+ for (const size_t space :
+ {1, 2, 3, 4, 7, 8, 9, 10, 16, 32, 511, 512, 513, 700, 12345}) {
+ for (size_t alignment = 1; alignment <= space * 4; alignment *= 2) {
+ for (const intptr_t ptr_val :
+ {1ul, alignment == 1 ? 1ul : alignment - 1, alignment, alignment + 1,
+ alignment + (alignment / 2)}) {
+ if (ptr_val % alignment == 0) {
+ for (const size_t size :
+ {1ul, space == 1 ? 1ul : space - 1, space, space + 1}) {
+ EXPECT_EQ(space >= size, RunTest(alignment, size, ptr_val, space));
+ }
+ } else {
+ EXPECT_FALSE(RunTest(alignment, space, ptr_val, space));
+ const size_t diff = alignment - ptr_val % alignment;
+ if (space > diff) {
+ EXPECT_TRUE(
+ RunTest(alignment, space - diff, ptr_val + diff, space - diff));
+ for (const size_t size :
+ {1ul, space - diff > 1 ? space - diff - 1 : 1ul, space - diff,
+ space - diff + 1, space - 1}) {
+ EXPECT_EQ(space - diff >= size,
+ RunTest(alignment, size, ptr_val, space));
+ }
+ } else {
+ EXPECT_FALSE(RunTest(alignment, 1, ptr_val, space));
+ }
+ }
+ }
+ }
+ }
+}
+
+} // namespace tensorrt
+} // namespace tensorflow
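
The expectations in RunTest above pin down a contract that matches std::align: on success the pointer is advanced to the next suitably aligned address inside the buffer and the remaining space shrinks accordingly, while on failure both arguments are left untouched and nullptr is returned. A minimal sketch consistent with those tests is shown below; it assumes Align is essentially a thin wrapper over std::align, which this diff does not show (the real implementation lives in trt_allocator.cc).

// Sketch only, under the assumption that Align follows std::align semantics.
#include <cstddef>
#include <memory>

void* Align(size_t alignment, size_t size, void*& ptr, size_t& space) {
  // std::align returns nullptr and leaves ptr/space unchanged when the request
  // does not fit; otherwise it advances ptr and reduces space in place.
  return std::align(alignment, size, ptr, space);
}
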
diff --git a/tensorflow/contrib/tensorrt/resources/trt_resources.h b/tensorflow/contrib/tensorrt/resources/trt_resources.h
index b7d5ffd674..d7d56cb95e 100644
--- a/tensorflow/contrib/tensorrt/resources/trt_resources.h
+++ b/tensorflow/contrib/tensorrt/resources/trt_resources.h
@@ -64,7 +64,7 @@ class TRTCalibrationResource : public tensorflow::ResourceBase {
std::unique_ptr<TRTInt8Calibrator> calibrator_;
TrtUniquePtrType<nvinfer1::IBuilder> builder_;
TrtUniquePtrType<nvinfer1::ICudaEngine> engine_;
- std::unique_ptr<nvinfer1::IGpuAllocator> allocator_;
+ std::unique_ptr<TRTBaseAllocator> allocator_;
tensorflow::tensorrt::Logger logger_;
// TODO(sami): Use threadpool threads!
std::unique_ptr<std::thread> thr_;
diff --git a/tensorflow/contrib/tensorrt/segment/segment.cc b/tensorflow/contrib/tensorrt/segment/segment.cc
index cc42913eca..008fffc954 100644
--- a/tensorflow/contrib/tensorrt/segment/segment.cc
+++ b/tensorflow/contrib/tensorrt/segment/segment.cc
@@ -15,6 +15,7 @@ limitations under the License.
#include "tensorflow/contrib/tensorrt/segment/segment.h"
+#include <queue>
#include <set>
#include <unordered_map>
#include <vector>
@@ -32,6 +33,7 @@ namespace tensorflow {
namespace tensorrt {
namespace segment {
using ::tensorflow::strings::StrAppend;
+
// A simple graph representation to mirror tensorflow::Graph. This structure
// helps save memory since the segmenter modifies the graph in place, avoiding
// the need to create a copy of the graph. It is composed of edges and nodes.
@@ -215,7 +217,7 @@ namespace {
bool CheckCycles(const std::unique_ptr<SimpleGraph>& g, const SimpleNode* src,
const std::vector<SimpleNode*>& start) {
- // copied from TF ReverseDFS.
+ // Copied from TF ReverseDFS, which only works for tensorflow::Graph.
struct Work {
SimpleNode* node;
bool leave; // Are we entering or leaving n?
@@ -269,6 +271,24 @@ bool CanContractEdge(const SimpleEdge* edge,
// 1. Get all nodes incoming to 'dst', excluding 'src'
// 2. Reverse DFS from those nodes
// 3. If reverse DFS reaches 'src' then we have a cycle
+ //
+ // TODO(aaroey): there are several problems with the current approach:
+ // 1. src->dst->src: this is not detected, but it should be;
+ // 2. src->dst->...(any node sequence that doesn't contain src)...->dst: this
+ // is detected, but it should not be.
+ //
+ // Note that it's fine for dst to connect back to src indirectly (i.e. through
+ // a path with length > 1 that consists of intermediate nodes other than src).
+ // While loops are one example.
+ //
+ // The goal is to make sure that the trt subgraph:
+ // 1. has no loops (i.e. is a DAG), and
+ // 2. if there is a path in the subgraph from X to Y (X and Y are both nodes
+ // in the subgraph), then all paths from X to Y are in the subgraph.
+ //
+ // To achieve this goal, the correct way seems to be:
+ // 1. remove any direct edge from src->dst;
+ // 2. detect if src can reach dst, if so they cannot be merged.
std::vector<SimpleNode*> dfs_start_nodes;
for (SimpleNode* node : dst->in_nodes()) {
if (node != src) {
@@ -276,8 +296,8 @@ bool CanContractEdge(const SimpleEdge* edge,
}
}
- bool is_cycle = CheckCycles(graph, src, dfs_start_nodes);
- return !is_cycle;
+ const bool has_cycle = CheckCycles(graph, src, dfs_start_nodes);
+ return !has_cycle;
}
} // namespace
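
The TODO above proposes a cleaner check: drop the direct src->dst edges, then test whether src can still reach dst; if it can, the two nodes cannot be merged. The sketch below shows that reachability test on a plain adjacency-list graph rather than on SimpleGraph/SimpleNode, purely to illustrate the idea; it is not the code this patch adds.

// Illustrative reachability check for the approach proposed in the TODO.
#include <deque>
#include <unordered_map>
#include <unordered_set>
#include <vector>

using Graph = std::unordered_map<int, std::vector<int>>;  // node -> successors

bool ContractionWouldFormCycle(const Graph& graph, int src, int dst) {
  std::deque<int> queue;
  std::unordered_set<int> visited{src};
  // Seed the BFS with src's successors, skipping the direct edge(s) to dst.
  auto it = graph.find(src);
  if (it != graph.end()) {
    for (int succ : it->second) {
      if (succ != dst) queue.push_back(succ);
    }
  }
  while (!queue.empty()) {
    int node = queue.front();
    queue.pop_front();
    if (node == dst) return true;  // an indirect src->dst path exists
    if (!visited.insert(node).second) continue;
    auto jt = graph.find(node);
    if (jt == graph.end()) continue;
    for (int succ : jt->second) queue.push_back(succ);
  }
  return false;
}
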
@@ -342,22 +362,20 @@ void ContractEdge(SimpleEdge* edge, SimpleGraph* graph,
}
tensorflow::Status SegmentGraph(
- const tensorflow::GraphDef& gdef,
- const std::function<bool(const tensorflow::Node*)>& candidate_fn,
- const SegmentOptions& options, SegmentNodesVector* segments) {
- // Create a Graph representation of the GraphDef.
- tensorflow::FunctionLibraryDefinition flib(tensorflow::OpRegistry::Global(),
- gdef.library());
- tensorflow::Graph graph(flib);
- TF_RETURN_IF_ERROR(tensorflow::ConvertGraphDefToGraph(
- tensorflow::GraphConstructorOptions(), gdef, &graph));
- return SegmentGraph(&graph, candidate_fn, options, segments);
-}
-
-tensorflow::Status SegmentGraph(
- tensorflow::Graph* tf_graph,
+ const tensorflow::Graph* tf_graph,
const std::function<bool(const tensorflow::Node*)>& candidate_fn,
+ const std::function<bool(const tensorflow::Edge*)>& input_candidate_fn,
+ const std::function<bool(const tensorflow::Edge*)>& output_candidate_fn,
const SegmentOptions& options, SegmentNodesVector* segments) {
+ // Steps:
+ // 1. run the segmentation algorithm to find all the segments, using
+ // candidate_fn to determine the candidate segment nodes;
+ // 2. for each segment, remove the nodes that are inputs/outputs of the
+ // segment but are not eligible, using input/output_candidate_fn to
+ // determine eligibility;
+ // 3. convert the segments into the expected return format and return them.
+
+ // --------------------------------- Step 1 ---------------------------------
auto graph = std::unique_ptr<SimpleGraph>(new SimpleGraph(tf_graph));
// Use a union-find to collect the nodes that belong to the same
// segment. A node value of nullptr indicates that the node is not a candidate
@@ -372,14 +390,19 @@ tensorflow::Status SegmentGraph(
node_segments.emplace_back(node);
}
- // The segmentation algorithm below visits nodes in reverse
- // topological order and attempts to merge nodes along output
- // edges. That means that subgraphs grow from the output-side of the
- // network towards the inputs. In general this is not guaranteed to
- // produce a globally optimal segmentation. In the future if we have
- // a measure of how beneficial it is to include a given node in a
- // TRT subgraph then we can revisit this algorithm to take advantage
- // of that information.
+ // The segmentation algorithm below visits nodes in reverse topological order
+ // and attempts to merge nodes along output edges. That means that subgraphs
+ // grow from the output-side of the network towards the inputs.
+ //
+ // In general this is not guaranteed to produce a globally optimal
+ // segmentation. For example, consider a graph with nodes {A, B, C, D} and
+ // edges {A->B, A->C, B->D, C->D}, where A, B, D are TRT-compatible but C is
+ // not. In theory we can choose to contract either A, B or B, D but not both,
+ // but here it always chooses to contract B, D.
+ //
+ // In the future if we have a measure of how beneficial it is to include a
+ // given node in a TRT subgraph then we can revisit this algorithm to take
+ // advantage of that information.
std::vector<tensorflow::Node*> tforder;
tensorflow::GetPostOrder(*tf_graph, &tforder);
// use postorder implementation from tensorflow and construct mirror in
@@ -392,13 +415,11 @@ tensorflow::Status SegmentGraph(
for (const SimpleNode* node : order) {
// All output nodes of 'node' have been visited...
VLOG(2) << "Trying node " << node->name() << " id=" << node->id();
-
// 'node' must be a TRT candidate...
if (node_segments[node->id()].Value() == nullptr) {
VLOG(2) << "... not a TRT candidate";
continue;
}
-
// Contract output edges to combine 'node' with output
// nodes. Iterate since combining two nodes may unblock other
// combining.
@@ -416,7 +437,6 @@ tensorflow::Status SegmentGraph(
VLOG(2) << "... ... not a TRT candidate";
continue;
}
-
if (CanContractEdge(out_edge, graph)) {
VLOG(2) << "... ... can contract";
contract_edges.insert(out_edge);
@@ -424,11 +444,9 @@ tensorflow::Status SegmentGraph(
VLOG(2) << "... ... cannot contract, would form cycle";
}
}
-
if (contract_edges.empty()) {
break;
}
-
// Contract edges and collect the adjacent nodes into the same
// segment/subgraph.
while (!contract_edges.empty()) {
@@ -457,11 +475,22 @@ tensorflow::Status SegmentGraph(
// Collect the segments/subgraphs. Each subgraph is represented by a
// set of the names of the nodes in that subgraph.
- std::unordered_map<string, std::set<string>> sg_map;
+
+ // A map from the segment identifier (currently the name of the root node of
+ // the segment tree) to the segment nodes set.
+ std::unordered_map<string, std::set<const tensorflow::Node*>> sg_map;
+
+ // A map from the segment identifier (currently the name of the root node of
+ // the segment tree) to the device names that the nodes in the segment are
+ // assigned to.
+ //
+ // TODO(aaroey): nodes assigned to different devices should not be merged,
+ // fix this.
std::unordered_map<string, std::set<string>> device_maps;
+
for (auto& u : node_segments) {
if ((u.Value() != nullptr) && (u.ParentValue() != nullptr)) {
- sg_map[u.ParentValue()->name()].insert(u.Value()->name());
+ sg_map[u.ParentValue()->name()].insert(u.Value()->tf_node());
auto tf_node = u.Value()->tf_node();
// has_assigned_device_name() is expected to return true
// when called from optimization pass. However, since graph
@@ -482,25 +511,104 @@ tensorflow::Status SegmentGraph(
}
}
+ // --------------------------------- Step 2 ---------------------------------
+ // Remove ineligible input/output nodes.
+ for (auto& itr : sg_map) {
+ std::set<const tensorflow::Node*>& segment_nodes = itr.second;
+ VLOG(1) << "Segment original size: " << segment_nodes.size();
+ while (true) {
+ std::deque<const tensorflow::Node*> in_nodes_que, out_nodes_que;
+ // Find the input nodes that are not eligible and add them to the queue.
+ // Nodes that have no incoming edges should not be treated as "input",
+ // as there are really no inputs to them. Similarly for output nodes.
+ for (auto node : segment_nodes) {
+ bool added = false;
+ for (const tensorflow::Edge* edge : node->in_edges()) {
+ if (!edge->IsControlEdge() && !edge->src()->IsSource() &&
+ !segment_nodes.count(edge->src())) { // 'node' is an input node.
+ if (!input_candidate_fn(edge)) {
+ in_nodes_que.push_back(node);
+ added = true;
+ break;
+ }
+ }
+ }
+ if (added) continue; // Add the node to at most one queue.
+ for (const tensorflow::Edge* edge : node->out_edges()) {
+ if (!edge->dst()->IsSink() && !edge->IsControlEdge() &&
+ !segment_nodes.count(edge->dst())) { // 'node' is an output node.
+ if (!output_candidate_fn(edge)) {
+ out_nodes_que.push_back(node);
+ break;
+ }
+ }
+ }
+ }
+ if (in_nodes_que.empty() && out_nodes_que.empty()) {
+ // No more ineligible input/output nodes.
+ break;
+ }
+ // Now for each ineligible node, remove all of its inputs or outputs from
+ // the subgraph.
+ //
+ // It can be proven that, if the original subgraph:
+ // 1. is a DAG, and
+ // 2. all paths between two nodes in the subgraph lie entirely inside the
+ // subgraph,
+ // then after doing this operation the resulting subgraph will keep the
+ // same properties 1 and 2.
+ //
+ // For simplicity we use a heuristic: for an input node remove all of its
+ // inputs, and for an output node remove all of its outputs. In this way, for
+ // common cases the number of removed nodes should be minimal.
+ auto remove_nodes = [&segment_nodes](
+ bool is_input_nodes,
+ std::deque<const tensorflow::Node*>* que) {
+ // Run a BFS on the queue to find all the input/output nodes.
+ std::set<const tensorflow::Node*> visited;
+ while (!que->empty()) {
+ auto node = que->front();
+ que->pop_front();
+ if (!visited.insert(node).second) continue;
+ segment_nodes.erase(node);
+ for (auto in :
+ is_input_nodes ? node->in_nodes() : node->out_nodes()) {
+ if (segment_nodes.count(in)) {
+ que->push_back(in);
+ VLOG(2) << "Need to remove node " << in->name()
+ << " because one of its "
+ << (is_input_nodes ? "output" : "input")
+ << " nodes in the graph was removed: " << node->name();
+ }
+ }
+ }
+ };
+ remove_nodes(true, &in_nodes_que);
+ remove_nodes(false, &out_nodes_que);
+ }
+ VLOG(1) << "Segment new size: " << segment_nodes.size();
+ }
+
+ // --------------------------------- Step 3 ---------------------------------
// Convert the segments into the expected return format
for (const auto& itr : sg_map) {
- const auto& segment_node_names = itr.second;
+ const std::set<const tensorflow::Node*>& segment_nodes = itr.second;
if (VLOG_IS_ON(1)) {
string s;
- for (const auto& name : segment_node_names) {
- s += " " + name;
- }
- VLOG(1) << "Segment " << segments->size() << ":" << s;
+ for (auto node : segment_nodes) s += " " + node->name();
+ VLOG(1) << "Segment " << segments->size() << ": " << s;
}
// Don't use small segments.
- if (static_cast<int>(segment_node_names.size()) <
- options.minimum_segment_size) {
+ if (static_cast<int>(segment_nodes.size()) < options.minimum_segment_size) {
VLOG(1) << "Segment " << segments->size() << " has only "
- << segment_node_names.size() << " nodes, dropping";
+ << segment_nodes.size() << " nodes, dropping";
continue;
}
+
// TODO(sami): Make segmenter placement aware once trtscopes are in place
+ std::set<string> segment_node_names;
+ for (auto node : itr.second) segment_node_names.insert(node->name());
const auto& dev_itr = device_maps.find(itr.first);
if (dev_itr == device_maps.end() || dev_itr->second.empty()) {
VLOG(1) << "No device assigned to segment " << segments->size();
diff --git a/tensorflow/contrib/tensorrt/segment/segment.h b/tensorflow/contrib/tensorrt/segment/segment.h
index 81b4bfe49f..8c44eb782a 100644
--- a/tensorflow/contrib/tensorrt/segment/segment.h
+++ b/tensorflow/contrib/tensorrt/segment/segment.h
@@ -42,22 +42,6 @@ struct SegmentOptions {
// Get the subgraphs of a graph that can be handled by TensorRT.
//
-// @param gdef The GraphDef describing the network
-// @param candidate_fn A function that returns true for a NodeDef if
-// that node can be handled by TensorRT.
-// @param segments Returns the TensorRT segments/subgraphs. Each entry
-// in the vector describes a subgraph by giving a set of the names of
-// all the NodeDefs in that subgraph.
-// @return the status.
-//
-// TODO(aaroey): remove this method.
-tensorflow::Status SegmentGraph(
- const tensorflow::GraphDef& gdef,
- const std::function<bool(const tensorflow::Node*)>& candidate_fn,
- const SegmentOptions& options, SegmentNodesVector* segments);
-
-// Get the subgraphs of a graph that can be handled by TensorRT.
-//
// @param graph tensorflow::Graph of the network
// @param candidate_fn A function that returns true for a Node* if
// that node can be handled by TensorRT.
@@ -66,8 +50,10 @@ tensorflow::Status SegmentGraph(
// all the NodeDefs in that subgraph.
// @return the status.
tensorflow::Status SegmentGraph(
- tensorflow::Graph* tf_graph,
+ const tensorflow::Graph* tf_graph,
const std::function<bool(const tensorflow::Node*)>& candidate_fn,
+ const std::function<bool(const tensorflow::Edge*)>& input_candidate_fn,
+ const std::function<bool(const tensorflow::Edge*)>& output_candidate_fn,
const SegmentOptions& options, SegmentNodesVector* segments);
} // namespace segment
diff --git a/tensorflow/contrib/tensorrt/segment/segment_test.cc b/tensorflow/contrib/tensorrt/segment/segment_test.cc
index f5b2d258d7..432e7b1c04 100644
--- a/tensorflow/contrib/tensorrt/segment/segment_test.cc
+++ b/tensorflow/contrib/tensorrt/segment/segment_test.cc
@@ -14,350 +14,245 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/contrib/tensorrt/segment/segment.h"
-#include "tensorflow/c/c_api.h"
-#include "tensorflow/core/framework/graph.pb.h"
+
+#include "tensorflow/cc/framework/scope.h"
+#include "tensorflow/cc/ops/standard_ops.h"
+#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace tensorrt {
namespace segment {
namespace test {
+namespace ops = ::tensorflow::ops;
class SegmentTest : public ::testing::Test {
- public:
- bool GetGraphDef(TF_Graph* graph, tensorflow::GraphDef* graph_def);
-
- TF_Operation* Placeholder(TF_Graph* graph, TF_Status* s, const char* name);
- TF_Operation* Add(TF_Operation* l, TF_Operation* r, TF_Graph* graph,
- TF_Status* s, const char* name);
-
- std::function<bool(const tensorflow::Node*)> MakeCandidateFn(
- const std::set<string>& node_names);
-
protected:
- void PlaceholderHelper(TF_Graph* graph, TF_Status* s, const char* name,
- TF_Operation** op);
- void AddHelper(TF_Operation* l, TF_Operation* r, TF_Graph* graph,
- TF_Status* s, const char* name, TF_Operation** op, bool check);
-
- SegmentOptions default_options_;
-};
-
-bool SegmentTest::GetGraphDef(TF_Graph* graph,
- tensorflow::GraphDef* graph_def) {
- TF_Status* s = TF_NewStatus();
- TF_Buffer* buffer = TF_NewBuffer();
- TF_GraphToGraphDef(graph, buffer, s);
- bool ret = TF_GetCode(s) == TF_OK;
- EXPECT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- if (ret) ret = graph_def->ParseFromArray(buffer->data, buffer->length);
- TF_DeleteBuffer(buffer);
- TF_DeleteStatus(s);
- return ret;
-}
+ std::function<bool(const tensorflow::Node*)> MakeCandidateFn(
+ const std::set<string>& node_names) {
+ return [node_names](const tensorflow::Node* node) -> bool {
+ return node_names.find(node->name()) != node_names.end();
+ };
+ }
-std::function<bool(const tensorflow::Node*)> SegmentTest::MakeCandidateFn(
- const std::set<string>& node_names) {
- return [node_names](const tensorflow::Node* node) -> bool {
- return node_names.find(node->name()) != node_names.end();
- };
-}
+ std::function<bool(const tensorflow::Edge*)> MakeInputEdgeCandidateFn(
+ const std::set<string>& node_names) {
+ return [node_names](const tensorflow::Edge* in_edge) -> bool {
+ return node_names.find(in_edge->dst()->name()) != node_names.end();
+ };
+ }
-void SegmentTest::PlaceholderHelper(TF_Graph* graph, TF_Status* s,
- const char* name, TF_Operation** op) {
- TF_OperationDescription* desc = TF_NewOperation(graph, "Placeholder", name);
- TF_SetAttrType(desc, "dtype", TF_INT32);
- *op = TF_FinishOperation(desc, s);
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- ASSERT_NE(*op, nullptr);
-}
+ std::function<bool(const tensorflow::Edge*)> MakeOutputEdgeCandidateFn(
+ const std::set<string>& node_names) {
+ return [node_names](const tensorflow::Edge* out_edge) -> bool {
+ return node_names.find(out_edge->src()->name()) != node_names.end();
+ };
+ }
-TF_Operation* SegmentTest::Placeholder(TF_Graph* graph, TF_Status* s,
- const char* name) {
- TF_Operation* op;
- PlaceholderHelper(graph, s, name, &op);
- return op;
-}
+ void RunTest(const tensorflow::Graph* graph,
+ const std::set<string>& candidates,
+ const std::set<string>& input_candidates,
+ const std::set<string>& output_candidates,
+ const std::vector<std::set<string>>& expected_segments) {
+ SegmentNodesVector segments;
+ TF_EXPECT_OK(SegmentGraph(graph, MakeCandidateFn(candidates),
+ MakeInputEdgeCandidateFn(input_candidates),
+ MakeOutputEdgeCandidateFn(output_candidates),
+ default_options_, &segments));
+ ValidateSegment(segments, expected_segments);
+ }
-void SegmentTest::AddHelper(TF_Operation* l, TF_Operation* r, TF_Graph* graph,
- TF_Status* s, const char* name, TF_Operation** op,
- bool check) {
- TF_OperationDescription* desc = TF_NewOperation(graph, "AddN", name);
- TF_Output add_inputs[2] = {{l, 0}, {r, 0}};
- TF_AddInputList(desc, add_inputs, 2);
- *op = TF_FinishOperation(desc, s);
- if (check) {
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- ASSERT_NE(*op, nullptr);
+ void ValidateSegment(const SegmentNodesVector& segments,
+ const std::vector<std::set<string>>& expected_segments) {
+ EXPECT_EQ(expected_segments.size(), segments.size());
+ for (int i = 0; i < segments.size(); ++i) {
+ const auto& segment_node_names = segments[i].first;
+ const auto& expected = expected_segments[i];
+ for (const auto& name : expected) {
+ EXPECT_TRUE(segment_node_names.count(name))
+ << "Segment " << i << " is missing expected node: " << name;
+ }
+ if (segment_node_names.size() == expected.size()) continue;
+ for (const auto& name : segment_node_names) {
+ EXPECT_TRUE(expected.count(name))
+ << "Unexpected node found in segment " << i << ": " << name;
+ }
+ }
}
-}
-TF_Operation* SegmentTest::Add(TF_Operation* l, TF_Operation* r,
- TF_Graph* graph, TF_Status* s,
- const char* name) {
- TF_Operation* op;
- AddHelper(l, r, graph, s, name, &op, true);
- return op;
+ SegmentOptions default_options_;
+};
+
+std::set<string> operator-(const std::set<string>& lhs, const string& rhs) {
+ std::set<string> result = lhs;
+ CHECK(result.erase(rhs));
+ return result;
}
TEST_F(SegmentTest, Empty) {
- TF_Graph* graph = TF_NewGraph();
-
- GraphDef graph_def;
- ASSERT_TRUE(GetGraphDef(graph, &graph_def));
-
- SegmentNodesVector segments;
- ASSERT_EQ(
- SegmentGraph(graph_def, MakeCandidateFn({}), default_options_, &segments),
- tensorflow::Status::OK());
-
+ Scope s = Scope::NewRootScope();
+ tensorflow::Graph g(OpRegistry::Global());
+ TF_EXPECT_OK(s.ToGraph(&g));
// Expect no segments/subgraphs.
- EXPECT_TRUE(segments.empty());
- TF_DeleteGraph(graph);
+ RunTest(&g, {}, {}, {}, {});
}
TEST_F(SegmentTest, Simple) {
- TF_Status* s = TF_NewStatus();
- TF_Graph* graph = TF_NewGraph();
-
// feed
- // // ||
+ // // \\
// add0 add1
- // | | /
+ // | \ /
// | add2
- // | / ||
+ // | / \\
// add3 add4
- // | /
+ // \ /
// <sink>
- //
- TF_Operation* feed = Placeholder(graph, s, "feed");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- EXPECT_EQ(string("feed"), string(TF_OperationName(feed)));
-
- TF_Operation* add0 = Add(feed, feed, graph, s, "add0");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add1 = Add(feed, feed, graph, s, "add1");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add2 = Add(add0, add1, graph, s, "add2");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add3 = Add(add0, add2, graph, s, "add3");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- EXPECT_EQ(string("add3"), string(TF_OperationName(add3)));
- TF_Operation* add4 = Add(add2, add2, graph, s, "add4");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- EXPECT_EQ(string("add4"), string(TF_OperationName(add4)));
-
- GraphDef graph_def;
- ASSERT_TRUE(GetGraphDef(graph, &graph_def));
-
- SegmentNodesVector segments;
- ASSERT_EQ(
- SegmentGraph(graph_def,
- MakeCandidateFn({"add0", "add1", "add2", "add3", "add4"}),
- default_options_, &segments),
- tensorflow::Status::OK());
-
- // Expect all Add operations to be collapsed into a single segment
- ASSERT_EQ(segments.size(), 1);
- std::vector<string> expected{"add0", "add1", "add2", "add3", "add4"};
- for (const auto& ex : expected) {
- EXPECT_TRUE(segments[0].first.find(ex) != segments[0].first.end())
- << "Missing expected node " << ex;
- }
- TF_DeleteGraph(graph);
- TF_DeleteStatus(s);
+ Scope s = Scope::NewRootScope();
+ auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
+ auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
+ auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
+ auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
+ auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
+ auto add4 = ops::Add(s.WithOpName("add4"), add2, add2);
+ tensorflow::Graph g(OpRegistry::Global());
+ TF_EXPECT_OK(s.ToGraph(&g));
+
+ // All Add operations are candidates, and we expect all of them to be
+ // collapsed into a single segment
+ const std::set<string> all_adds = {"add0", "add1", "add2", "add3", "add4"};
+ RunTest(&g, all_adds, all_adds, all_adds, {all_adds});
+
+ // Make add1 not a candidate, and we expect all other Add operations to be
+ // collapsed into a single segment
+ auto without_add1 = all_adds - "add1";
+ RunTest(&g, without_add1, without_add1, without_add1, {without_add1});
+
+ // Make add1 not a candidate and add2 not an input candidate, and we expect
+ // add0 and add2 to be removed from the segment.
+ auto without_add2 = all_adds - "add2";
+ RunTest(&g, without_add1, without_add2, without_add1, {{"add3", "add4"}});
+
+ // Making add2 not an input candidate itself won't affect anything.
+ RunTest(&g, all_adds, without_add2, all_adds, {all_adds});
+
+ // Making add1 not an input candidate removes it from the resulting segment.
+ RunTest(&g, all_adds, without_add1, all_adds, {without_add1});
+
+ // Making add3 not an output candidate doesn't affect anything, since its
+ // output is the sink.
+ auto without_add3 = all_adds - "add3";
+ RunTest(&g, all_adds, all_adds, without_add3, {all_adds});
}
TEST_F(SegmentTest, AvoidCycle) {
- TF_Status* s = TF_NewStatus();
- TF_Graph* graph = TF_NewGraph();
-
- // add2 is not a TRT candidate so add0/add3 cannot be formed as a
- // subgraph
- //
// feed
- // // ||
+ // // \\
// add0 add1
- // | | /
+ // | \ /
// | add2
- // | / ||
+ // | / \\
// add3 add4
- // | /
+ // \ /
// <sink>
- //
- TF_Operation* feed = Placeholder(graph, s, "feed");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- EXPECT_EQ(string("feed"), string(TF_OperationName(feed)));
-
- TF_Operation* add0 = Add(feed, feed, graph, s, "add0");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add1 = Add(feed, feed, graph, s, "add1");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add2 = Add(add0, add1, graph, s, "add2");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add3 = Add(add0, add2, graph, s, "add3");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- EXPECT_EQ(string("add3"), string(TF_OperationName(add3)));
- TF_Operation* add4 = Add(add2, add2, graph, s, "add4");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- EXPECT_EQ(string("add4"), string(TF_OperationName(add4)));
-
- GraphDef graph_def;
- ASSERT_TRUE(GetGraphDef(graph, &graph_def));
-
- SegmentNodesVector segments;
- ASSERT_EQ(
- SegmentGraph(graph_def, MakeCandidateFn({"add0", "add1", "add3", "add4"}),
- default_options_, &segments),
- tensorflow::Status::OK());
-
- // Expect no subgraphs
- EXPECT_EQ(segments.size(), 0);
- TF_DeleteGraph(graph);
- TF_DeleteStatus(s);
+ Scope s = Scope::NewRootScope();
+ auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
+ auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
+ auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
+ auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
+ auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
+ auto add4 = ops::Add(s.WithOpName("add4"), add2, add2);
+ tensorflow::Graph g(OpRegistry::Global());
+ TF_EXPECT_OK(s.ToGraph(&g));
+
+ // add2 is not a TRT candidate so there should be no segments generated.
+ const std::set<string> without_add2 = {"add0", "add1", "add3", "add4"};
+ RunTest(&g, without_add2, without_add2, without_add2, {});
}
TEST_F(SegmentTest, Multiple) {
- TF_Status* s = TF_NewStatus();
- TF_Graph* graph = TF_NewGraph();
-
- // add5 is not a TRT candidate so two subgraphs should be formed
- //
- // feed
- // // || ||
- // add0 add1 add7
- // | | / / ||
- // | add2-----add5 add8
- // | / | | | |
- // add3 add4 add6
- // | | /
- // <sink>
- //
- TF_Operation* feed = Placeholder(graph, s, "feed");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- EXPECT_EQ(string("feed"), string(TF_OperationName(feed)));
-
- TF_Operation* add0 = Add(feed, feed, graph, s, "add0");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add1 = Add(feed, feed, graph, s, "add1");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add7 = Add(feed, feed, graph, s, "add7");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add2 = Add(add0, add1, graph, s, "add2");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add5 = Add(add2, add7, graph, s, "add5");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add8 = Add(add7, add7, graph, s, "add8");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add3 = Add(add0, add2, graph, s, "add3");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- EXPECT_EQ(string("add3"), string(TF_OperationName(add3)));
- TF_Operation* add4 = Add(add2, add5, graph, s, "add4");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- EXPECT_EQ(string("add4"), string(TF_OperationName(add4)));
- TF_Operation* add6 = Add(add5, add8, graph, s, "add6");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- EXPECT_EQ(string("add6"), string(TF_OperationName(add6)));
-
- GraphDef graph_def;
- ASSERT_TRUE(GetGraphDef(graph, &graph_def));
-
- SegmentNodesVector segments;
- ASSERT_EQ(SegmentGraph(graph_def,
- MakeCandidateFn({"add0", "add1", "add2", "add3",
- "add4", "add6", "add7", "add8"}),
- default_options_, &segments),
- tensorflow::Status::OK());
-
- // Expect two subgraphs
- EXPECT_EQ(segments.size(), 2);
-
- std::vector<string> expected0{"add6", "add8"};
- for (const auto& ex : expected0) {
- EXPECT_TRUE(segments[0].first.find(ex) != segments[0].first.end())
- << "Missing expected node " << ex;
- }
-
- std::vector<string> expected1{"add0", "add1", "add2", "add3"};
- for (const auto& ex : expected1) {
- EXPECT_TRUE(segments[1].first.find(ex) != segments[1].first.end())
- << "Missing expected node " << ex;
- }
- TF_DeleteGraph(graph);
- TF_DeleteStatus(s);
+ // feed
+ // // || \\
+ // add0 add1 add7
+ // | \ / / \\
+ // | add2 / \\
+ // | || \ | ||
+ // | || add5 add8
+ // | / \ / \ /
+ // add3 add4 add6
+ // \ | /
+ // <sink>
+ Scope s = Scope::NewRootScope();
+ auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
+ auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
+ auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
+ auto add7 = ops::Add(s.WithOpName("add7"), feed, feed);
+ auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
+ auto add5 = ops::Add(s.WithOpName("add5"), add2, add7);
+ auto add8 = ops::Add(s.WithOpName("add8"), add7, add7);
+ auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
+ auto add4 = ops::Add(s.WithOpName("add4"), add2, add5);
+ auto add6 = ops::Add(s.WithOpName("add6"), add5, add8);
+ tensorflow::Graph g(OpRegistry::Global());
+ TF_EXPECT_OK(s.ToGraph(&g));
+
+ const std::set<string> all_adds = {"add0", "add1", "add2", "add3", "add4",
+ "add5", "add6", "add7", "add8"};
+ // Make add5 not a TRT candidate, and we expect two segments.
+ auto without_add5 = all_adds - "add5";
+ RunTest(&g, without_add5, without_add5, without_add5,
+ {{"add6", "add8"}, {"add0", "add1", "add2", "add3"}});
+
+ // Make add8 not a candidate and add6 not an input candidate, so all direct
+ // and indirect inputs of add6 will be removed from the segment.
+ auto without_add8 = all_adds - "add8";
+ auto without_add6 = all_adds - "add6";
+ RunTest(&g, without_add8, without_add6, all_adds, {{"add3", "add4"}});
+
+ // Make add3 not a candidate and add0 not an output candidate, so all
+ // direct and indirect outputs of add0 will be removed from the segment.
+ auto without_add3 = all_adds - "add3";
+ auto without_add0 = all_adds - "add0";
+ RunTest(&g, without_add3, all_adds, without_add0, {{"add1", "add7", "add8"}});
}
TEST_F(SegmentTest, BigIfElse) {
- TF_Status* s = TF_NewStatus();
- TF_Graph* graph = TF_NewGraph();
-
- // add2 is not a TRT candidate
- //
// feed
// ||
// add0
- // // ||
+ // // \\
// add1 add4
// || ||
// add2 add5
// || ||
// add3 add6
- // || //
+ // \\ //
// add7
// ||
// <sink>
- //
- TF_Operation* feed = Placeholder(graph, s, "feed");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- EXPECT_EQ(string("feed"), string(TF_OperationName(feed)));
-
- TF_Operation* add0 = Add(feed, feed, graph, s, "add0");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add1 = Add(add0, add0, graph, s, "add1");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add2 = Add(add1, add1, graph, s, "add2");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add3 = Add(add2, add2, graph, s, "add3");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add4 = Add(add0, add0, graph, s, "add4");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add5 = Add(add4, add4, graph, s, "add5");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add6 = Add(add5, add5, graph, s, "add6");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- TF_Operation* add7 = Add(add3, add6, graph, s, "add7");
- ASSERT_EQ(TF_OK, TF_GetCode(s)) << TF_Message(s);
- EXPECT_EQ(string("add7"), string(TF_OperationName(add7)));
-
- GraphDef graph_def;
- ASSERT_TRUE(GetGraphDef(graph, &graph_def));
-
- SegmentNodesVector segments;
- ASSERT_EQ(SegmentGraph(graph_def,
- MakeCandidateFn({"add0", "add1", "add3", "add4",
- "add5", "add6", "add7"}),
- default_options_, &segments),
- tensorflow::Status::OK());
-
- // Expect 2 subgraphs
- EXPECT_EQ(segments.size(), 2);
-
- std::vector<string> expected0{"add3", "add4", "add5", "add6", "add7"};
- for (const auto& ex : expected0) {
- EXPECT_TRUE(segments[0].first.find(ex) != segments[0].first.end())
- << "Missing expected node " << ex;
- }
-
- std::vector<string> expected1{"add0", "add1"};
- for (const auto& ex : expected1) {
- EXPECT_TRUE(segments[1].first.find(ex) != segments[1].first.end())
- << "Missing expected node " << ex;
- }
- TF_DeleteGraph(graph);
- TF_DeleteStatus(s);
+ Scope s = Scope::NewRootScope();
+ auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
+ auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
+ auto add1 = ops::Add(s.WithOpName("add1"), add0, add0);
+ auto add2 = ops::Add(s.WithOpName("add2"), add1, add1);
+ auto add3 = ops::Add(s.WithOpName("add3"), add2, add2);
+ auto add4 = ops::Add(s.WithOpName("add4"), add0, add0);
+ auto add5 = ops::Add(s.WithOpName("add5"), add4, add4);
+ auto add6 = ops::Add(s.WithOpName("add6"), add5, add5);
+ auto add7 = ops::Add(s.WithOpName("add7"), add3, add6);
+ tensorflow::Graph g(OpRegistry::Global());
+ TF_EXPECT_OK(s.ToGraph(&g));
+
+ // Make add2 not a TRT candidate, and we expect 2 segments.
+ const std::set<string> all_adds = {"add0", "add1", "add2", "add3",
+ "add4", "add5", "add6", "add7"};
+ RunTest(&g, all_adds - "add2", all_adds, all_adds,
+ {{"add3", "add4", "add5", "add6", "add7"}, {"add0", "add1"}});
}
} // namespace test
diff --git a/tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc b/tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc
index 227ac120dd..f30dba59ad 100644
--- a/tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc
+++ b/tensorflow/contrib/tensorrt/shape_fn/trt_shfn.cc
@@ -28,36 +28,50 @@ limitations under the License.
namespace tensorflow {
namespace shape_inference {
-tensorflow::Status TRTEngineOpShapeInference(InferenceContext* context) {
- std::vector<tensorflow::TensorShape> shapes;
- for (int i = 0; i < context->num_outputs(); ++i) {
- context->set_output(i, context->UnknownShape());
+tensorflow::Status TRTEngineOpShapeInference(InferenceContext* c) {
+ for (int i = 0; i < c->num_outputs(); ++i) {
+ c->set_output(i, c->UnknownShape());
}
- auto status = context->GetAttr("input_shapes", &shapes);
- // it is ok to not to have shapes
- if (!status.ok()) return Status::OK();
- if ((int)shapes.size() != context->num_inputs()) return Status::OK();
- bool different_input = false;
- for (int i = 0; i < context->num_inputs(); ++i) {
- if (shapes.at(i) != context->input_tensor(i)->shape())
- different_input = true;
+
+ // Check the sanity of the input shapes.
+ std::vector<tensorflow::TensorShape> input_shapes;
+ TF_RETURN_IF_ERROR(c->GetAttr("input_shapes", &input_shapes));
+ if (input_shapes.size() != c->num_inputs()) {
+ return tensorflow::errors::InvalidArgument(
+ "The actual number of inputs doesn't match the number of input "
+ "shapes set in the attr: ",
+ c->num_inputs(), " vs ", input_shapes.size());
+ }
+ bool input_match = true;
+ for (int i = 0; i < c->num_inputs(); ++i) {
+ ShapeHandle handle;
+ TF_RETURN_IF_ERROR(
+ c->MakeShapeFromTensorShape(input_shapes.at(i), &handle));
+ ShapeHandle merged;
+ if (!c->Merge(c->input(i), handle, &merged).ok()) {
+ // The input shape doesn't match what was set in the attr; that's fine.
+ input_match = false;
+ }
}
- if (different_input) return Status::OK();
- shapes.resize(0);
- status = context->GetAttr("output_shapes", &shapes);
- if (!status.ok()) return Status::OK();
- if ((int)shapes.size() != context->num_outputs()) return Status::OK();
- std::vector<ShapeHandle> shape_handles(shapes.size());
- for (size_t i = 0; i < shapes.size(); ++i) {
- status =
- context->MakeShapeFromTensorShape(shapes.at(i), &shape_handles.at(i));
- if (!status.ok()) return Status::OK();
+
+ // Check the sanity of the output shapes.
+ std::vector<tensorflow::TensorShape> output_shapes;
+ TF_RETURN_IF_ERROR(c->GetAttr("output_shapes", &output_shapes));
+ if (output_shapes.size() != c->num_outputs()) {
+ return tensorflow::errors::InvalidArgument(
+ "The actual number of outputs doesn't match the number of output "
+ "shapes set in the attr: ",
+ c->num_outputs(), " vs ", output_shapes.size());
}
- for (int i = 0; i < context->num_outputs(); ++i) {
- context->set_output(i, shape_handles.at(i));
+ for (size_t i = 0; i < output_shapes.size(); ++i) {
+ ShapeHandle handle;
+ TF_RETURN_IF_ERROR(
+ c->MakeShapeFromTensorShape(output_shapes.at(i), &handle));
+ if (input_match) c->set_output(i, handle);
}
return Status::OK();
}
+
} // namespace shape_inference
} // namespace tensorflow
diff --git a/tensorflow/contrib/tensorrt/test/base_test.py b/tensorflow/contrib/tensorrt/test/base_test.py
new file mode 100644
index 0000000000..edd30ad7a9
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/test/base_test.py
@@ -0,0 +1,126 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Basic tests for TF-TensorRT integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.platform import test
+
+
+class SimpleSingleEngineGraphDefTest(trt_test.TfTrtIntegrationTestBase):
+
+ def GetParams(self):
+ """Create a graph containing single segment."""
+ # TODO(aaroey): test graph with different dtypes.
+ dtype = dtypes.float32
+ input_name = "input"
+ input_dims = [100, 24, 24, 2]
+ g = ops.Graph()
+ with g.as_default():
+ inp = array_ops.placeholder(
+ dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
+ with g.device("/GPU:0"):
+ conv_filter = constant_op.constant(
+ [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
+ name="weights",
+ dtype=dtype)
+ conv = nn.conv2d(
+ input=inp,
+ filter=conv_filter,
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ name="conv")
+ bias = constant_op.constant(
+ [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtype)
+ added = nn.bias_add(conv, bias, name="bias_add")
+ relu = nn.relu(added, "relu")
+ identity = array_ops.identity(relu, "identity")
+ pool = nn_ops.max_pool(
+ identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
+ array_ops.squeeze(pool, name=self.output_name)
+ return trt_test.TfTrtIntegrationTestParams(
+ gdef=g.as_graph_def(),
+ input_names=[input_name],
+ input_dims=[input_dims],
+ num_expected_engines=1,
+ expected_output_dims=(100, 6, 6, 6),
+ allclose_atol=1.e-03,
+ allclose_rtol=1.e-03)
+
+
+class SimpleMultiEngineGraphDefTest(trt_test.TfTrtIntegrationTestBase):
+
+ def GetParams(self):
+ """Create a graph containing multiple segment."""
+ # TODO(aaroey): test graph with different dtypes.
+ dtype = dtypes.float32
+ input_name = "input"
+ input_dims = [100, 24, 24, 2]
+ g = ops.Graph()
+ with g.as_default():
+ inp = array_ops.placeholder(
+ dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
+ with g.device("/GPU:0"):
+ conv_filter = constant_op.constant(
+ [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
+ name="weights",
+ dtype=dtype)
+ conv = nn.conv2d(
+ input=inp,
+ filter=conv_filter,
+ strides=[1, 2, 2, 1],
+ padding="SAME",
+ name="conv")
+ c1 = constant_op.constant(
+ np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype)
+ p = conv * c1
+ c2 = constant_op.constant(
+ np.random.randn(input_dims[0], 12, 12, 6), dtype=dtype)
+ q = conv / c2
+
+ edge = self.trt_incompatible_op(q)
+ edge /= edge
+ r = edge + edge
+
+ p -= edge
+ q *= edge
+ s = p + q
+ s -= r
+ array_ops.squeeze(s, name=self.output_name)
+ return trt_test.TfTrtIntegrationTestParams(
+ gdef=g.as_graph_def(),
+ input_names=[input_name],
+ input_dims=[input_dims],
+ num_expected_engines=2,
+ expected_output_dims=(100, 12, 12, 6),
+ allclose_atol=1.e-03,
+ allclose_rtol=1.e-03)
+
+
+# TODO(aaroey): add a large complex graph to test.
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/tensorrt/test/batch_matmul_test.py b/tensorflow/contrib/tensorrt/test/batch_matmul_test.py
new file mode 100644
index 0000000000..730b6843fb
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/test/batch_matmul_test.py
@@ -0,0 +1,76 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Model script to test TF-TensorRT integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_array_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
+
+
+class BatchMatMulTest(trt_test.TfTrtIntegrationTestBase):
+
+ def GetParams(self):
+ """Testing conversion of BatchMatMul in TF-TRT conversion."""
+ dtype = dtypes.float32
+ input_name = "input"
+ input_dims = [12, 5, 8, 12]
+ w1_name = "matmul_w1"
+ w1_dims = [12, 5, 12, 7]
+ w2_name = "matmul_w2"
+ w2_dims = [12, 12, 7]
+ g = ops.Graph()
+ with g.as_default():
+ inp = array_ops.placeholder(
+ dtype=dtype, shape=[None] + input_dims[1:], name=input_name)
+ w1 = array_ops.placeholder(dtype=dtype, shape=w1_dims, name=w1_name)
+ w2 = array_ops.placeholder(dtype=dtype, shape=w2_dims, name=w2_name)
+ with g.device("/GPU:0"):
+ b = constant_op.constant(np.random.randn(12, 5, 12, 7), dtype=dtype)
+ c = constant_op.constant(np.random.randn(5, 1, 1), dtype=dtype)
+ d = constant_op.constant(np.random.randn(5, 1, 1), dtype=dtype)
+ x1 = math_ops.matmul(inp, b)
+ x1 = x1 + c
+ x2 = math_ops.matmul(inp, w1)
+ x2 = x2 * d
+ e = gen_array_ops.reshape(inp, [12, 40, 12])
+ x3 = math_ops.matmul(e, w2)
+ f = constant_op.constant(np.random.randn(40, 1), dtype=dtype)
+ x3 = x3 + f
+ x3 = gen_array_ops.reshape(x3, [12, 5, 8, 7])
+ out = x1 + x2 + x3
+ array_ops.squeeze(out, name=self.output_name)
+ return trt_test.TfTrtIntegrationTestParams(
+ gdef=g.as_graph_def(),
+ input_names=[input_name, w1_name, w2_name],
+ input_dims=[input_dims, w1_dims, w2_dims],
+ num_expected_engines=1,
+ expected_output_dims=(12, 5, 8, 7),
+ allclose_atol=1.e-03,
+ allclose_rtol=1.e-03)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/tensorrt/test/biasadd_matmul_test.py b/tensorflow/contrib/tensorrt/test/biasadd_matmul_test.py
new file mode 100644
index 0000000000..0c03a10b64
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/test/biasadd_matmul_test.py
@@ -0,0 +1,112 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Model script to test TF-TensorRT integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_array_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.platform import test
+
+
+class BiasaddMatMulTest(trt_test.TfTrtIntegrationTestBase):
+
+ def GetParams(self):
+ """Testing conversion of BiasAdd MatMul in TF-TRT conversion."""
+ dtype = dtypes.float32
+ input_name = "input"
+ input_dims = [48, 12]
+ g = ops.Graph()
+ with g.as_default():
+ x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
+
+ b = constant_op.constant(np.random.randn(12, 4), dtype=dtype)
+ x1 = math_ops.matmul(x, b)
+ b = constant_op.constant(np.random.randn(1, 4), dtype=dtype)
+ x1 = x1 + b
+
+ b = constant_op.constant(np.random.randn(48, 4), dtype=dtype)
+ x2 = math_ops.matmul(x, b, transpose_a=True)
+ x2 = gen_array_ops.reshape(x2, [48, 1])
+
+ b = constant_op.constant(np.random.randn(4, 12), dtype=dtype)
+ x3 = math_ops.matmul(x, b, transpose_b=True)
+
+ b = constant_op.constant(np.random.randn(16, 48), dtype=dtype)
+ x4 = math_ops.matmul(x, b, transpose_b=True, transpose_a=True)
+ x4 = gen_array_ops.reshape(x4, [48, 4])
+
+ x5 = gen_array_ops.reshape(x, [4, 144])
+ b = constant_op.constant(np.random.randn(144, 48), dtype=dtype)
+ x5 = math_ops.matmul(x5, b)
+ b = constant_op.constant(np.random.randn(48), dtype=dtype)
+ x5 = nn.bias_add(x5, b)
+ x5 = gen_array_ops.reshape(x5, [48, 4])
+
+ x6 = gen_array_ops.reshape(x, [4, 12, 12])
+ b = constant_op.constant(np.random.randn(12), dtype=dtype)
+ x6 = nn.bias_add(x6, b, data_format="NHWC")
+ x6 = gen_array_ops.reshape(x6, [48, -1])
+
+ x7 = gen_array_ops.reshape(x, [4, 12, 3, 4])
+ b = constant_op.constant(np.random.randn(4), dtype=dtype)
+ x7 = nn.bias_add(x7, b, data_format="NHWC")
+ x7 = gen_array_ops.reshape(x7, [48, -1])
+
+ x8 = gen_array_ops.reshape(x, [4, 12, 3, 2, 2])
+ b = constant_op.constant(np.random.randn(2), dtype=dtype)
+ x8 = nn.bias_add(x8, b, data_format="NHWC")
+ x8 = gen_array_ops.reshape(x8, [48, -1])
+
+ x9 = gen_array_ops.reshape(x, [4, 12, 3, 2, 2])
+ b = constant_op.constant(np.random.randn(3), dtype=dtype)
+ x9 = nn.bias_add(x9, b, data_format="NCHW")
+ x9 = gen_array_ops.reshape(x9, [48, -1])
+
+ x10 = gen_array_ops.reshape(x, [4, 12, 3, 4])
+ b = constant_op.constant(np.random.randn(12), dtype=dtype)
+ x10 = nn.bias_add(x10, b, data_format="NCHW")
+ x10 = gen_array_ops.reshape(x10, [48, -1])
+
+ x11 = gen_array_ops.reshape(x, [4, 12, 12])
+ b = constant_op.constant(np.random.randn(4), dtype=dtype)
+ x11 = nn.bias_add(x11, b, data_format="NCHW")
+ x11 = gen_array_ops.reshape(x11, [48, -1])
+
+ out = array_ops.concat(
+ [x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11], axis=-1)
+ out = array_ops.squeeze(out, name=self.output_name)
+ return trt_test.TfTrtIntegrationTestParams(
+ gdef=g.as_graph_def(),
+ input_names=[input_name],
+ input_dims=[input_dims],
+ num_expected_engines=7,
+ expected_output_dims=(48, 89),
+ allclose_atol=1.e-03,
+ allclose_rtol=1.e-03)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/tensorrt/test/binary_tensor_weight_broadcast_test.py b/tensorflow/contrib/tensorrt/test/binary_tensor_weight_broadcast_test.py
new file mode 100644
index 0000000000..dd673463a5
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/test/binary_tensor_weight_broadcast_test.py
@@ -0,0 +1,119 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Model script to test TF-TensorRT integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_array_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
+
+
+class BinaryTensorWeightBroadcastTest(trt_test.TfTrtIntegrationTestBase):
+
+ def GetParams(self):
+ """Tests for scale & elementwise layers in TF-TRT."""
+ dtype = dtypes.float32
+ input_name = "input"
+ input_dims = [10, 24, 24, 20]
+ g = ops.Graph()
+ with g.as_default():
+ x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
+ # scale
+ a = constant_op.constant(np.random.randn(1), dtype=dtype)
+ f = x + a
+ x = math_ops.sigmoid(f)
+ # scale
+ a = constant_op.constant(np.random.randn(1), dtype=dtype)
+ f = a + x
+ x = math_ops.sigmoid(f)
+ # scale
+ a = constant_op.constant(np.random.randn(24, 1, 1), dtype=dtype)
+ f = x + a
+ x = math_ops.sigmoid(f)
+ # scale
+ a = constant_op.constant(np.random.randn(24, 1, 1), dtype=dtype)
+ f = a + x
+ x = math_ops.sigmoid(f)
+ # scale
+ a = constant_op.constant(np.random.randn(24, 24, 20), dtype=dtype)
+ f = a + x
+ x = math_ops.sigmoid(f)
+ # scale
+ a = constant_op.constant(np.random.randn(24, 24, 20), dtype=dtype)
+ f = x + a
+ x = math_ops.sigmoid(f)
+ # elementwise
+ a = constant_op.constant(np.random.randn(20), dtype=dtype)
+ f = x + a
+ x = math_ops.sigmoid(f)
+ # elementwise
+ a = constant_op.constant(np.random.randn(20), dtype=dtype)
+ f = a + x
+ x = math_ops.sigmoid(f)
+ # elementwise
+ a = constant_op.constant(np.random.randn(1, 24, 1, 1), dtype=dtype)
+ f = a + x
+ x = math_ops.sigmoid(f)
+ # elementwise
+ a = constant_op.constant(np.random.randn(1, 24, 1, 1), dtype=dtype)
+ f = x + a
+ x = math_ops.sigmoid(f)
+ # elementwise
+ a = constant_op.constant(np.random.randn(1, 24, 24, 1), dtype=dtype)
+ f = a + x
+ x = math_ops.sigmoid(f)
+ # elementwise
+ a = constant_op.constant(np.random.randn(1, 24, 24, 1), dtype=dtype)
+ f = x + a
+ x = math_ops.sigmoid(f)
+ # elementwise
+ a = constant_op.constant(np.random.randn(1, 24, 24, 20), dtype=dtype)
+ f = a + x
+ x = math_ops.sigmoid(f)
+ # elementwise
+ a = constant_op.constant(np.random.randn(1, 24, 24, 20), dtype=dtype)
+ f = x + a
+ x = math_ops.sigmoid(f)
+ # elementwise
+ a = constant_op.constant(np.random.randn(24, 20), dtype=dtype)
+ f = a + x
+ x = math_ops.sigmoid(f)
+ # elementwise
+ a = constant_op.constant(np.random.randn(24, 20), dtype=dtype)
+ f = x + a
+ x = math_ops.sigmoid(f)
+ gen_array_ops.reshape(x, [5, -1], name=self.output_name)
+ return trt_test.TfTrtIntegrationTestParams(
+ gdef=g.as_graph_def(),
+ input_names=[input_name],
+ input_dims=[input_dims],
+ num_expected_engines=16,
+ expected_output_dims=(5, 23040),
+ allclose_atol=1.e-03,
+ allclose_rtol=1.e-03)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/tensorrt/test/concatenation_test.py b/tensorflow/contrib/tensorrt/test/concatenation_test.py
new file mode 100644
index 0000000000..8c51c45b0a
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/test/concatenation_test.py
@@ -0,0 +1,83 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Model script to test TF-TensorRT integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_array_ops
+from tensorflow.python.ops import gen_math_ops
+from tensorflow.python.platform import test
+
+
+class ConcatenationTest(trt_test.TfTrtIntegrationTestBase):
+
+ def GetParams(self):
+ """Testing Concatenation in TF-TRT conversion."""
+ dtype = dtypes.float32
+ input_name = "input"
+ input_dims = [2, 3, 3, 1]
+ g = ops.Graph()
+ with g.as_default():
+ x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
+ # scale
+ a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
+ r1 = x / a
+ a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
+ r2 = a / x
+ a = constant_op.constant(np.random.randn(1, 3, 1), dtype=dtype)
+ r3 = a + x
+ a = constant_op.constant(np.random.randn(1, 3, 1), dtype=dtype)
+ r4 = x * a
+ a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
+ r5 = x - a
+ a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
+ r6 = a - x
+ a = constant_op.constant(np.random.randn(3, 1), dtype=dtype)
+ r7 = x - a
+ a = constant_op.constant(np.random.randn(3, 1), dtype=dtype)
+ r8 = a - x
+ a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype)
+ r9 = gen_math_ops.maximum(x, a)
+ a = constant_op.constant(np.random.randn(3, 1), dtype=dtype)
+ r10 = gen_math_ops.minimum(a, x)
+ a = constant_op.constant(np.random.randn(3), dtype=dtype)
+ r11 = x * a
+ a = constant_op.constant(np.random.randn(1), dtype=dtype)
+ r12 = a * x
+ concat1 = array_ops.concat([r1, r2, r3, r4, r5, r6], axis=-1)
+ concat2 = array_ops.concat([r7, r8, r9, r10, r11, r12], axis=3)
+ x = array_ops.concat([concat1, concat2], axis=-1)
+ gen_array_ops.reshape(x, [2, -1], name=self.output_name)
+ return trt_test.TfTrtIntegrationTestParams(
+ gdef=g.as_graph_def(),
+ input_names=[input_name],
+ input_dims=[input_dims],
+ num_expected_engines=1,
+ expected_output_dims=(2, 126),
+ allclose_atol=1.e-03,
+ allclose_rtol=1.e-03)
+
+
+if __name__ == "__main__":
+ test.main()
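As a quick sanity check on the (2, 126) expected output shape in ConcatenationTest above: every r_i broadcasts to shape [2, 3, 3, c_i]; concat1 stacks six width-1 results (width 6), concat2 stacks widths 1 + 1 + 1 + 1 + 3 + 1 (width 8), and reshaping the resulting [2, 3, 3, 14] tensor to [2, -1] gives 2 x (3 * 3 * 14) = (2, 126). A minimal sketch of that arithmetic (not part of the patch):

# Shape arithmetic behind ConcatenationTest's expected_output_dims.
concat1_width = 6 * 1                    # r1..r6 each broadcast to last dim 1
concat2_width = 1 + 1 + 1 + 1 + 3 + 1    # r7..r12 after broadcasting
assert 3 * 3 * (concat1_width + concat2_width) == 126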
diff --git a/tensorflow/contrib/tensorrt/test/const_broadcast_test.py b/tensorflow/contrib/tensorrt/test/const_broadcast_test.py
new file mode 100644
index 0000000000..97b29bf05d
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/test/const_broadcast_test.py
@@ -0,0 +1,68 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Script to test TF-TensorRT integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.platform import test
+
+
+class ConstBroadcastTest(trt_test.TfTrtIntegrationTestBase):
+
+ def GetParams(self):
+ """Test for Constant broadcasting in TF-TRT."""
+ dtype = dtypes.float32
+ input_name = 'input'
+ input_dims = [5, 12, 12, 2]
+ g = ops.Graph()
+ with g.as_default():
+ x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
+ filt1 = constant_op.constant(
+ 0.3, shape=(3, 3, 2, 1), dtype=dtype, name='filt1')
+ y1 = nn.conv2d(x, filt1, strides=[1, 1, 1, 1], padding='SAME', name='y1')
+ z1 = nn.relu(y1, name='z1')
+ filt2 = constant_op.constant(
+ np.random.randn(9), shape=(3, 3, 1, 1), dtype=dtype, name='filt2')
+ y2 = nn.conv2d(z1, filt2, strides=[1, 1, 1, 1], padding='SAME', name='y2')
+ z2 = nn.relu(y2, name='z')
+ filt3 = constant_op.constant(
+ np.random.randn(3, 3, 1, 1),
+ shape=(3, 3, 1, 1),
+ dtype=dtype,
+ name='filt3')
+ y3 = nn.conv2d(z2, filt3, strides=[1, 1, 1, 1], padding='SAME', name='y3')
+ nn.relu(y3, name='output')
+ return trt_test.TfTrtIntegrationTestParams(
+ gdef=g.as_graph_def(),
+ input_names=[input_name],
+ input_dims=[input_dims],
+ num_expected_engines=1,
+ expected_output_dims=(5, 12, 12, 1),
+ allclose_atol=1.e-02,
+ allclose_rtol=1.e-02)
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/contrib/tensorrt/test/multi_connection_neighbor_engine_test.py b/tensorflow/contrib/tensorrt/test/multi_connection_neighbor_engine_test.py
new file mode 100644
index 0000000000..734ccf6345
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/test/multi_connection_neighbor_engine_test.py
@@ -0,0 +1,87 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Model script to test TF-TensorRT integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_math_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.platform import test
+
+
+class MultiConnectionNeighborEngineTest(trt_test.TfTrtIntegrationTestBase):
+
+ def GetParams(self):
+ """Test for multi connection neighboring nodes wiring tests in TF-TRT."""
+ dtype = dtypes.float32
+ input_name = "input"
+ input_dims = [2, 3, 7, 5]
+ g = ops.Graph()
+ with g.as_default():
+ x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
+ e = constant_op.constant(
+ np.random.normal(.05, .005, [3, 2, 3, 4]),
+ name="weights",
+ dtype=dtype)
+ conv = nn.conv2d(
+ input=x,
+ filter=e,
+ data_format="NCHW",
+ strides=[1, 1, 1, 1],
+ padding="VALID",
+ name="conv")
+ b = constant_op.constant(
+ np.random.normal(2.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
+ t = conv + b
+
+ b = constant_op.constant(
+ np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
+ q = conv - b
+ edge = math_ops.sigmoid(q)
+
+ b = constant_op.constant(
+ np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
+ d = b + conv
+ edge3 = math_ops.sigmoid(d)
+
+ edge1 = gen_math_ops.tan(conv)
+ t = t - edge1
+ q = q + edge
+ t = t + q
+ t = t + d
+ t = t - edge3
+ array_ops.squeeze(t, name=self.output_name)
+ return trt_test.TfTrtIntegrationTestParams(
+ gdef=g.as_graph_def(),
+ input_names=[input_name],
+ input_dims=[input_dims],
+ num_expected_engines=2,
+ expected_output_dims=(2, 4, 5, 4),
+ allclose_atol=1.e-03,
+ allclose_rtol=1.e-03)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/tensorrt/test/neighboring_engine_test.py b/tensorflow/contrib/tensorrt/test/neighboring_engine_test.py
new file mode 100644
index 0000000000..50265c0845
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/test/neighboring_engine_test.py
@@ -0,0 +1,69 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Model script to test TF-TensorRT integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_math_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.platform import test
+
+
+class NeighboringEngineTest(trt_test.TfTrtIntegrationTestBase):
+
+ def GetParams(self):
+ """Neighboring node wiring tests in TF-TRT conversion."""
+ dtype = dtypes.float32
+ input_name = "input"
+ input_dims = [2, 3, 7, 5]
+ g = ops.Graph()
+ with g.as_default():
+ x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
+ e = constant_op.constant(
+ np.random.normal(.3, 0.05, [3, 2, 3, 4]), name="weights", dtype=dtype)
+ conv = nn.conv2d(
+ input=x,
+ filter=e,
+ data_format="NCHW",
+ strides=[1, 1, 1, 1],
+ padding="VALID",
+ name="conv")
+ b = constant_op.constant(
+ np.random.normal(1.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
+ t = conv * b
+ e = gen_math_ops.tan(conv)
+ t = t - e
+ array_ops.squeeze(t, name=self.output_name)
+ return trt_test.TfTrtIntegrationTestParams(
+ gdef=g.as_graph_def(),
+ input_names=[input_name],
+ input_dims=[input_dims],
+ num_expected_engines=2,
+ expected_output_dims=(2, 4, 5, 4),
+ allclose_atol=1.e-03,
+ allclose_rtol=1.e-03)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py b/tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py
deleted file mode 100644
index d9c41f90d0..0000000000
--- a/tensorflow/contrib/tensorrt/test/tf_trt_integration_test.py
+++ /dev/null
@@ -1,347 +0,0 @@
-# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Script to test TF-TensorRT integration."""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from collections import namedtuple
-import itertools
-import warnings
-import numpy as np
-import six
-
-from tensorflow.contrib import tensorrt as trt
-from tensorflow.core.protobuf import config_pb2
-from tensorflow.core.protobuf import rewriter_config_pb2
-from tensorflow.python.framework import constant_op
-from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import importer
-from tensorflow.python.framework import ops
-from tensorflow.python.framework import test_util
-from tensorflow.python.ops import array_ops
-from tensorflow.python.ops import math_ops
-from tensorflow.python.ops import nn
-from tensorflow.python.ops import nn_ops
-from tensorflow.python.platform import test
-
-INPUT_NAME = "input"
-OUTPUT_NAME = "output"
-INPUT_DIMS = [100, 24, 24, 2]
-MODE_FP32 = "FP32"
-MODE_FP16 = "FP16"
-MODE_INT8 = "INT8"
-
-if six.PY2:
- to_bytes = lambda s: s
- to_string = lambda s: s
-else:
- to_bytes = lambda s: s.encode("utf-8", errors="surrogateescape")
- to_string = lambda s: s.decode("utf-8")
-
-
-# TODO(aaroey): test graph with different dtypes.
-def GetSingleEngineGraphDef(dtype=dtypes.float32):
- """Create a graph containing single segment."""
- g = ops.Graph()
- with g.as_default():
- inp = array_ops.placeholder(
- dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
- with g.device("/GPU:0"):
- conv_filter = constant_op.constant(
- [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
- name="weights",
- dtype=dtype)
- conv = nn.conv2d(
- input=inp,
- filter=conv_filter,
- strides=[1, 2, 2, 1],
- padding="SAME",
- name="conv")
- bias = constant_op.constant(
- [4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtype)
- added = nn.bias_add(conv, bias, name="bias_add")
- relu = nn.relu(added, "relu")
- identity = array_ops.identity(relu, "identity")
- pool = nn_ops.max_pool(
- identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
- array_ops.squeeze(pool, name=OUTPUT_NAME)
- return g.as_graph_def()
-
-
-# TODO(aaroey): test graph with different dtypes.
-def GetMultiEngineGraphDef(dtype=dtypes.float32):
- """Create a graph containing multiple segment."""
- g = ops.Graph()
- with g.as_default():
- inp = array_ops.placeholder(
- dtype=dtype, shape=[None] + INPUT_DIMS[1:], name=INPUT_NAME)
- with g.device("/GPU:0"):
- conv_filter = constant_op.constant(
- [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
- name="weights",
- dtype=dtype)
- conv = nn.conv2d(
- input=inp,
- filter=conv_filter,
- strides=[1, 2, 2, 1],
- padding="SAME",
- name="conv")
- c1 = constant_op.constant(
- np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
- p = conv * c1
- c2 = constant_op.constant(
- np.random.randn(INPUT_DIMS[0], 12, 12, 6), dtype=dtype)
- q = conv / c2
-
- edge = math_ops.sin(q)
- edge /= edge
- r = edge + edge
-
- p -= edge
- q *= edge
- s = p + q
- s -= r
- array_ops.squeeze(s, name=OUTPUT_NAME)
- return g.as_graph_def()
-
-
-TestGraph = namedtuple("TestGraph",
- ["gdef", "num_expected_engines", "expected_output_dims"])
-
-TEST_GRAPHS = {
- "SingleEngineGraph":
- TestGraph(
- gdef=GetSingleEngineGraphDef(),
- num_expected_engines=1,
- expected_output_dims=(100, 6, 6, 6)),
- "MultiEngineGraph":
- TestGraph(
- gdef=GetMultiEngineGraphDef(),
- num_expected_engines=2,
- expected_output_dims=(100, 12, 12, 6)),
- # TODO(aaroey): add a large complex graph to test.
-}
-
-
-class TfTrtIntegrationTest(test_util.TensorFlowTestCase):
- """Class to test Tensorflow-TensorRT integration."""
-
- def setUp(self):
- """Setup method."""
- super(TfTrtIntegrationTest, self).setUp()
- warnings.simplefilter("always")
- self._input = np.random.random_sample(INPUT_DIMS)
-
- def _GetConfigProto(self,
- use_optimizer,
- precision_mode=None,
- is_dynamic_op=None):
- if use_optimizer:
- rewriter_cfg = rewriter_config_pb2.RewriterConfig()
- rewriter_cfg.optimizers.extend(["constfold", "layout"])
- custom_op = rewriter_cfg.custom_optimizers.add()
- custom_op.name = "TensorRTOptimizer"
- custom_op.parameter_map["minimum_segment_size"].i = 3
- custom_op.parameter_map["max_batch_size"].i = self._input.shape[0]
- custom_op.parameter_map["is_dynamic_op"].b = is_dynamic_op
- custom_op.parameter_map["max_workspace_size_bytes"].i = 1 << 25
- custom_op.parameter_map["precision_mode"].s = to_bytes(precision_mode)
- graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_cfg)
- else:
- graph_options = config_pb2.GraphOptions()
-
- gpu_options = config_pb2.GPUOptions()
- if trt.trt_convert.get_linked_tensorrt_version()[0] == 3:
- gpu_options.per_process_gpu_memory_fraction = 0.50
-
- config = config_pb2.ConfigProto(
- gpu_options=gpu_options, graph_options=graph_options)
- return config
-
- def _RunGraph(self, graph_key, gdef, input_data, config, num_runs=2):
- """Run given graphdef multiple times."""
- g = ops.Graph()
- with g.as_default():
- inp, out = importer.import_graph_def(
- graph_def=gdef, return_elements=[INPUT_NAME, OUTPUT_NAME], name="")
- inp = inp.outputs[0]
- out = out.outputs[0]
- with self.test_session(
- graph=g, config=config, use_gpu=True, force_gpu=True) as sess:
- val = None
- # Defaults to 2 runs to verify result across multiple runs is same.
- for _ in range(num_runs):
- new_val = sess.run(out, {inp: input_data})
- self.assertEquals(TEST_GRAPHS[graph_key].expected_output_dims,
- new_val.shape)
- if val is not None:
- self.assertAllEqual(new_val, val)
- val = new_val
- return val
-
- # Use real data that is representative of the inference dataset
- # for calibration. For this test script it is random data.
- def _RunCalibration(self, graph_key, gdef, input_data, config):
- """Run calibration on given graph."""
- return self._RunGraph(graph_key, gdef, input_data, config, 30)
-
- def _GetTrtGraph(self, gdef, precision_mode, is_dynamic_op):
- """Return trt converted graph."""
- return trt.create_inference_graph(
- input_graph_def=gdef,
- outputs=[OUTPUT_NAME],
- max_batch_size=self._input.shape[0],
- max_workspace_size_bytes=1 << 25,
- precision_mode=precision_mode,
- minimum_segment_size=2,
- is_dynamic_op=is_dynamic_op)
-
- def _VerifyGraphDef(self,
- graph_key,
- gdef,
- precision_mode=None,
- is_calibrated=None,
- dynamic_engine=None):
- num_engines = 0
- for n in gdef.node:
- if n.op == "TRTEngineOp":
- num_engines += 1
- self.assertNotEqual("", n.attr["serialized_segment"].s)
- self.assertNotEqual("", n.attr["segment_funcdef_name"].s)
- self.assertEquals(n.attr["precision_mode"].s, precision_mode)
- self.assertEquals(n.attr["static_engine"].b, not dynamic_engine)
- if precision_mode == MODE_INT8 and is_calibrated:
- self.assertNotEqual("", n.attr["calibration_data"].s)
- else:
- self.assertEquals("", n.attr["calibration_data"].s)
- if precision_mode is None:
- self.assertEquals(num_engines, 0)
- else:
- self.assertEquals(num_engines,
- TEST_GRAPHS[graph_key].num_expected_engines)
-
- def _RunTest(self, graph_key, use_optimizer, precision_mode,
- dynamic_infer_engine, dynamic_calib_engine):
- assert precision_mode in [MODE_FP32, MODE_FP16, MODE_INT8]
- input_gdef = TEST_GRAPHS[graph_key].gdef
- self._VerifyGraphDef(graph_key, input_gdef)
-
- # Get reference result without running trt.
- config_no_trt = self._GetConfigProto(False)
- print("Running original graph w/o trt, config:\n%s" % str(config_no_trt))
- ref_result = self._RunGraph(graph_key, input_gdef, self._input,
- config_no_trt)
-
- # Run calibration if necessary.
- if precision_mode == MODE_INT8:
-
- calib_config = self._GetConfigProto(use_optimizer, precision_mode,
- dynamic_calib_engine)
- print("Running calibration graph, config:\n%s" % str(calib_config))
- if use_optimizer:
- self.assertTrue(False)
- # TODO(aaroey): uncomment this and get infer_gdef when this mode is
- # supported.
- # result = self._RunCalibration(graph_key, input_gdef, self._input,
- # calib_config)
- else:
- calib_gdef = self._GetTrtGraph(input_gdef, precision_mode,
- dynamic_calib_engine)
- self._VerifyGraphDef(graph_key, calib_gdef, precision_mode, False,
- dynamic_calib_engine)
- result = self._RunCalibration(graph_key, calib_gdef, self._input,
- calib_config)
- infer_gdef = trt.calib_graph_to_infer_graph(calib_gdef)
- self._VerifyGraphDef(graph_key, infer_gdef, precision_mode, True,
- dynamic_calib_engine)
- self.assertAllClose(ref_result, result, rtol=1.e-03)
- else:
- infer_gdef = input_gdef
-
- # Run inference.
- infer_config = self._GetConfigProto(use_optimizer, precision_mode,
- dynamic_infer_engine)
- print("Running final inference graph, config:\n%s" % str(infer_config))
- if use_optimizer:
- result = self._RunGraph(graph_key, infer_gdef, self._input, infer_config)
- else:
- trt_infer_gdef = self._GetTrtGraph(infer_gdef, precision_mode,
- dynamic_infer_engine)
- self._VerifyGraphDef(graph_key, trt_infer_gdef, precision_mode, True,
- dynamic_infer_engine)
- result = self._RunGraph(graph_key, trt_infer_gdef, self._input,
- infer_config)
- self.assertAllClose(ref_result, result, rtol=1.e-03)
-
- def testIdempotence(self):
- # Test that applying tensorrt optimizer or offline conversion tools multiple
- # times to the same graph will result in same graph.
- # TODO(aaroey): implement this.
- pass
-
-
-def GetTests():
-
- def _GetTest(g, u, p, i, c):
-
- def _Test(self):
- print("Running test with parameters: graph_key=%s, use_optimizer=%s, "
- "precision_mode=%s, dynamic_infer_engine=%s, "
- "dynamic_calib_engine=%s" % (g, u, p, i, c))
- self._RunTest(g, u, p, i, c)
-
- return _Test
-
- use_optimizer_options = [False, True]
- precision_mode_options = [MODE_FP32, MODE_FP16, MODE_INT8]
- dynamic_infer_engine_options = [False, True]
- dynamic_calib_engine_options = [False, True]
- for (graph_key, use_optimizer, precision_mode,
- dynamic_infer_engine, dynamic_calib_engine) in itertools.product(
- TEST_GRAPHS, use_optimizer_options, precision_mode_options,
- dynamic_infer_engine_options, dynamic_calib_engine_options):
- if precision_mode == MODE_INT8:
- if not dynamic_calib_engine and dynamic_infer_engine:
- # TODO(aaroey): test this case, the conversion from static calibration
- # engine to dynamic inference engine should be a noop.
- continue
- if use_optimizer:
- # TODO(aaroey): if use_optimizer is True we need to get the inference
- # graphdef using custom python wrapper class, which is not currently
- # supported yet.
- continue
- if not dynamic_calib_engine:
- # TODO(aaroey): construction of static calibration engine is not
- # supported yet.
- continue
- if dynamic_calib_engine and not dynamic_infer_engine:
- # TODO(aaroey): construction of static inference engine using dynamic
- # calibration engine is not supported yet.
- continue
- else: # In non int8 mode.
- if dynamic_calib_engine:
- # dynamic_calib_engine doesn't affect non-int8 modes, so just let
- # related tests run once on dynamic_calib_engine=False.
- continue
- yield _GetTest(graph_key, use_optimizer, precision_mode,
- dynamic_infer_engine, dynamic_calib_engine)
-
-
-if __name__ == "__main__":
- for index, t in enumerate(GetTests()):
- setattr(TfTrtIntegrationTest, "testTfTRT_" + str(index), t)
- test.main()
diff --git a/tensorflow/contrib/tensorrt/test/tf_trt_integration_test_base.py b/tensorflow/contrib/tensorrt/test/tf_trt_integration_test_base.py
new file mode 100644
index 0000000000..60b8eb6e81
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/test/tf_trt_integration_test_base.py
@@ -0,0 +1,328 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Utilities to test TF-TensorRT integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from collections import namedtuple
+import itertools
+import warnings
+import numpy as np
+import six
+
+from tensorflow.contrib.tensorrt.python import trt_convert
+# pylint: disable=unused-import
+from tensorflow.contrib.tensorrt.python.ops import trt_engine_op
+# pylint: enable=unused-import
+from tensorflow.core.protobuf import config_pb2
+from tensorflow.core.protobuf import rewriter_config_pb2
+from tensorflow.python.framework import importer
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import test_util
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import tf_logging as logging
+
+TfTrtIntegrationTestParams = namedtuple("TfTrtIntegrationTestParams", [
+ "gdef", "input_names", "input_dims", "num_expected_engines",
+ "expected_output_dims", "allclose_atol", "allclose_rtol"
+])
+
+PRECISION_MODES = ["FP32", "FP16", "INT8"]
+
+
+def _IsQuantizationMode(mode):
+ return mode == "INT8"
+
+
+class TfTrtIntegrationTestBase(test_util.TensorFlowTestCase):
+ """Class to test Tensorflow-TensorRT integration."""
+
+ @property
+ def output_name(self):
+ return "output"
+
+ @property
+ def trt_incompatible_op(self):
+ return math_ops.sin
+
+ @property
+ def precision_modes(self):
+ return ["FP32", "FP16", "INT8"]
+
+ def _ToBytes(self, s):
+ if six.PY2:
+ return s
+ else:
+ return s.encode("utf-8")
+
+ def _ToString(self, s):
+ if six.PY2:
+ return s
+ else:
+ return s.decode("utf-8")
+
+ def setUp(self):
+ """Setup method."""
+ super(TfTrtIntegrationTestBase, self).setUp()
+ warnings.simplefilter("always")
+
+ def GetParams(self):
+ """Return a TfTrtIntegrationTestParams for test, implemented by subclass."""
+ raise NotImplementedError()
+
+ def _GetConfigProto(self,
+ params,
+ use_optimizer,
+ precision_mode=None,
+ is_dynamic_op=None):
+ """Get config proto based on specific settings."""
+ if use_optimizer:
+ rewriter_cfg = rewriter_config_pb2.RewriterConfig()
+ rewriter_cfg.optimizers.extend(["constfold", "layout"])
+ custom_op = rewriter_cfg.custom_optimizers.add()
+ custom_op.name = "TensorRTOptimizer"
+ custom_op.parameter_map["minimum_segment_size"].i = 3
+ custom_op.parameter_map["max_batch_size"].i = max(
+ [dims[0] for dims in params.input_dims])
+ custom_op.parameter_map["is_dynamic_op"].b = is_dynamic_op
+ custom_op.parameter_map["max_workspace_size_bytes"].i = 1 << 25
+ custom_op.parameter_map["precision_mode"].s = self._ToBytes(
+ precision_mode)
+ graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_cfg)
+ else:
+ graph_options = config_pb2.GraphOptions()
+
+ gpu_options = config_pb2.GPUOptions()
+ if trt_convert.get_linked_tensorrt_version()[0] == 3:
+ gpu_options.per_process_gpu_memory_fraction = 0.50
+
+ config = config_pb2.ConfigProto(
+ gpu_options=gpu_options, graph_options=graph_options)
+ return config
+
+ def _RunGraph(self, params, gdef, input_data, config, num_runs=2):
+ """Run given graphdef multiple times."""
+ assert len(params.input_names) == len(input_data)
+ g = ops.Graph()
+ with g.as_default():
+ io_ops = importer.import_graph_def(
+ graph_def=gdef,
+ return_elements=params.input_names + [self.output_name],
+ name="")
+ inp = [i.outputs[0] for i in io_ops[:-1]]
+ assert len(inp) == len(input_data)
+ out = io_ops[-1].outputs[0]
+ with self.test_session(
+ graph=g, config=config, use_gpu=True, force_gpu=True) as sess:
+ val = None
+ # Defaults to 2 runs to verify that the result is the same across runs.
+ for _ in range(num_runs):
+ new_val = sess.run(out,
+ {inp[i]: input_data[i] for i in range(len(inp))})
+ self.assertEqual(params.expected_output_dims, new_val.shape)
+ if val is not None:
+ self.assertAllEqual(val, new_val)
+ val = new_val
+ return val
+
+ # Use real data that is representative of the inference dataset
+ # for calibration. For this test script it is random data.
+ def _RunCalibration(self, params, gdef, input_data, config):
+ """Run calibration on given graph."""
+ return self._RunGraph(params, gdef, input_data, config, 30)
+
+ def _GetTrtGraphDef(self, params, gdef, precision_mode, is_dynamic_op):
+ """Return trt converted graphdef."""
+ return trt_convert.create_inference_graph(
+ input_graph_def=gdef,
+ outputs=[self.output_name],
+ max_batch_size=max([dims[0] for dims in params.input_dims]),
+ max_workspace_size_bytes=1 << 25,
+ precision_mode=precision_mode,
+ minimum_segment_size=2,
+ is_dynamic_op=is_dynamic_op)
+
+ def _VerifyGraphDef(self,
+ params,
+ gdef,
+ precision_mode=None,
+ is_calibrated=None,
+ dynamic_engine=None):
+ num_engines = 0
+ for n in gdef.node:
+ # TODO(jie): we should have coverage for failed conversion (TF fallback),
+ # where the conversion fails and the engine should not be counted among
+ # the converted engines.
+ if n.op == "TRTEngineOp":
+ num_engines += 1
+ self.assertNotEqual(self._ToBytes(""), n.attr["serialized_segment"].s)
+ self.assertNotEqual(self._ToBytes(""), n.attr["segment_funcdef_name"].s)
+ self.assertEqual(
+ self._ToBytes(precision_mode), n.attr["precision_mode"].s)
+ self.assertEqual(not dynamic_engine, n.attr["static_engine"].b)
+ if _IsQuantizationMode(precision_mode) and is_calibrated:
+ self.assertNotEqual(self._ToBytes(""), n.attr["calibration_data"].s)
+ else:
+ self.assertEqual(self._ToBytes(""), n.attr["calibration_data"].s)
+ if precision_mode is None: # This means gdef is the original GraphDef.
+ self.assertEqual(0, num_engines)
+ else:
+ self.assertEqual(num_engines, params.num_expected_engines)
+
+ def RunTest(self, params, use_optimizer, precision_mode,
+ dynamic_infer_engine, dynamic_calib_engine):
+ assert precision_mode in PRECISION_MODES
+ input_data = [np.random.random_sample(dims) for dims in params.input_dims]
+ input_gdef = params.gdef
+ self._VerifyGraphDef(params, input_gdef)
+
+ # Get reference result without running trt.
+ config_no_trt = self._GetConfigProto(params, False)
+ logging.info("Running original graph w/o trt, config:\n%s",
+ str(config_no_trt))
+ ref_result = self._RunGraph(params, input_gdef, input_data, config_no_trt)
+
+ # Run calibration if necessary.
+ if _IsQuantizationMode(precision_mode):
+
+ calib_config = self._GetConfigProto(params, use_optimizer, precision_mode,
+ dynamic_calib_engine)
+ logging.info("Running calibration graph, config:\n%s", str(calib_config))
+ if use_optimizer:
+ self.assertTrue(False)
+ # TODO(aaroey): uncomment this and get infer_gdef when this mode is
+ # supported.
+ # result = self._RunCalibration(params, input_gdef, input_data,
+ # calib_config)
+ else:
+ calib_gdef = self._GetTrtGraphDef(params, input_gdef, precision_mode,
+ dynamic_calib_engine)
+ self._VerifyGraphDef(params, calib_gdef, precision_mode, False,
+ dynamic_calib_engine)
+ result = self._RunCalibration(params, calib_gdef, input_data,
+ calib_config)
+ infer_gdef = trt_convert.calib_graph_to_infer_graph(calib_gdef)
+ self._VerifyGraphDef(params, infer_gdef, precision_mode, True,
+ dynamic_calib_engine)
+
+ self.assertAllClose(
+ ref_result,
+ result,
+ atol=params.allclose_atol,
+ rtol=params.allclose_rtol)
+ else:
+ infer_gdef = input_gdef
+
+ # Run inference.
+ infer_config = self._GetConfigProto(params, use_optimizer, precision_mode,
+ dynamic_infer_engine)
+ logging.info("Running final inference graph, config:\n%s",
+ str(infer_config))
+ if use_optimizer:
+ result = self._RunGraph(params, infer_gdef, input_data, infer_config)
+ else:
+ trt_infer_gdef = self._GetTrtGraphDef(params, infer_gdef, precision_mode,
+ dynamic_infer_engine)
+ self._VerifyGraphDef(params, trt_infer_gdef, precision_mode, True,
+ dynamic_infer_engine)
+ result = self._RunGraph(params, trt_infer_gdef, input_data, infer_config)
+
+ self.assertAllClose(
+ ref_result,
+ result,
+ atol=params.allclose_atol,
+ rtol=params.allclose_rtol)
+
+ def testIdempotence(self):
+ # Test that applying the TensorRT optimizer or offline conversion tools
+ # multiple times to the same graph results in the same graph.
+ #
+ # TODO(aaroey): currently the conversion is not deterministic. This is mainly
+ # because during tensorflow::ConvertGraphDefToGraph() the graph uses EdgeSet,
+ # which uses a map keyed by Edge*, so the order of input/output edges of a
+ # node is nondeterministic, and thus the order in which the segmenter
+ # contracts edges is nondeterministic. Need to evaluate whether to fix this.
+ pass
+
+
+def _AddTests(test_class):
+ """Adds test methods to TfTrtIntegrationTestBase."""
+
+ def _GetTest(use_optimizer, precision_mode, dynamic_infer_engine,
+ dynamic_calib_engine):
+ """Gets a single test method based on the parameters."""
+
+ def _Test(self):
+ params = self.GetParams()
+ logging.info(
+ "Running test with parameters: use_optimizer=%s, precision_mode=%s, "
+ "dynamic_infer_engine=%s, dynamic_calib_engine=%s", use_optimizer,
+ precision_mode, dynamic_infer_engine, dynamic_calib_engine)
+ self.RunTest(params, use_optimizer, precision_mode, dynamic_infer_engine,
+ dynamic_calib_engine)
+
+ return _Test
+
+ use_optimizer_options = [False, True]
+ dynamic_infer_engine_options = [False, True]
+ dynamic_calib_engine_options = [False, True]
+ for (use_optimizer, precision_mode,
+ dynamic_infer_engine, dynamic_calib_engine) in itertools.product(
+ use_optimizer_options, PRECISION_MODES, dynamic_infer_engine_options,
+ dynamic_calib_engine_options):
+ if _IsQuantizationMode(precision_mode):
+ if not dynamic_calib_engine and dynamic_infer_engine:
+ # TODO(aaroey): test this case, the conversion from static calibration
+ # engine to dynamic inference engine should be a noop.
+ continue
+ if use_optimizer:
+ # TODO(aaroey): if use_optimizer is True we need to get the inference
+ # graphdef using custom python wrapper class, which is not currently
+ # supported yet.
+ continue
+ if not dynamic_calib_engine:
+ # TODO(aaroey): construction of static calibration engine is not
+ # supported yet.
+ continue
+ if dynamic_calib_engine and not dynamic_infer_engine:
+ # TODO(aaroey): construction of static inference engine using dynamic
+ # calibration engine is not supported yet.
+ continue
+ else: # In non int8 mode.
+ if dynamic_calib_engine:
+ # dynamic_calib_engine doesn't affect non-int8 modes, so just let
+ # related tests run once on dynamic_calib_engine=False.
+ continue
+
+ conversion = "OptimizerConversion" if use_optimizer else "ToolConversion"
+ infer_engine_type = ("DynamicInferEngine"
+ if dynamic_infer_engine else "StaticInferEngine")
+ calib_engine_type = ""
+ if precision_mode == "INT8":
+ calib_engine_type = ("DynamicCalibEngine"
+ if dynamic_calib_engine else "StaticCalibEngine")
+ test_name = "%s_%s_%s%s" % (conversion, precision_mode, infer_engine_type,
+ ("_" + calib_engine_type)
+ if len(calib_engine_type) else "")
+ setattr(
+ test_class, "testTfTRT_" + test_name,
+ _GetTest(use_optimizer, precision_mode, dynamic_infer_engine,
+ dynamic_calib_engine))
+
+
+if trt_convert.is_tensorrt_enabled():
+ _AddTests(TfTrtIntegrationTestBase)
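For orientation, below is a minimal sketch of how a new test file would plug into the base class added above. The conv/bias/relu graph and the engine count are illustrative assumptions, not part of this change:

import numpy as np

from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test


class MinimalConvTest(trt_test.TfTrtIntegrationTestBase):

  def GetParams(self):
    """Hypothetical conv/bias/relu graph illustrating the GetParams contract."""
    dtype = dtypes.float32
    input_name = "input"
    input_dims = [2, 8, 8, 3]
    g = ops.Graph()
    with g.as_default():
      x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
      filt = constant_op.constant(
          np.random.randn(1, 1, 3, 4), dtype=dtype, name="weights")
      conv = nn.conv2d(
          x, filt, strides=[1, 1, 1, 1], padding="SAME", name="conv")
      bias = constant_op.constant(np.random.randn(4), dtype=dtype, name="bias")
      added = nn.bias_add(conv, bias, name="bias_add")
      relu = nn.relu(added, name="relu")
      # The base class looks up the output op by self.output_name ("output").
      array_ops.identity(relu, name=self.output_name)
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=[input_name],
        input_dims=[input_dims],
        # Illustrative; the actual count depends on how the segmenter groups ops.
        num_expected_engines=1,
        expected_output_dims=(2, 8, 8, 4),
        allclose_atol=1.e-03,
        allclose_rtol=1.e-03)


if __name__ == "__main__":
  test.main()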
diff --git a/tensorflow/contrib/tensorrt/test/unary_test.py b/tensorflow/contrib/tensorrt/test/unary_test.py
new file mode 100644
index 0000000000..b9e977cf67
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/test/unary_test.py
@@ -0,0 +1,110 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Model script to test TF-TensorRT integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_array_ops
+from tensorflow.python.ops import gen_math_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import test
+
+
+class UnaryTest(trt_test.TfTrtIntegrationTestBase):
+
+ def GetParams(self):
+ """Test for unary operations in TF-TRT."""
+ dtype = dtypes.float32
+ input_name = "input"
+ input_dims = [12, 5, 8, 1, 1, 12]
+ input2_name = "input_2"
+ input2_dims = [12, 5, 8, 1, 12, 1, 1]
+ g = ops.Graph()
+ with g.as_default():
+ x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
+ q = math_ops.abs(x)
+ q = q + 1.0
+ q = gen_math_ops.exp(q)
+ q = gen_math_ops.log(q)
+ q = array_ops.squeeze(q, axis=-2)
+ q = math_ops.abs(q)
+ q = q + 2.2
+ q = gen_math_ops.sqrt(q)
+ q = gen_math_ops.rsqrt(q)
+ q = math_ops.negative(q)
+ q = array_ops.squeeze(q, axis=3)
+ q = math_ops.abs(q)
+ q = q + 3.0
+ a = gen_math_ops.reciprocal(q)
+
+ x = constant_op.constant(np.random.randn(5, 8, 12), dtype=dtype)
+ q = math_ops.abs(x)
+ q = q + 2.0
+ q = gen_math_ops.exp(q)
+ q = gen_math_ops.log(q)
+ q = math_ops.abs(q)
+ q = q + 2.1
+ q = gen_math_ops.sqrt(q)
+ q = gen_math_ops.rsqrt(q)
+ q = math_ops.negative(q)
+ q = math_ops.abs(q)
+ q = q + 4.0
+ b = gen_math_ops.reciprocal(q)
+
+ # TODO(jie): this one will break, broadcasting on batch.
+ x = array_ops.placeholder(
+ dtype=dtype, shape=input2_dims, name=input2_name)
+ q = math_ops.abs(x)
+ q = q + 5.0
+ q = gen_math_ops.exp(q)
+ q = array_ops.squeeze(q, axis=[-1, -2, 3])
+ q = gen_math_ops.log(q)
+ q = math_ops.abs(q)
+ q = q + 5.1
+ q = gen_array_ops.reshape(q, [12, 5, 1, 1, 8, 1, 12])
+ q = array_ops.squeeze(q, axis=[5, 2, 3])
+ q = gen_math_ops.sqrt(q)
+ q = math_ops.abs(q)
+ q = q + 5.2
+ q = gen_math_ops.rsqrt(q)
+ q = math_ops.negative(q)
+ q = math_ops.abs(q)
+ q = q + 5.3
+ c = gen_math_ops.reciprocal(q)
+
+ q = a * b
+ q = q / c
+ array_ops.squeeze(q, name=self.output_name)
+ return trt_test.TfTrtIntegrationTestParams(
+ gdef=g.as_graph_def(),
+ input_names=[input_name, input2_name],
+ input_dims=[input_dims, input2_dims],
+ num_expected_engines=5,
+ expected_output_dims=(12, 5, 8, 12),
+ allclose_atol=1.e-03,
+ allclose_rtol=1.e-03)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/tensorrt/test/vgg_block_nchw_test.py b/tensorflow/contrib/tensorrt/test/vgg_block_nchw_test.py
new file mode 100644
index 0000000000..2b134c3bce
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/test/vgg_block_nchw_test.py
@@ -0,0 +1,82 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Model script to test TF-TensorRT integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import nn_impl
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.platform import test
+
+
+class VGGBlockNCHWTest(trt_test.TfTrtIntegrationTestBase):
+
+ def GetParams(self):
+ """Single vgg layer in NCHW unit tests in TF-TRT."""
+ dtype = dtypes.float32
+ input_name = "input"
+ input_dims = [5, 2, 8, 8]
+ g = ops.Graph()
+ with g.as_default():
+ x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
+ x, _, _ = nn_impl.fused_batch_norm(
+ x,
+ np.random.randn(2).astype(np.float32),
+ np.random.randn(2).astype(np.float32),
+ mean=np.random.randn(2).astype(np.float32),
+ variance=np.random.randn(2).astype(np.float32),
+ data_format="NCHW",
+ is_training=False)
+ e = constant_op.constant(
+ np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
+ conv = nn.conv2d(
+ input=x,
+ filter=e,
+ data_format="NCHW",
+ strides=[1, 1, 2, 2],
+ padding="SAME",
+ name="conv")
+ b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
+ t = nn.bias_add(conv, b, data_format="NCHW", name="biasAdd")
+ relu = nn.relu(t, "relu")
+ idty = array_ops.identity(relu, "ID")
+ v = nn_ops.max_pool(
+ idty, [1, 1, 2, 2], [1, 1, 2, 2],
+ "VALID",
+ data_format="NCHW",
+ name="max_pool")
+ array_ops.squeeze(v, name="output")
+ return trt_test.TfTrtIntegrationTestParams(
+ gdef=g.as_graph_def(),
+ input_names=[input_name],
+ input_dims=[input_dims],
+ num_expected_engines=1,
+ expected_output_dims=(5, 6, 2, 2),
+ allclose_atol=1.e-03,
+ allclose_rtol=1.e-03)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/tensorrt/test/vgg_block_test.py b/tensorflow/contrib/tensorrt/test/vgg_block_test.py
new file mode 100644
index 0000000000..bec2f23eff
--- /dev/null
+++ b/tensorflow/contrib/tensorrt/test/vgg_block_test.py
@@ -0,0 +1,73 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Model script to test TF-TensorRT integration."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
+from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import nn
+from tensorflow.python.ops import nn_impl
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.platform import test
+
+
+class VGGBlockTest(trt_test.TfTrtIntegrationTestBase):
+
+ def GetParams(self):
+ """Single vgg layer test in TF-TRT conversion."""
+ dtype = dtypes.float32
+ input_name = "input"
+ input_dims = [5, 8, 8, 2]
+ g = ops.Graph()
+ with g.as_default():
+ x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
+ x, _, _ = nn_impl.fused_batch_norm(
+ x,
+ np.random.randn(2).astype(np.float32),
+ np.random.randn(2).astype(np.float32),
+ mean=np.random.randn(2).astype(np.float32),
+ variance=np.random.randn(2).astype(np.float32),
+ is_training=False)
+ e = constant_op.constant(
+ np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype)
+ conv = nn.conv2d(
+ input=x, filter=e, strides=[1, 2, 2, 1], padding="SAME", name="conv")
+ b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype)
+ t = nn.bias_add(conv, b, name="biasAdd")
+ relu = nn.relu(t, "relu")
+ idty = array_ops.identity(relu, "ID")
+ v = nn_ops.max_pool(
+ idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
+ array_ops.squeeze(v, name="output")
+ return trt_test.TfTrtIntegrationTestParams(
+ gdef=g.as_graph_def(),
+ input_names=[input_name],
+ input_dims=[input_dims],
+ num_expected_engines=1,
+ expected_output_dims=(5, 2, 2, 6),
+ allclose_atol=1.e-03,
+ allclose_rtol=1.e-03)
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/contrib/tensorrt/trt_conversion.i b/tensorflow/contrib/tensorrt/trt_conversion.i
index d6628cd1eb..422740fdf6 100644
--- a/tensorflow/contrib/tensorrt/trt_conversion.i
+++ b/tensorflow/contrib/tensorrt/trt_conversion.i
@@ -100,6 +100,7 @@ _LIST_OUTPUT_TYPEMAP(int, PyLong_FromLong);
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/util/stat_summarizer.h"
#include "tensorflow/contrib/tensorrt/convert/convert_graph.h"
+#include "tensorflow/contrib/tensorrt/convert/utils.h"
%}
%ignoreall
@@ -108,6 +109,7 @@ _LIST_OUTPUT_TYPEMAP(int, PyLong_FromLong);
%unignore calib_convert;
%unignore get_linked_tensorrt_version;
%unignore get_loaded_tensorrt_version;
+%unignore is_tensorrt_enabled;
%{
@@ -140,7 +142,7 @@ std::pair<string, string> trt_convert(
return std::pair<string, string>{out_status, ""};
}
- if(precision_mode < 0 || precision_mode > 2){
+ if (precision_mode < 0 || precision_mode > 2) {
out_status = "InvalidArgument;Invalid precision_mode";
return std::pair<string, string>{out_status, ""};
}
@@ -232,7 +234,8 @@ version_struct get_linked_tensorrt_version() {
#endif // GOOGLE_CUDA && GOOGLE_TENSORRT
return s;
}
-version_struct get_loaded_tensorrt_version(){
+
+version_struct get_loaded_tensorrt_version() {
// Return the version from the loaded library.
version_struct s;
#if GOOGLE_CUDA && GOOGLE_TENSORRT
@@ -244,6 +247,10 @@ version_struct get_loaded_tensorrt_version(){
return s;
}
+bool is_tensorrt_enabled() {
+ return tensorflow::tensorrt::IsGoogleTensorRTEnabled();
+}
+
%}
std::pair<string, string> calib_convert(string graph_def_string, bool is_dyn_op);
@@ -258,5 +265,6 @@ std::pair<string, string> trt_convert(string graph_def_string,
std::vector<int> cached_engine_batches);
version_struct get_linked_tensorrt_version();
version_struct get_loaded_tensorrt_version();
+bool is_tensorrt_enabled();
%unignoreall
diff --git a/tensorflow/contrib/tpu/BUILD b/tensorflow/contrib/tpu/BUILD
index 0d1c7fc75a..643a7cc13a 100644
--- a/tensorflow/contrib/tpu/BUILD
+++ b/tensorflow/contrib/tpu/BUILD
@@ -16,7 +16,6 @@ package(
"//cloud/vmm/testing/tests/tpu:__subpackages__",
"//learning/brain:__subpackages__",
"//tensorflow:__subpackages__",
- "//third_party/cloud_tpu:__subpackages__",
],
)
@@ -37,6 +36,7 @@ cc_library(
py_library(
name = "tpu_estimator",
srcs = [
+ "python/tpu/error_handling.py",
"python/tpu/tpu_config.py",
"python/tpu/tpu_context.py",
"python/tpu/tpu_estimator.py",
@@ -166,6 +166,17 @@ py_library(
"python/tpu/keras_support.py",
],
srcs_version = "PY2AND3",
+ visibility = [
+ "//cloud/vmm/testing/tests/tpu:__subpackages__",
+ "//learning/brain:__subpackages__",
+ # TODO(b/111651964): Clean special visibility for keras_support.
+ #
+ # Note: If you are an end user, please do not add your project to this
+ # visibility. This feature is experimental, and will be made public
+ # when ready.
+ "//third_party/cloud_tpu/models/keras:__subpackages__",
+ "//tensorflow:__subpackages__",
+ ],
deps = [
":tpu_lib",
":tpu_py",
@@ -175,12 +186,16 @@ py_library(
"//tensorflow/contrib/tpu/proto:compilation_result_proto_py",
"//tensorflow/core:protos_all_py",
"//tensorflow/python:array_ops",
+ "//tensorflow/python:dtypes",
"//tensorflow/python:framework_ops",
+ "//tensorflow/python:linalg_ops",
"//tensorflow/python:math_ops",
"//tensorflow/python:platform",
+ "//tensorflow/python:random_ops",
"//tensorflow/python:session",
"//tensorflow/python:tensor_spec",
"//tensorflow/python:variable_scope",
+ "//tensorflow/python/data/ops:dataset_ops",
"//tensorflow/python/estimator:model_fn",
"//tensorflow/python/keras:backend",
"//tensorflow/python/keras:engine",
diff --git a/tensorflow/contrib/tpu/__init__.py b/tensorflow/contrib/tpu/__init__.py
index d62338680e..d5484e9032 100644
--- a/tensorflow/contrib/tpu/__init__.py
+++ b/tensorflow/contrib/tpu/__init__.py
@@ -42,10 +42,10 @@
@@TPUEstimator
@@TPUEstimatorSpec
+@@export_estimator_savedmodel
@@RunConfig
@@InputPipelineConfig
@@TPUConfig
-
@@bfloat16_scope
"""
diff --git a/tensorflow/contrib/tpu/proto/BUILD b/tensorflow/contrib/tpu/proto/BUILD
index 26016f47df..598b73b438 100644
--- a/tensorflow/contrib/tpu/proto/BUILD
+++ b/tensorflow/contrib/tpu/proto/BUILD
@@ -15,6 +15,16 @@ tf_proto_library(
"tpu_embedding_config.proto",
],
cc_api_version = 2,
+ protodeps = [":optimization_parameters_proto"],
+ visibility = ["//visibility:public"],
+)
+
+tf_proto_library(
+ name = "optimization_parameters_proto",
+ srcs = [
+ "optimization_parameters.proto",
+ ],
+ cc_api_version = 2,
visibility = ["//visibility:public"],
)
diff --git a/tensorflow/contrib/tpu/proto/optimization_parameters.proto b/tensorflow/contrib/tpu/proto/optimization_parameters.proto
new file mode 100644
index 0000000000..9150606f5e
--- /dev/null
+++ b/tensorflow/contrib/tpu/proto/optimization_parameters.proto
@@ -0,0 +1,162 @@
+syntax = "proto2";
+
+package tensorflow.tpu;
+
+message ClippingLimits {
+ optional float lower = 1 [default = -inf];
+ optional float upper = 2 [default = inf];
+}
+
+// Get the learning rate from a <yet to be determined> source that can change
+// dynamically.
+message DynamicLearningRate {
+}
+
+// Source of learning rate to use.
+message LearningRate {
+ oneof learning_rate {
+ float constant = 1;
+ DynamicLearningRate dynamic = 2;
+ }
+}
+
+message AdagradParameters {
+ optional float initial_accumulator = 1 [default = 0.];
+}
+
+message StochasticGradientDescentParameters {
+}
+
+message FtrlParameters {
+ optional float l1 = 1 [default = 0.];
+ optional float l2 = 2 [default = 0.];
+ optional float lr_power = 3 [default = 0.];
+ optional float initial_accum = 4 [default = 0.];
+ optional float initial_linear = 5 [default = 0.];
+}
+
+// The Adam optimizer does not implement hyper-parameter update; use the dynamic
+// learning rate feature instead, setting the learning rate to:
+// user learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
+// Here, t is the current timestep.
+// https://github.com/tensorflow/tensorflow/blob/ab51450c817674c8ff08a7ae4f8ac50cdc4bed8b/tensorflow/python/training/adam.py#L54
+message AdamParameters {
+ optional float beta1 = 3 [default = 0.];
+ optional float beta2 = 4 [default = 0.];
+ optional float epsilon = 5 [default = 0.];
+ optional float initial_m = 6 [default = 0.];
+ optional float initial_v = 7 [default = 0.];
+}
+
+message MomentumParameters {
+ optional float momentum = 1 [default = 0.];
+ optional bool use_nesterov = 2 [default = false];
+ optional float initial_accum = 3 [default = 0.];
+}
+
+message RmsPropParameters {
+ optional float rho = 1 [default = 0.];
+ optional float momentum = 2 [default = 0.];
+ optional float epsilon = 3 [default = 0.];
+ optional float initial_ms = 4 [default = 0.];
+ optional float initial_mom = 5 [default = 0.];
+}
+
+message CenteredRmsPropParameters {
+ optional float rho = 1 [default = 0.];
+ optional float momentum = 2 [default = 0.];
+ optional float epsilon = 3 [default = 0.];
+ optional float initial_ms = 4 [default = 0.];
+ optional float initial_mom = 5 [default = 0.];
+ optional float initial_mg = 6 [default = 0.];
+}
+
+message MdlAdagradLightParameters {
+ optional float l2 = 1;
+ optional float lr_power = 2;
+ optional float min_servable_mdl_benefit = 3;
+ optional float mdl_mix_in_margin = 4;
+ optional float mdl_benefit_rampup_coeff = 5;
+ optional float mdl_min_weight = 6;
+ optional float benefit_revisit_scale = 7;
+ optional float max_event_benefit = 8;
+ optional float max_total_benefit = 9;
+ optional float mdl_hard_limit = 10;
+ optional bool hard_limit_min_benefit = 11;
+ optional bool mdl_regularize = 12;
+ optional float initial_accumulator = 13;
+ optional float initial_weight = 14;
+ optional float initial_benefit = 15;
+}
+
+message AdadeltaParameters {
+ optional float rho = 1;
+ optional float epsilon = 2;
+ optional float initial_accumulator = 3 [default = 0.];
+ optional float initial_update = 4 [default = 0.];
+}
+
+message ProximalAdagradParameters {
+ optional float l1 = 1;
+ optional float l2 = 2;
+ optional float initial_accumulator = 3;
+}
+
+message OptimizationParameters {
+ // Learning rate used for updating the embedding layer parameters.
+ optional LearningRate learning_rate = 13;
+ reserved 1; // Old learning rate tag.
+
+ // Limits to which to clip the weight values after the backward pass; not
+ // present means no limits are applied.
+ optional ClippingLimits clipping_limits = 2;
+
+ // Limits to which to clip the backward pass gradient before using it for
+ // updates; not present means no limits are applied.
+ optional ClippingLimits gradient_clipping_limits = 7;
+
+ // Whether to use gradient accumulation (do two passes over the input
+ // gradients: one to accumulate them into a temporary array and another to
+ // apply them using the actual optimization algorithm).
+ optional bool use_gradient_accumulation = 15 [default = false];
+
+ // Optimization algorithm parameters; which field is selected determines which
+ // algorithm to use.
+ oneof parameters {
+ AdagradParameters adagrad = 3;
+ StochasticGradientDescentParameters stochastic_gradient_descent = 4;
+ FtrlParameters ftrl = 5;
+ AdamParameters adam = 6;
+ MomentumParameters momentum = 8;
+ RmsPropParameters rms_prop = 9;
+ CenteredRmsPropParameters centered_rms_prop = 10;
+ MdlAdagradLightParameters mdl_adagrad_light = 11;
+ AdadeltaParameters adadelta = 12;
+ ProximalAdagradParameters proximal_adagrad = 14;
+ }
+}
+
+// Specification of an optimization algorithm's state variables (both the main
+// value vector and any extra accumulators, etc.).
+message StateVariableSpecification {
+ // Parameter name for the state variable.
+ optional string name = 1;
+
+ // A normal state variable that should be saved and restored in checkpoints
+ // and used as an input or output to non-debug TensorFlow ops.
+ message UserDefined {
+ }
+
+ // A state variable that should be filled with a constant and normally hidden
+ // from users (used for intermediate gradients being accumulated, for
+ // example).
+ message FillWithConstant {
+ optional double initial_value = 1;
+ }
+
+ // Usage type of this state variable.
+ oneof usage {
+ UserDefined user_defined = 2;
+ FillWithConstant fill_with_constant = 3;
+ }
+}
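As a rough illustration of the AdamParameters comment above, the sketch below builds the new OptimizationParameters message and computes the effective learning rate it describes. The *_pb2 module path and the concrete values are assumptions based on the conventional proto build naming, not something this change adds:

import math

from tensorflow.contrib.tpu.proto import optimization_parameters_pb2

# Select Adam via the `parameters` oneof and use a constant learning rate.
params = optimization_parameters_pb2.OptimizationParameters()
params.learning_rate.constant = 0.001
params.adam.beta1 = 0.9
params.adam.beta2 = 0.999
params.adam.epsilon = 1e-8


def adam_effective_lr(user_lr, beta1, beta2, t):
  """Learning rate to feed at step t, per the AdamParameters comment above."""
  return user_lr * math.sqrt(1.0 - beta2**t) / (1.0 - beta1**t)


print(adam_effective_lr(0.001, 0.9, 0.999, t=1))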
diff --git a/tensorflow/contrib/tpu/proto/tpu_embedding_config.proto b/tensorflow/contrib/tpu/proto/tpu_embedding_config.proto
index b0ec968d3a..3476cc8953 100644
--- a/tensorflow/contrib/tpu/proto/tpu_embedding_config.proto
+++ b/tensorflow/contrib/tpu/proto/tpu_embedding_config.proto
@@ -2,6 +2,8 @@ syntax = "proto3";
package tensorflow.tpu;
+import "tensorflow/contrib/tpu/proto/optimization_parameters.proto";
+
// The TPUEmbeddingConfiguration contains specification of TPU Embedding lookups
// and gradient updates separate from the TF Graph.
message TPUEmbeddingConfiguration {
@@ -30,15 +32,6 @@ message TPUEmbeddingConfiguration {
// The number of training examples per TensorNode.
int32 batch_size = 4;
- message GradientDescentOptimizer {
- float learning_rate = 1;
- }
-
- message AdagradOptimizer {
- float learning_rate = 1;
- float initial_accumulator = 2;
- }
-
// Each Embedding
message TPUEmbeddingTable {
// Name of the embedding table. This will be used to name Variables in the
@@ -66,10 +59,7 @@ message TPUEmbeddingConfiguration {
// separately to the convolutional or recurrent network.
int32 num_features = 5;
- oneof optimizer {
- GradientDescentOptimizer gradient_descent = 6;
- AdagradOptimizer adagrad = 7;
- }
+ OptimizationParameters optimization_parameters = 6;
}
repeated TPUEmbeddingTable table_config = 5;
diff --git a/tensorflow/contrib/tpu/python/tpu/error_handling.py b/tensorflow/contrib/tpu/python/tpu/error_handling.py
new file mode 100644
index 0000000000..14659fe68f
--- /dev/null
+++ b/tensorflow/contrib/tpu/python/tpu/error_handling.py
@@ -0,0 +1,132 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ===================================================================
+"""ErrorRendezvous handler for collecting errors from multiple threads."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import contextlib
+import threading
+import time
+import traceback
+
+
+from tensorflow.python.framework import errors
+from tensorflow.python.platform import tf_logging as logging
+
+_UNINTERESTING_ERRORS = (errors.CancelledError,)
+
+
+class ErrorRendezvous(object):
+ """Resolve errors from multiple threads during TPU execution.
+
+ TPU errors can occur on the infeed or outfeed threads as well as the main
+ training thread.
+
+ Depending on which thread "wins" and receives the session error first, we may
+ end up showing users a confusing and non-actionable error message (session
+ cancelled) instead of a root cause (e.g. a bad filename).
+
+ The rendezvous object provides a location to capture these errors until all
+ threads terminate. At that point we can choose the most informative error
+ to report.
+ """
+
+ def __init__(self, num_sources):
+ # string -> (message, traceback)
+ self._errors = {}
+ self._num_sources = num_sources
+ self._session_cancel_timer = None
+
+ def record_error(self, source, exception, session=None):
+ """Report an exception from the given source.
+
+ If a session is passed, a timer will be registered to close it after a few
+ seconds. This is necessary to ensure the main training loop does not hang
+ if an infeed/outfeed error occurs. We sleep a few seconds to allow a more
+ interesting error from another thread to propagate.
+
+ Args:
+ source: string, source of the error
+ exception: Exception being thrown
+ session: Session to close after delay.
+ """
+ logging.info('Error recorded from %s: %s', source, exception)
+ stack_trace = traceback.format_exc()
+ self._errors[source] = (exception, stack_trace)
+
+ if session is not None and self._session_cancel_timer is None:
+
+ def _cancel_session():
+ time.sleep(5)
+ try:
+ session.close()
+ except: # pylint: disable=bare-except
+ pass
+
+ self._session_cancel_timer = threading.Thread(target=_cancel_session,)
+ self._session_cancel_timer.daemon = True
+ self._session_cancel_timer.start()
+
+ def record_done(self, source):
+ """Mark execution source `source` as done.
+
+ If an error was originally reported from `source` it is left intact.
+
+ Args:
+ source: `str`, source being recorded
+ """
+ logging.info('%s marked as finished', source)
+ if source not in self._errors:
+ self._errors[source] = None
+
+ @contextlib.contextmanager
+ def catch_errors(self, source, session=None):
+ """Context manager to report any errors within a block."""
+ try:
+ yield
+ except Exception as e: # pylint: disable=broad-except
+ self.record_error(source, e, session)
+
+ def raise_errors(self, timeout_sec=0):
+    """Wait for up to `timeout_sec` seconds for all error sources to finish.
+
+    Preferentially raise "interesting" errors (errors not in the
+    _UNINTERESTING_ERRORS set).
+
+ Args:
+ timeout_sec: Seconds to wait for other error sources.
+ """
+ for _ in range(timeout_sec):
+ if len(self._errors) == self._num_sources:
+ break
+ time.sleep(1)
+
+ kept_errors = [(k, v) for (k, v) in self._errors.items() if v is not None]
+
+ if not kept_errors:
+ return
+
+ # First check for any interesting errors, then fall back on the session
+ # cancelled errors etc.
+ for k, (exc, _) in kept_errors:
+ if isinstance(exc, _UNINTERESTING_ERRORS):
+ continue
+ else:
+ raise exc
+
+ for k, (exc, _) in kept_errors:
+ raise exc
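A minimal usage sketch of the `ErrorRendezvous` API defined above; `sess`, `run_infeed_loop`, and `run_training_loop` are hypothetical placeholders for the real session, infeed loop, and training loop:

```python
import threading

rendezvous = ErrorRendezvous(num_sources=2)

def infeed_worker(sess):
  # Errors raised inside the block are recorded under source 'infeed'.
  with rendezvous.catch_errors(source='infeed', session=sess):
    run_infeed_loop(sess)  # hypothetical infeed loop
  rendezvous.record_done('infeed')

infeed_thread = threading.Thread(target=infeed_worker, args=(sess,))
infeed_thread.daemon = True
infeed_thread.start()

try:
  run_training_loop(sess)  # hypothetical training loop
except Exception as e:  # pylint: disable=broad-except
  rendezvous.record_error('train', e)
finally:
  rendezvous.record_done('train')
  infeed_thread.join()
  # Re-raises the most informative recorded error, preferring errors that are
  # not in _UNINTERESTING_ERRORS (e.g. CancelledError).
  rendezvous.raise_errors(timeout_sec=30)
```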
diff --git a/tensorflow/contrib/tpu/python/tpu/keras_support.py b/tensorflow/contrib/tpu/python/tpu/keras_support.py
index 6e9c607f2e..81798ee423 100644
--- a/tensorflow/contrib/tpu/python/tpu/keras_support.py
+++ b/tensorflow/contrib/tpu/python/tpu/keras_support.py
@@ -45,6 +45,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import abc
import collections
import contextlib
import re
@@ -63,8 +64,11 @@ from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
+from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import model_fn as model_fn_lib
+from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import models
@@ -72,7 +76,9 @@ from tensorflow.python.keras import optimizers as keras_optimizers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.layers import embeddings
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
@@ -199,7 +205,6 @@ class TPURewriteContext(object):
caller_obj = caller_frame.f_locals.get('self')
if (caller_obj is not None and
isinstance(caller_obj, base_layer.Layer) and name is not None):
- logging.info('Intercepted name_scope: %s', caller_obj)
return variable_scope.variable_scope(
name, default_name, values, reuse=variable_scope.AUTO_REUSE)
@@ -208,8 +213,51 @@ class TPURewriteContext(object):
self._default_placeholder = array_ops.placeholder
self._default_name_scope = ops.name_scope
self._default_make_variable = base_layer.make_variable
+ self._default_random_normal = random_ops.random_normal
+ self._default_qr = gen_linalg_ops.qr
array_ops.placeholder = _placeholder
+
+ # Replace random_ops.random_normal with a dummy function because
+ # `random_normal` isn't yet implemented on the TPU. Because these
+ # initialized values are overwritten by the CPU values, this is okay.
+ def random_normal(shape,
+ mean=0.0,
+ stddev=1.0,
+ dtype=dtypes.float32,
+ seed=None,
+ name=None):
+ del mean
+ del stddev
+ del seed
+ return array_ops.zeros(shape, dtype=dtype, name=name)
+
+ random_ops.random_normal = random_normal
+
+ # Replace gen_linalg_ops.qr because QR decomposition is not yet implemented.
+ # TODO(saeta): Remove qr override once we confirm the qr implementation is
+ # ok.
+ # pylint: disable=redefined-builtin
+ def qr(input, full_matrices=False, name=None):
+ """Dummy implementation of qr decomposition."""
+ del full_matrices # TODO(saeta): Properly handle the full matrix case.
+ input_shape = input.shape
+ if len(input_shape) < 2:
+ raise ValueError('Invalid shape passed to qr: %s' % input_shape)
+ p = min(input_shape[-1], input_shape[-2])
+ if len(input_shape) == 2:
+ q = array_ops.zeros((p, p), name=name)
+ r = array_ops.zeros(input_shape, name=name)
+ return (r, q)
+ elif len(input_shape) == 3:
+ n = input_shape[0]
+ q = array_ops.zeros((n, p, p), name=name)
+ r = array_ops.zeros(input_shape, name=name)
+ return (r, q)
+ else:
+ raise ValueError('Invalid shape passed to qr: %s' % input_shape)
+ gen_linalg_ops.qr = qr
+
ops.name_scope = _name_scope
base_layer.make_variable = variable_scope.get_variable
logging.info('Overriding default placeholder.')
@@ -219,6 +267,334 @@ class TPURewriteContext(object):
array_ops.placeholder = self._default_placeholder
ops.name_scope = self._default_name_scope
base_layer.make_variable = self._default_make_variable
+ random_ops.random_normal = self._default_random_normal
+ gen_linalg_ops.qr = self._default_qr
+
+
+class SizedInfeed(collections.namedtuple('SizedInfeed',
+ ['sharded_infeed_tensors',
+ 'infeed_ops'])):
+ """Represents an instantiation of the infeed ops for a concrete input shape.
+
+ sharded_infeed_tensors: A data structure of Tensors used to represent the
+ placeholder tensors that must be fed when using feed_dicts.
+
+ infeed_ops: the set of ops that will be run to drive infeed for a single step.
+ """
+ pass
+
+
+class TPUInfeedInstance(object):
+ """TPUInfeedInstance represents the logic to manage feeding in a single step.
+
+ See the comments on the `TPUInfeedManager` for a description for how infeed
+ is managed.
+ """
+
+ @abc.abstractmethod
+ def make_input_specs(self, input_tensors):
+ """Constructs the infeed_specs for the given Infeed instance.
+
+ Args:
+ input_tensors: The inputs to the model.
+
+ Returns:
+ A list of
+      A list of `tensor_spec.TensorSpec`s describing the model inputs.
+ pass
+
+ def make_feed_dict(self, tpu_model_op):
+ """Constructs a feed_dict for this instance, given the tpu_model_op.
+
+ Args:
+ tpu_model_op: A `TPUModelOp` representing the TPU Model for this
+ instance's input spec.
+
+ Returns:
+ A dictionary to use as the feed_dict of a `session.run` call.
+ """
+ pass
+
+
+class TPUInfeedManager(object):
+  """TPUInfeedManager manages feeding data into a TPU computation.
+
+ Because there are multiple data sources (e.g. in-memory NumPy arrays,
+ `tf.data.Dataset`s), we abstract the different logic behind a single
+ interface: the `TPUInfeedManager`.
+
+ (1) A `TPUFunction` is called with a set of inputs. Based on the inputs,
+ `TPUFunction` retrieves the corresponding `TPUInfeedManager` (or constructs a
+ new one if required).
+
+ (2) The `TPUFunction` calls `make_infeed_instance` on the `TPUInfeedManager`
+ which returns a `TPUInfeedInstance`.
+
+  (3) The `TPUFunction` checks the shape cache for a pre-compiled instance of
+ the model based on the returned `input_specs` from `TPUInfeedInstance`.
+
+ (4) [Optional.] If the model has not already been instantiated for the given
+ input spec, the `TPUFunction` compiles the model for the input spec (using the
+ `TPUInfeedManager`).
+
+ (5) The `TPUInfeedInstance` constructs the session.run's feed_dict given the
+ compiled model instance corresponding to its shape.
+ """
+
+ @abc.abstractmethod
+ def make_infeed_instance(self, inputs):
+ """Given a single step's input, construct a `TPUInfeedInstance`.
+
+ Args:
+ inputs: The inputs to a given step.
+
+ Returns:
+ A subclass of `TPUInfeedInstance`.
+ """
+ pass
+
+ @abc.abstractmethod
+ def build_infeed_from_input_specs(self, input_specs, execution_mode):
+ """For a given input specification (size, type), construct the infeed ops.
+
+ This is called only once for a given input specification and builds the
+ graph ops. It does not have a pointer to the actual infeed data.
+
+ Args:
+ input_specs: TODO(saeta): Document me!
+ execution_mode: TODO(saeta): Document me!
+
+ Returns:
+ A `SizedInfeed` instance.
+ """
+ pass
+
+
+class TPUNumpyInfeedManager(TPUInfeedManager):
+ """TPU Infeed manager for Numpy inputs."""
+
+ class NumpyInfeedInstance(TPUInfeedInstance):
+ """Infeed instance for Numpy inputs."""
+
+ def __init__(self, sharded_inputs):
+ self._sharded_inputs = sharded_inputs
+
+ def make_input_specs(self, input_tensors):
+ # Compute an input specification (used to generate infeed enqueue and
+ # dequeue operations). We use the shape from our input array and the
+ # dtype from our model. A user may pass in a float64 for a float32
+ # input: for model compatibility we still must generate a float32 infeed.
+ input_specs = []
+ # We use the shape and dtype from the first shard to compute the input
+ # metadata (`input_specs`); all replicas have the same type and shape.
+ for tensor, ary in zip(input_tensors, self._sharded_inputs[0]):
+ input_specs.append(
+ tensor_spec.TensorSpec(ary.shape, tensor.dtype,
+ _valid_name(tensor.name)))
+
+ return input_specs
+
+ def make_feed_dict(self, tpu_model_op):
+ infeed_dict = {}
+ for infeed_tensors, inputs in zip(tpu_model_op.infeed_tensors,
+ self._sharded_inputs):
+ for tensor, value in zip(infeed_tensors, inputs):
+ infeed_dict[tensor] = value
+ return infeed_dict
+
+ def __init__(self, distribution_strategy):
+ self._strategy = distribution_strategy
+
+ def _split_tensors(self, inputs):
+ """Split input data across shards.
+
+ Each input is sliced along the batch axis.
+
+ Args:
+ inputs: List of Numpy arrays to run on the TPU.
+
+ Returns:
+ List of lists containing the input to feed to each TPU shard.
+ """
+ if self._strategy.num_towers == 1:
+ return [inputs]
+
+ batch_size = inputs[0].shape[0]
+ assert batch_size % self._strategy.num_towers == 0, (
+ 'batch_size must be divisible by strategy.num_towers (%s vs %s)' %
+ (batch_size, self._strategy.num_towers))
+ shard_size = batch_size // self._strategy.num_towers
+ input_list = []
+ for index in range(self._strategy.num_towers):
+ shard_inputs = [
+ x[index * shard_size:(index + 1) * shard_size] for x in inputs
+ ]
+ input_list.append(shard_inputs)
+ return input_list
+
+ def make_infeed_instance(self, inputs):
+ sharded_inputs = self._split_tensors(inputs)
+ return self.NumpyInfeedInstance(sharded_inputs)
+
+ def build_infeed_from_input_specs(self, input_specs, execution_mode):
+ infeed_op = []
+ shard_infeed_tensors = []
+
+ for shard_id in range(self._strategy.num_towers):
+ with ops.device('/device:CPU:0'):
+ infeed_tensors = []
+ with ops.device('/device:TPU:%d' % shard_id):
+ for spec in input_specs:
+ # Construct placeholders for each of the inputs.
+ infeed_tensors.append(
+ array_ops.placeholder(
+ dtype=spec.dtype,
+ shape=spec.shape,
+ name='infeed-enqueue-%s-%d' % (spec.name, shard_id)))
+ shard_infeed_tensors.append(infeed_tensors)
+
+ infeed_op.append(
+ tpu_ops.infeed_enqueue_tuple(
+ infeed_tensors, [spec.shape for spec in input_specs],
+ name='infeed-enqueue-%s-%d' % (execution_mode, shard_id),
+ device_ordinal=shard_id))
+ return SizedInfeed(infeed_ops=infeed_op,
+ sharded_infeed_tensors=shard_infeed_tensors)
+
+
+class TPUDatasetInfeedManager(TPUInfeedManager):
+  """Manages infeed from a `tf.data.Dataset` into a TPU computation.
+ """
+
+ class DatasetInfeedInstance(TPUInfeedInstance):
+ """An instance of the TPU infeed."""
+
+ def __init__(self, input_specs):
+ self._input_specs = input_specs
+
+ def make_input_specs(self, input_tensors):
+ # TODO(saeta): Do error checking here!
+ return self._input_specs
+
+ def make_feed_dict(self, tpu_model_op):
+ # TODO(saeta): Verify tpu_model_op is as expected!
+ return {}
+
+ def __init__(self, dataset, distribution_strategy, tpu_session):
+ """Constructs a TPUDatasetInfeedManager.
+
+ Must be called within a `KerasTPUModel.tpu_session` context!
+
+ Args:
+ dataset: A `tf.data.Dataset` to infeed.
+ distribution_strategy: The `TPUDistributionStrategy` used to configure the
+ Keras TPU model.
+ tpu_session: The `tf.Session` object used for running the TPU model.
+ """
+ self._verify_dataset_shape(dataset)
+ self._dataset = dataset
+ self._strategy = distribution_strategy
+ dummy_x_shape = dataset.output_shapes[0].as_list()
+ dummy_x_shape[0] *= distribution_strategy.num_towers
+ dummy_y_shape = dataset.output_shapes[1].as_list()
+ dummy_y_shape[0] *= distribution_strategy.num_towers
+ self._iterator = dataset.make_initializable_iterator()
+ tpu_session.run(self._iterator.initializer)
+
+ self._get_next_ops = []
+ ctrl_deps = []
+ for i in range(distribution_strategy.num_towers):
+      with ops.control_dependencies(ctrl_deps):  # Ensure deterministic ordering.
+ # TODO(saeta): Ensure correct placement!
+ get_next_op = self._iterator.get_next()
+ self._get_next_ops.append(get_next_op)
+ ctrl_deps.extend(get_next_op)
+
+ # Use dummy numpy inputs for the rest of Keras' shape checking. We
+ # intercept them when building the model.
+ self._dummy_x = np.zeros(dummy_x_shape,
+ dtype=dataset.output_types[0].as_numpy_dtype)
+ self._dummy_y = np.zeros(dummy_y_shape,
+ dtype=dataset.output_types[1].as_numpy_dtype)
+
+ input_specs = []
+ if isinstance(self._iterator.output_shapes, tuple):
+ assert isinstance(self._iterator.output_types, tuple)
+ assert len(self._iterator.output_shapes) == len(
+ self._iterator.output_types)
+ for i in range(len(self._iterator.output_shapes)):
+ spec = tensor_spec.TensorSpec(self._iterator.output_shapes[i],
+ self._iterator.output_types[i])
+ input_specs.append(spec)
+ elif isinstance(self._iterator.output_shapes, tensor_shape.TensorShape):
+ spec = tensor_spec.TensorSpec(self._iterator.output_shapes,
+ self._iterator.output_types)
+ input_specs.append(spec)
+
+ self._infeed_instance = self.DatasetInfeedInstance(input_specs)
+
+ def _verify_dataset_shape(self, dataset):
+ """Verifies a dataset is of an appropriate shape for TPUs."""
+ if not isinstance(dataset, dataset_ops.Dataset):
+ raise ValueError('The function passed as the `x` parameter did not '
+ 'return a `tf.data.Dataset`.')
+ if not isinstance(dataset.output_classes, tuple):
+ raise ValueError('The dataset must return a tuple of tf.Tensors, '
+ 'instead it returns: %s' % dataset.output_classes)
+ if len(dataset.output_classes) != 2:
+ raise ValueError(
+ 'The dataset must return a 2-element tuple, got '
+ '%s output classes instead.' % (dataset.output_classes,))
+ for i, cls in enumerate(dataset.output_classes):
+ if cls != ops.Tensor:
+ raise ValueError('The dataset returned a non-Tensor type (%s) at '
+ 'index %d.' % (cls, i))
+ for i, shape in enumerate(dataset.output_shapes):
+ if not shape:
+ raise ValueError('The dataset returns a scalar tensor in '
+ 'tuple index %d. Did you forget to batch? '
+ '(Output shapes: %s).' % (i,
+ dataset.output_shapes))
+ for j, dim in enumerate(shape):
+ if dim.value is None:
+ if j == 0:
+ hint = (' Hint: did you use `ds.batch(BATCH_SIZE, '
+ 'drop_remainder=True)`?')
+ else:
+ hint = ''
+ raise ValueError(
+ 'The Keras-TPU integration for `tf.data` '
+ 'currently requires static shapes. The provided '
+ 'dataset only has a partially defined shape. '
+ '(Dimension %d of output tensor %d is not statically known '
+              'for output shapes: %s.%s)' % (j, i, dataset.output_shapes, hint))
+
+ @property
+ def dummy_x(self):
+ return self._dummy_x
+
+ @property
+ def dummy_y(self):
+ return self._dummy_y
+
+ def make_infeed_instance(self, inputs):
+ # TODO(saeta): Verify inputs is as expected.
+ return self._infeed_instance
+
+ def build_infeed_from_input_specs(self, input_specs, execution_mode):
+ shard_infeed_tensors = self._get_next_ops
+ assert len(shard_infeed_tensors) == self._strategy.num_towers
+ infeed_ops = []
+ for shard_id in range(self._strategy.num_towers):
+ with ops.device('/device:CPU:0'):
+ infeed_ops.append(
+ tpu_ops.infeed_enqueue_tuple(
+ shard_infeed_tensors[shard_id],
+ [spec.shape for spec in input_specs],
+ name='infeed-enqueue-%s-%d' % (execution_mode, shard_id),
+ device_ordinal=shard_id))
+ return SizedInfeed(infeed_ops=infeed_ops,
+ sharded_infeed_tensors=shard_infeed_tensors)
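A rough sketch of the five-step protocol described in the `TPUInfeedManager` docstring, using the Numpy manager added above; `strategy`, `inputs`, `input_tensors`, `tpu_model_op`, and `session` stand in for the objects that `TPUFunction` manages internally:

```python
# (1) Pick or construct the infeed manager for this kind of input.
infeed_manager = TPUNumpyInfeedManager(strategy)

# (2) Build a per-step infeed instance from this step's inputs.
infeed_instance = infeed_manager.make_infeed_instance(inputs)

# (3) Derive the input specs used as the compilation-cache key.
input_specs = infeed_instance.make_input_specs(input_tensors)

# (4) On a cache miss, build the infeed ops for this input specification.
sized_infeed = infeed_manager.build_infeed_from_input_specs(
    input_specs, execution_mode='train')

# (5) Feed the actual values for this step and run the enqueue ops.
feed_dict = infeed_instance.make_feed_dict(tpu_model_op)
session.run(sized_infeed.infeed_ops, feed_dict=feed_dict)
```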
class TPUFunction(object):
@@ -246,7 +622,7 @@ class TPUFunction(object):
if not isinstance(self.model.optimizer, keras_optimizers.TFOptimizer):
self._optimizer_config = self.model.optimizer.get_config()
- def _specialize_model(self, input_specs):
+ def _specialize_model(self, input_specs, infeed_manager):
"""Specialize `self.model` (a Keras model) for the given input shapes."""
# Re-create our input and output layers inside our subgraph. They will be
# attached to the true computation when we clone our model in `tpu_fn`.
@@ -272,8 +648,8 @@ class TPUFunction(object):
name='infeed-%s' % self.execution_mode)
assert len(infeed_tensors) == len(infeed_layers), (
- 'Infeed inputs did not match model: %s vs %s', (infeed_layers,
- infeed_tensors))
+ 'Infeed inputs did not match model: %s vs %s' % (infeed_layers,
+ infeed_tensors))
tpu_targets = []
tpu_input_map = {}
@@ -287,7 +663,9 @@ class TPUFunction(object):
# Clone our CPU model, running within the TPU device context.
with TPURewriteContext(tpu_input_map):
- self._cloned_model = models.clone_model(self.model)
+ # TODO(power): Replicate variables.
+ with ops.device('/device:TPU:0'):
+ self._cloned_model = models.clone_model(self.model)
# Create a copy of the optimizer for this graph.
if isinstance(self.model.optimizer, keras_optimizers.TFOptimizer):
@@ -360,37 +738,24 @@ class TPUFunction(object):
# Generate CPU side operations to enqueue features/labels and dequeue
# outputs from the model call.
- infeed_op = []
+ sized_infeed = infeed_manager.build_infeed_from_input_specs(
+ input_specs, self.execution_mode)
+ # Build output ops.
outfeed_op = []
- shard_infeed_tensors = []
-
for shard_id in range(self._strategy.num_towers):
- with ops.device('/device:TPU:%d' % shard_id):
- infeed_tensors = []
- for spec in input_specs:
- infeed_tensors.append(
- array_ops.placeholder(
- dtype=spec.dtype,
- shape=spec.shape,
- name='infeed-enqueue-%s-%d' % (spec.name, shard_id)))
- shard_infeed_tensors.append(infeed_tensors)
-
- infeed_op.append(
- tpu_ops.infeed_enqueue_tuple(
- infeed_tensors, [spec.shape for spec in input_specs],
- name='infeed-enqueue-%s-%d' % (self.execution_mode, shard_id)))
-
+ with ops.device('/device:CPU:0'):
outfeed_op.extend(
tpu_ops.outfeed_dequeue_tuple(
dtypes=[spec.dtype for spec in self._outfeed_spec],
shapes=[spec.shape for spec in self._outfeed_spec],
- name='outfeed-dequeue-%s-%d' % (self.execution_mode, shard_id)))
+ name='outfeed-dequeue-%s-%d' % (self.execution_mode, shard_id),
+ device_ordinal=shard_id))
return TPUModelOp(
compile_op,
execute_op,
- infeed_tensors=shard_infeed_tensors,
- infeed_op=infeed_op,
+ infeed_tensors=sized_infeed.sharded_infeed_tensors,
+ infeed_op=sized_infeed.infeed_ops,
outfeed_op=outfeed_op)
def _test_model_compiles(self, tpu_model_ops):
@@ -409,36 +774,17 @@ class TPUFunction(object):
logging.info('Finished compiling. Time elapsed: %s secs',
end_time - start_time)
- def _split_tensors(self, inputs):
- """Split input data across shards.
-
- Each input is sliced along the batch axis.
-
- Args:
- inputs: List of Numpy arrays to run on the TPU.
-
- Returns:
- List of lists containing the input to feed to each TPU shard.
- """
- if self._strategy.num_towers == 1:
- return [inputs]
-
- batch_size = inputs[0].shape[0]
- assert batch_size % self._strategy.num_towers == 0, (
- 'batch_size must be divisible by strategy.num_towers (%s vs %s)' %
- (batch_size, self._strategy.num_towers))
- shard_size = batch_size // self._strategy.num_towers
- input_list = []
- for index in range(self._strategy.num_towers):
- shard_inputs = [
- x[index * shard_size:(index + 1) * shard_size] for x in inputs
- ]
- input_list.append(shard_inputs)
- return input_list
-
def __call__(self, inputs):
assert isinstance(inputs, list)
+ infeed_manager = None
+ for x, mgr in self.model._numpy_to_infeed_manager_list:
+ if inputs[0] is x:
+ infeed_manager = mgr
+ break
+ if infeed_manager is None:
+ infeed_manager = TPUNumpyInfeedManager(self.model._strategy)
+
# Strip sample weight from inputs
if (self.execution_mode == model_fn_lib.ModeKeys.TRAIN or
self.execution_mode == model_fn_lib.ModeKeys.EVAL):
@@ -447,21 +793,9 @@ class TPUFunction(object):
else:
input_tensors = self.model._feed_inputs
- shard_inputs = self._split_tensors(inputs)
+ infeed_instance = infeed_manager.make_infeed_instance(inputs)
del inputs # To avoid accident usage.
-
- # Compute an input specification (used to generate infeed enqueue and
- # dequeue operations). We use the shape from our input array and the
- # dtype from our model. A user may pass in a float64 for a float32
- # input: for model compatibility we still must generate a float32 infeed.
- input_specs = []
-
- # We use the shape and dtype from the first shard to compute the input
- # metadata (`input_specs`); all replicas have the same type and shape.
- for tensor, ary in zip(input_tensors, shard_inputs[0]):
- input_specs.append(
- tensor_spec.TensorSpec(ary.shape, tensor.dtype,
- _valid_name(tensor.name)))
+ input_specs = infeed_instance.make_input_specs(input_tensors)
# XLA requires every operation in the graph has a fixed shape. To
# handle varying batch sizes we recompile a new sub-graph for each
@@ -472,7 +806,8 @@ class TPUFunction(object):
with self.model.tpu_session():
logging.info('New input shapes; (re-)compiling: mode=%s, %s',
self.execution_mode, input_specs)
- new_tpu_model_ops = self._specialize_model(input_specs)
+ new_tpu_model_ops = self._specialize_model(input_specs,
+ infeed_manager)
self._compilation_cache[shape_key] = new_tpu_model_ops
self._test_model_compiles(new_tpu_model_ops)
@@ -480,11 +815,7 @@ class TPUFunction(object):
self.model._initialize_weights(self._cloned_model)
tpu_model_ops = self._compilation_cache[shape_key]
- infeed_dict = {}
- for infeed_tensors, inputs in zip(tpu_model_ops.infeed_tensors,
- shard_inputs):
- for tensor, value in zip(infeed_tensors, inputs):
- infeed_dict[tensor] = value
+ infeed_dict = infeed_instance.make_feed_dict(tpu_model_ops)
with self.model.tpu_session() as session:
_, _, outfeed_outputs = session.run([
@@ -518,6 +849,11 @@ class KerasTPUModel(models.Model):
name=cpu_model.name,
)
+ # Create a mapping from numpy arrays to infeed managers.
+ # Note: uses a list of tuples instead of a map because numpy arrays are
+ # not hashable.
+ self._numpy_to_infeed_manager_list = []
+
self.predict_function = None
self.test_function = None
self.train_function = None
@@ -529,14 +865,16 @@ class KerasTPUModel(models.Model):
self._tpu_weights_initialized = False
self._graph = ops.Graph()
- cluster_resolver = tpu_cluster_resolver.TPUClusterResolver(
+ self._cluster_resolver = tpu_cluster_resolver.TPUClusterResolver(
tpu_name_or_address)
- cluster_spec = cluster_resolver.cluster_spec()
+ master = self._cluster_resolver.master()
+ cluster_spec = self._cluster_resolver.cluster_spec()
self._session = tf_session.Session(
graph=self._graph,
- target=cluster_resolver.master(),
+ target=master,
config=config_pb2.ConfigProto(isolate_session_state=True))
+ # TODO(saeta): Confirm the lines below work in ClusterSpec propagation env.
if cluster_spec:
self._session.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
@@ -588,6 +926,92 @@ class KerasTPUModel(models.Model):
sample_weight_mode, weighted_metrics,
target_tensors, **kwargs)
+ def fit(self,
+ x=None,
+ y=None,
+ batch_size=None,
+ epochs=1,
+ verbose=1,
+ callbacks=None,
+ validation_split=0.,
+ validation_data=None,
+ shuffle=True,
+ class_weight=None,
+ sample_weight=None,
+ initial_epoch=0,
+ steps_per_epoch=None,
+ validation_steps=None,
+ **kwargs):
+ assert not self._numpy_to_infeed_manager_list # Ensure empty.
+
+ infeed_managers = [] # Managers to clean up at the end of the fit call.
+ if isinstance(x, dataset_ops.Dataset):
+ # TODO(b/111413240): Support taking a tf.data.Dataset directly.
+ raise ValueError(
+ 'Taking a Dataset directly is not yet supported. Please '
+ 'wrap your dataset construction code in a function and '
+ 'pass that to fit instead. For examples, see: '
+ 'https://github.com/tensorflow/tpu/tree/master/models/experimental'
+ '/keras')
+ if callable(x):
+ with self.tpu_session() as sess:
+ dataset = x()
+ if steps_per_epoch is None:
+ raise ValueError('When using tf.data as input to a model, you '
+ 'should specify the steps_per_epoch argument.')
+ if y is not None:
+ raise ValueError('When using tf.data as input to a model, y must be '
+ 'None')
+ infeed_manager = TPUDatasetInfeedManager(dataset, self._strategy, sess)
+ # Use dummy numpy inputs for the rest of Keras' shape checking. We
+ # intercept them when building the model.
+ x = infeed_manager.dummy_x
+ y = infeed_manager.dummy_y
+ infeed_managers.append((x, infeed_manager))
+
+ if isinstance(validation_data, dataset_ops.Dataset):
+ # TODO(b/111413240): Support taking a tf.data.Dataset directly.
+ raise ValueError(
+ 'Taking a Dataset directly is not yet supported. Please '
+ 'wrap your dataset construction code in a function and '
+ 'pass that to fit instead. For examples, see: '
+ 'https://github.com/tensorflow/tpu/tree/master/models/experimental'
+ '/keras')
+ if callable(validation_data):
+ with self.tpu_session() as sess:
+ dataset = validation_data()
+ if validation_steps is None:
+ raise ValueError('When using tf.data as validation for a model, you '
+ 'should specify the validation_steps argument.')
+ infeed_manager = TPUDatasetInfeedManager(dataset, self._strategy, sess)
+ # Use dummy numpy inputs for the rest of Keras' shape checking. We
+ # intercept them when building the model.
+ val_x = infeed_manager.dummy_x
+ val_y = infeed_manager.dummy_y
+ infeed_managers.append((val_x, infeed_manager))
+ validation_data = (val_x, val_y)
+
+ self._numpy_to_infeed_manager_list = infeed_managers
+ try:
+ return super(KerasTPUModel, self).fit(
+ x,
+ y,
+ batch_size,
+ epochs,
+ verbose,
+ callbacks,
+ validation_split,
+ validation_data,
+ shuffle,
+ class_weight,
+ sample_weight,
+ initial_epoch,
+ steps_per_epoch,
+ validation_steps,
+ **kwargs)
+ finally:
+ self._numpy_to_infeed_manager_list = []
+
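A hedged usage sketch implied by the checks in `fit` above: the dataset is passed as a zero-argument callable, `y` is left unset, and `steps_per_epoch` is required; `features`, `labels`, the batch size, and `model` (a compiled KerasTPUModel) are assumed for illustration:

```python
import numpy as np
import tensorflow as tf

features = np.random.rand(1024, 32).astype(np.float32)  # assumed inputs
labels = np.random.rand(1024, 1).astype(np.float32)     # assumed targets

def make_dataset():
  # Static shapes are required, hence drop_remainder=True.
  dataset = tf.data.Dataset.from_tensor_slices((features, labels))
  return dataset.repeat().batch(128, drop_remainder=True)

model.fit(make_dataset, steps_per_epoch=8, epochs=2)
```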
def _make_train_function(self):
if not self.train_function:
self.train_function = TPUFunction(
@@ -669,10 +1093,10 @@ class KerasTPUModel(models.Model):
K.set_session(default_session)
def shutdown(self):
- logging.info('Shutting down TPU session.')
- with self.tpu_session() as session:
- session.run(tpu.shutdown_system())
-
+ # TODO(b/111364423): Actually shut down the system.
+ logging.info('Skipping shutting down TPU system.')
+ # with self.tpu_session() as session:
+ # session.run(tpu.shutdown_system())
self._session.close()
@@ -706,7 +1130,7 @@ Output shape: %(output_shape)s
'layer': layer,
'input_shape': layer.input_shape,
'output_shape': layer.output_shape
- })
+ })
@experimental
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu.py b/tensorflow/contrib/tpu/python/tpu/tpu.py
index 6a64893d9a..06885bbc25 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu.py
@@ -151,6 +151,41 @@ class TPUReplicateContext(control_flow_ops.XLAControlFlowContext):
self._name = name
self._unsupported_ops = []
self._pivot = pivot
+ self._replicated_vars = {}
+
+ def get_replicated_var_handle(self, var):
+ """Returns a variable handle for replicated TPU variable 'var'.
+
+    This is a method used by an experimental replicated variable
+ implementation and is not intended as a public API.
+
+ Args:
+ var: The replicated TPU variable.
+
+ Returns:
+ The handle of the TPU replicated input node.
+ """
+ handle = self._replicated_vars.get(var)
+ if handle is not None:
+ return handle
+
+ # Builds a TPUReplicatedInput node for the variable, if one does not already
+ # exist. The TPUReplicatedInput node must belong to the enclosing
+ # control-flow scope of the TPUReplicateContext.
+ # TODO(phawkins): consider changing the contract of the TPU encapsulation
+ # so the TPUReplicatedInput nodes go inside the TPUReplicateContext scope
+ # instead.
+
+ # pylint: disable=protected-access
+ graph = ops.get_default_graph()
+ saved_context = graph._get_control_flow_context()
+ graph._set_control_flow_context(self.outer_context)
+ handle = tpu_ops.tpu_replicated_input(
+ [v.handle for v in var._vars], name=var.name + "/handle")
+ graph._set_control_flow_context(saved_context)
+ # pylint: enable=protected-access
+ self._replicated_vars[var] = handle
+ return handle
def report_unsupported_operations(self):
if self._unsupported_ops:
@@ -598,23 +633,14 @@ def split_compile_and_replicate(computation,
with tpu_function.tpu_shard_context(
num_replicas), ops.control_dependencies([metadata]):
- # For backward compatibility reasons, we tag replicated inputs with the
- # _tpu_replicated_input attribute. This does nothing and exists only for
- # backward compatibility.
- # TODO(phawkins): delete the attr_scope after 6/28/2018.
- # pylint: disable=protected-access
- with graph._attr_scope({
- "_tpu_replicated_input": attr_value_pb2.AttrValue(b=True)
- }):
- # Add identity ops so even unused inputs are "consumed" by the
- # computation. This is to avoid orphaned TPUReplicatedInput nodes.
- # TODO(phawkins): consider instead pruning unused TPUReplicatedInput
- # and eliding trivial TPUReplicatedInput/TPUReplicatedOutput pairs.
- computation_inputs = [
- array_ops.identity(x, name="replicated_input_{}".format(i))
- for i, x in enumerate(computation_inputs)
- ]
- # pylint: enable=protected-access
+ # Add identity ops so even unused inputs are "consumed" by the
+ # computation. This is to avoid orphaned TPUReplicatedInput nodes.
+ # TODO(phawkins): consider instead pruning unused TPUReplicatedInput
+ # and eliding trivial TPUReplicatedInput/TPUReplicatedOutput pairs.
+ computation_inputs = [
+ array_ops.identity(x, name="replicated_input_{}".format(i))
+ for i, x in enumerate(computation_inputs)
+ ]
# If there is an infeed queue, adds the dequeued values to the
# computation's inputs.
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_context.py b/tensorflow/contrib/tpu/python/tpu/tpu_context.py
index 211c59cb90..a9cf54f77d 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu_context.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu_context.py
@@ -146,24 +146,7 @@ class TPUContext(object):
# Note that: For the non-model parallelism, the mapping could be
# a random permutation. The order should not matter in most cases
# as far as model is replicated to all cores in the system.
-
- # If the precise replica_id to device mapping is required, please
- # set the num_cores_per_replica to 1 in TPUConfig to enable the
- # model parallelism.
- if self._internal_ctx.model_parallelism_enabled:
- return RuntimeError(
- 'device_for_replica is not yet implemented for model parallelism. '
- 'b/79689078.')
-
- master = self._internal_ctx.master_job
- job_device = '' if master is None else ('/job:%s' % master)
-
- num_of_replicas_per_host = self._internal_ctx.num_of_replicas_per_host
- host_id = replica_id / num_of_replicas_per_host
- ordinal_id = replica_id % num_of_replicas_per_host
-
- host_device = '%s/task:%d/device:CPU:0' % (job_device, host_id)
- return (host_device, ordinal_id)
+ return self._internal_ctx.device_for_replica(replica_id)
class _InternalTPUContext(object):
@@ -595,7 +578,8 @@ class _InternalTPUContext(object):
raise ValueError(message)
if mode == model_fn_lib.ModeKeys.TRAIN:
- if self._train_batch_size % num_replicas != 0:
+ if (self._train_batch_size % num_replicas != 0 and
+ not self.is_input_broadcast_with_iterators()):
raise ValueError(
'train batch size {} must be divisible by number of replicas {}'
.format(self._train_batch_size, num_replicas))
@@ -605,11 +589,12 @@ class _InternalTPUContext(object):
raise ValueError(
'eval_batch_size in TPUEstimator constructor cannot be `None`'
'if .evaluate is running on TPU.')
- if self._eval_batch_size % num_replicas != 0:
+ if (self._eval_batch_size % num_replicas != 0 and
+ not self.is_input_broadcast_with_iterators()):
raise ValueError(
'eval batch size {} must be divisible by number of replicas {}'
.format(self._eval_batch_size, num_replicas))
- if num_hosts > 1:
+ if num_hosts > 1 and not self.is_input_broadcast_with_iterators():
raise ValueError(
'TPUEstimator.evaluate should be running on single TPU worker. '
'got {}.'.format(num_hosts))
@@ -619,11 +604,12 @@ class _InternalTPUContext(object):
raise ValueError(
'predict_batch_size in TPUEstimator constructor should not be '
'`None` if .predict is running on TPU.')
- if self._predict_batch_size % num_replicas != 0:
+ if (self._predict_batch_size % num_replicas != 0 and
+ not self.is_input_broadcast_with_iterators()):
raise ValueError(
'predict batch size {} must be divisible by number of replicas {}'
.format(self._predict_batch_size, num_replicas))
- if num_hosts > 1:
+ if num_hosts > 1 and not self.is_input_broadcast_with_iterators():
raise ValueError(
'TPUEstimator.predict should be running on single TPU worker. '
'got {}.'.format(num_hosts))
@@ -631,6 +617,33 @@ class _InternalTPUContext(object):
# Record the state "validated" into lazy dictionary.
self._lazy_validation_dict[mode] = True
+ def device_for_replica(self, replica_id):
+    """Returns the tuple of (CPU device, device ordinal) for a replica.
+
+    This should be used for full replication without model parallelism.
+
+ Args:
+ replica_id: Int, the replica index.
+
+ Returns:
+ A tuple of device spec for CPU device and int device ordinal.
+ """
+ master = self.master_job
+
+ if self.model_parallelism_enabled:
+ return (self.device_assignment.host_device(
+ replica=replica_id, job=master),
+ self.device_assignment.tpu_ordinal(replica=replica_id))
+
+ job_device = '' if master is None else ('/job:%s' % master)
+
+ num_of_replicas_per_host = self.num_of_replicas_per_host
+ host_id = replica_id / num_of_replicas_per_host
+ ordinal_id = replica_id % num_of_replicas_per_host
+
+ host_device = '%s/task:%d/device:CPU:0' % (job_device, host_id)
+ return (host_device, ordinal_id)
+
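Illustrative arithmetic for the non-model-parallel branch above, with assumed values (8 replicas per host, a master job named 'tpu_worker'):

```python
num_of_replicas_per_host = 8        # assumed
replica_id = 10                     # assumed
host_id = replica_id // num_of_replicas_per_host     # 1
ordinal_id = replica_id % num_of_replicas_per_host   # 2
host_device = '/job:tpu_worker/task:%d/device:CPU:0' % host_id
# device_for_replica(10) -> ('/job:tpu_worker/task:1/device:CPU:0', 2)
```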
class _OneCoreTPUContext(_InternalTPUContext):
"""Special _InternalTPUContext for one core usage."""
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py b/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
index 74157a6193..2c7e7d84c0 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
@@ -24,7 +24,6 @@ import os
import signal
import threading
import time
-import traceback
import numpy as np
import six
@@ -32,6 +31,7 @@ from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
+from tensorflow.contrib.tpu.python.tpu import error_handling
from tensorflow.contrib.tpu.python.tpu import session_support
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
@@ -365,17 +365,17 @@ class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
ctx,
enqueue_ops,
dequeue_ops,
- run_infeed_loop_on_coordinator=True):
+ run_infeed_loop_on_coordinator=True,
+ rendezvous=None):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
+ self._rendezvous = rendezvous
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
- self._session_cancel_timer = None
-
self._feed_error = None
self._finished = False
@@ -392,62 +392,6 @@ class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
- def _log_error(self, session, error):
- """Log an infeed or outfeed error.
-
- This logs a short error message immediately, and schedules a timer to
- emit the full stack trace and error message after a short period of time.
- If the main session has terminated by the time the timer triggers, we
- assume the real source of the error was from the main session and avoid
- emitting a stack trace for the infeed.
-
- Args:
- session: `tf.Session`, session to be terminated error: exception that
- triggered logging.
- error: the Exception to log.
- """
- logging.warning(
- '\n\n'
- 'Error occurred during infeed/outfeed. This may be due to a compile '
- 'error in the main session. Waiting for a short time for the main '
- 'session to come back.\n\n%s', error)
-
- self._feed_error = traceback.format_exc()
-
- # If we've already encountered a feed error, don't schedule another
- # cancellation op.
- if self._session_cancel_timer:
- return
-
- def _cancel_session():
- """Close the session to avoid the main thread from hanging.
-
- If input pipeline triggers any error, the infeed thread dies but the main
- thread for TPU computation waits for the infeed enqueue forever. Close the
- Session to cancel the main thread Session.run execution.
-
- We sleep for a few seconds before closing to give some time for the TPU
- compilation error, if any, propagating, from TPU to CPU host. Compilation
- errors should be reported by the main thread so that the program can be
- interrupted and users can take action. Due to a race condition, the
- infeed thread might see an error first. Closing the session here
- immediately would result in a session cancellation exception in the main
- thread, instead of the expected compile error. User code that depends on
- having the proper exception type will therefore be confused.
- """
- time.sleep(5)
-
- # If the main session is still running, the infeed/outfeed errors are
- # legitimate, and should be logged.
- if not self._finished and self._feed_error:
- logging.error('Feed error: %s', self._feed_error)
- logging.error('Closing session. A RuntimeError should follow.')
- session.close()
-
- self._session_cancel_timer = threading.Thread(target=_cancel_session)
- self._session_cancel_timer.daemon = True
- self._session_cancel_timer.start()
-
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
@@ -456,7 +400,7 @@ class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
time.sleep(self._initial_infeed_sleep_secs)
logging.info('%s thread starting after sleep', self._name)
- try:
+ with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
@@ -466,19 +410,15 @@ class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
- except Exception as e: # pylint: disable=broad-except
- self._log_error(session, e)
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
- try:
+ with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
logging.info('Outfeed thread finished, shutting down.')
- except Exception as e: # pylint: disable=broad-except
- self._log_error(session, e)
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
@@ -497,11 +437,6 @@ class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
def before_run(self, run_context):
self._feed_error = None
- # Wait for the cancellation timer to complete before continuing.
- if self._session_cancel_timer:
- self._session_cancel_timer.join()
- self._session_cancel_timer = None
-
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
@@ -512,16 +447,14 @@ class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
- if self._session_cancel_timer:
- logging.warning('Feed error occurred; waiting for message.')
- self._session_cancel_timer.join()
-
self._finished = True
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
+ self._rendezvous.record_done('infeed')
logging.info('Stop output thread controller')
self._outfeed_controller.join()
+ self._rendezvous.record_done('outfeed')
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
@@ -529,9 +462,10 @@ class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
- def __init__(self, ctx, enqueue_ops, dequeue_ops):
+ def __init__(self, ctx, enqueue_ops, dequeue_ops, rendezvous=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
- ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False)
+ ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False,
+ rendezvous=rendezvous)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
@@ -701,8 +635,6 @@ def generate_per_core_enqueue_ops_fn_for_host(
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
- infeed_queue.set_configuration_from_sharded_input_tensors(
- per_host_sharded_inputs)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
@@ -837,8 +769,6 @@ def generate_per_host_v2_enqueue_ops_fn_for_host(
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
- infeed_queue.set_configuration_from_sharded_input_tensors(
- per_host_sharded_inputs)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
@@ -862,12 +792,13 @@ def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
raise TypeError('Mode PREDICT not yet supported in BROADCAST mode.')
- hooks.append(inputs.dataset_initializer_hook())
+ if is_dataset:
+ hooks.append(inputs.dataset_initializer_hook())
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(replica_id):
if ctx.device_assignment:
- return ctx.device_assignment.tpu_ordinal(replica_id=replica_id)
+ return ctx.device_assignment.tpu_ordinal(replica=replica_id)
else:
return replica_id % num_replicas_per_host
@@ -1340,7 +1271,8 @@ class _ModelFnWrapper(object):
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
to_record = {}
- to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
+ if tpu_estimator_spec.eval_metrics:
+ to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
@@ -1643,7 +1575,7 @@ class _OutfeedHostCall(object):
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
- return []
+ return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
@@ -1662,11 +1594,13 @@ class _OutfeedHostCall(object):
# Outfeed ops execute on each replica's first logical core. Note: we must
# constraint it such that we have at most one outfeed dequeue and enqueue
# per replica.
- tpu_device_placement_fn = self._ctx.tpu_device_placement_function
for i in xrange(self._ctx.num_replicas):
- with ops.device(tpu_device_placement_fn(i)):
+ host_device, ordinal_id = self._ctx.device_for_replica(i)
+ with ops.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
- dtypes=tensor_dtypes, shapes=tensor_shapes)
+ dtypes=tensor_dtypes,
+ shapes=tensor_shapes,
+ device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
@@ -1790,6 +1724,9 @@ class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
+ TPUEstimator also supports training on CPU and GPU. You don't need to define
+ a separate `tf.estimator.Estimator`.
+
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
@@ -1827,7 +1764,8 @@ class TPUEstimator(estimator_lib.Estimator):
Current limitations:
--------------------
- 1. TPU evaluation only works on a single host (one TPU worker).
+ 1. TPU evaluation only works on a single host (one TPU worker) except
+     in BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
@@ -2113,6 +2051,7 @@ class TPUEstimator(estimator_lib.Estimator):
self._export_to_tpu = export_to_tpu
self._is_input_fn_invoked = None
+ self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
@@ -2356,6 +2295,65 @@ class TPUEstimator(estimator_lib.Estimator):
"""
pass
+ def train(self,
+ input_fn,
+ hooks=None,
+ steps=None,
+ max_steps=None,
+ saving_listeners=None):
+ rendezvous = error_handling.ErrorRendezvous(num_sources=3)
+ self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
+ try:
+ return super(TPUEstimator, self).train(
+ input_fn=input_fn, hooks=hooks, steps=steps, max_steps=max_steps,
+ saving_listeners=saving_listeners
+ )
+ except Exception as e: # pylint: disable=broad-except
+ rendezvous.record_error('training_loop', e)
+ finally:
+ rendezvous.record_done('training_loop')
+ rendezvous.raise_errors()
+
+ def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None,
+ name=None):
+ rendezvous = error_handling.ErrorRendezvous(num_sources=3)
+ self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
+ try:
+ return super(TPUEstimator, self).evaluate(
+ input_fn, steps=steps, hooks=hooks, checkpoint_path=checkpoint_path,
+ name=name
+ )
+ except Exception as e: # pylint: disable=broad-except
+ rendezvous.record_error('evaluation_loop', e)
+ finally:
+ rendezvous.record_done('evaluation_loop')
+ rendezvous.raise_errors()
+
+ def predict(self,
+ input_fn,
+ predict_keys=None,
+ hooks=None,
+ checkpoint_path=None,
+ yield_single_examples=True):
+ rendezvous = error_handling.ErrorRendezvous(num_sources=3)
+ self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
+ try:
+ for result in super(TPUEstimator, self).predict(
+ input_fn=input_fn,
+ predict_keys=predict_keys,
+ hooks=hooks,
+ checkpoint_path=checkpoint_path,
+ yield_single_examples=yield_single_examples):
+ yield result
+ except Exception as e: # pylint: disable=broad-except
+ rendezvous.record_error('prediction_loop', e)
+ finally:
+ rendezvous.record_done('prediction_loop')
+ rendezvous.raise_errors()
+
+ rendezvous.record_done('prediction_loop')
+ rendezvous.raise_errors()
+
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
@@ -2450,7 +2448,9 @@ class TPUEstimator(estimator_lib.Estimator):
enqueue_ops,
host_ops,
run_infeed_loop_on_coordinator=(
- run_infeed_loop_on_coordinator)),
+ run_infeed_loop_on_coordinator),
+ rendezvous=self._rendezvous[mode],
+ ),
InstallSignalHandlerHook(),
training.LoggingTensorHook(
{
@@ -2518,7 +2518,8 @@ class TPUEstimator(estimator_lib.Estimator):
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
- for k, v in host_call_ret['eval_metrics'].items():
+
+ for k, v in host_call_ret.get('eval_metrics', {}).items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
@@ -2532,7 +2533,8 @@ class TPUEstimator(estimator_lib.Estimator):
enqueue_ops,
eval_update_ops + host_ops,
run_infeed_loop_on_coordinator=(
- run_infeed_loop_on_coordinator)),
+ run_infeed_loop_on_coordinator),
+ rendezvous=self._rendezvous[mode]),
] + input_hooks
return model_fn_lib.EstimatorSpec(
@@ -2598,8 +2600,8 @@ class TPUEstimator(estimator_lib.Estimator):
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
- TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,
- host_ops),
+ TPUInfeedOutfeedSessionHookForPrediction(
+ ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode]),
] + input_hooks
return model_fn_lib.EstimatorSpec(
@@ -3249,3 +3251,47 @@ def _add_item_to_params(params, key, value):
else:
# Now params is Python dict.
params[key] = value
+
+
+def export_estimator_savedmodel(estimator,
+ export_dir_base,
+ serving_input_receiver_fn,
+ assets_extra=None,
+ as_text=False,
+ checkpoint_path=None,
+ strip_default_attrs=False):
+ """Export `Estimator` trained model for TPU inference.
+
+ Args:
+ estimator: `Estimator` with which model has been trained.
+ export_dir_base: A string containing a directory in which to create
+ timestamped subdirectories containing exported SavedModels.
+ serving_input_receiver_fn: A function that takes no argument and
+ returns a `ServingInputReceiver` or `TensorServingInputReceiver`.
+ assets_extra: A dict specifying how to populate the assets.extra directory
+ within the exported SavedModel, or `None` if no extra assets are needed.
+ as_text: whether to write the SavedModel proto in text format.
+ checkpoint_path: The checkpoint path to export. If `None` (the default),
+ the most recent checkpoint found within the model directory is chosen.
+ strip_default_attrs: Boolean. If `True`, default-valued attributes will be
+ removed from the NodeDefs.
+
+ Returns:
+ The string path to the exported directory.
+ """
+ # `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
+ # `estimator.config`.
+ config = tpu_config.RunConfig(model_dir=estimator.model_dir)
+ est = TPUEstimator(
+ estimator._model_fn, # pylint: disable=protected-access
+ config=config,
+ params=estimator.params,
+ use_tpu=True,
+ train_batch_size=2048, # Does not matter.
+ eval_batch_size=2048, # Does not matter.
+ )
+ return est.export_savedmodel(export_dir_base, serving_input_receiver_fn,
+ assets_extra,
+ as_text,
+ checkpoint_path,
+ strip_default_attrs)
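A hypothetical call, where `my_estimator` is an existing `tf.estimator.Estimator` and `my_serving_input_receiver_fn` a user-provided serving input function:

```python
export_dir = export_estimator_savedmodel(
    estimator=my_estimator,
    export_dir_base='/tmp/tpu_saved_model',
    serving_input_receiver_fn=my_serving_input_receiver_fn)
```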
diff --git a/tensorflow/contrib/verbs/rdma_mgr.cc b/tensorflow/contrib/verbs/rdma_mgr.cc
index 9cb3d1fbbf..3cb5e61fac 100644
--- a/tensorflow/contrib/verbs/rdma_mgr.cc
+++ b/tensorflow/contrib/verbs/rdma_mgr.cc
@@ -23,6 +23,7 @@ limitations under the License.
#include "tensorflow/core/common_runtime/bfc_allocator.h"
#include "tensorflow/core/common_runtime/gpu/gpu_process_state.h"
#include "tensorflow/core/common_runtime/gpu/gpu_util.h"
+#include "tensorflow/core/common_runtime/pool_allocator.h"
#include "tensorflow/core/common_runtime/process_state.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/core/distributed_runtime/session_mgr.h"
@@ -255,28 +256,25 @@ void MRDeleter(ibv_mr* mr) {
}
}
-// TODO(byronyi): remove this class duplicated from the one in
-// common/runtime/gpu/pool_allocator.h when it is available in common_runtime
-class BasicCPUAllocator : public SubAllocator {
- public:
- ~BasicCPUAllocator() override {}
-
- void* Alloc(size_t alignment, size_t num_bytes) override {
- return port::AlignedMalloc(num_bytes, alignment);
- }
- void Free(void* ptr, size_t) override { port::AlignedFree(ptr); }
-};
-
// TODO(byronyi): remove this class and its registration when the default
-// cpu_allocator() returns visitable allocator
+// cpu_allocator() returns a visitable allocator, or cpu_allocator() is no
+// longer in use.
class BFCRdmaAllocator : public BFCAllocator {
public:
BFCRdmaAllocator()
- : BFCAllocator(new BasicCPUAllocator(), 1LL << 36, true, "cpu_rdma_bfc") {
+ : BFCAllocator(new BasicCPUAllocator(port::kNUMANoAffinity), 1LL << 36,
+ true, "cpu_rdma_bfc") {}
+};
+class BFCRdmaAllocatorFactory : public AllocatorFactory {
+ public:
+ Allocator* CreateAllocator() { return new BFCRdmaAllocator; }
+
+ SubAllocator* CreateSubAllocator(int numa_node) {
+ return new BasicCPUAllocator(numa_node);
}
};
-REGISTER_MEM_ALLOCATOR("BFCRdmaAllocator", 101, BFCRdmaAllocator);
+REGISTER_MEM_ALLOCATOR("BFCRdmaAllocator", 101, BFCRdmaAllocatorFactory);
void RdmaMgr::InitAllocators() {
RdmaMemoryMgr::Singleton().pd_ = rdma_adapter_->pd_;
@@ -284,8 +282,8 @@ void RdmaMgr::InitAllocators() {
Allocator* allocators[] = {
#if GOOGLE_CUDA
GPUProcessState::singleton()->GetCUDAHostAllocator(0),
- ProcessState::singleton()->GetCPUAllocator(0),
#endif // GOOGLE_CUDA
+ ProcessState::singleton()->GetCPUAllocator(0),
cpu_allocator(),
};
diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD
index 19060c5ce7..e63327b5d4 100644
--- a/tensorflow/core/BUILD
+++ b/tensorflow/core/BUILD
@@ -662,6 +662,7 @@ cc_library(
"lib/random/random_distributions.h",
"lib/random/simple_philox.h",
"lib/strings/numbers.h",
+ "lib/strings/proto_serialization.h",
"lib/strings/str_util.h",
"lib/strings/strcat.h",
"lib/strings/stringprintf.h",
@@ -846,6 +847,7 @@ tf_cuda_library(
"util/sparse/sparse_tensor.h",
"util/stat_summarizer.h",
"util/stat_summarizer_options.h",
+ "util/status_util.h",
"util/stream_executor_util.h",
"util/strided_slice_op.h",
"util/tensor_format.h",
@@ -882,6 +884,16 @@ cc_library(
copts = tf_copts(),
)
+tf_cc_test(
+ name = "stats_calculator_test",
+ srcs = ["util/stats_calculator_test.cc"],
+ deps = [
+ ":stats_calculator_portable",
+ ":test",
+ ":test_main",
+ ],
+)
+
cc_library(
name = "overflow",
hdrs = ["util/overflow.h"],
@@ -1644,6 +1656,7 @@ cc_library(
copts = tf_copts(android_optimization_level_override = None) + [
"-DSUPPORT_SELECTIVE_REGISTRATION",
],
+ linkopts = if_android(["-lz"]),
tags = [
"manual",
"notap",
@@ -1667,6 +1680,7 @@ cc_library(
copts = tf_copts(android_optimization_level_override = None) + tf_opts_nortti_if_android() + [
"-DSUPPORT_SELECTIVE_REGISTRATION",
],
+ linkopts = if_android(["-lz"]),
tags = [
"manual",
"notap",
@@ -2466,6 +2480,7 @@ tf_cuda_library(
"framework/resource_handle.cc",
"util/memmapped_file_system.*",
"util/memmapped_file_system_writer.*",
+ "util/stats_calculator.*",
"util/version_info.cc",
],
) + select({
@@ -2492,6 +2507,7 @@ tf_cuda_library(
":protos_all_proto_text",
":error_codes_proto_text",
":protos_all_cc",
+ ":stats_calculator_portable",
":version_lib",
"//tensorflow/core/platform/default/build_config:platformlib",
"//tensorflow/core/kernels:bounds_check",
@@ -3226,6 +3242,7 @@ tf_cc_tests(
":test",
":test_main",
"//third_party/eigen3",
+ "@zlib_archive//:zlib",
],
)
@@ -3735,7 +3752,6 @@ tf_cc_tests_gpu(
"common_runtime/gpu/gpu_bfc_allocator_test.cc",
"common_runtime/gpu/gpu_device_test.cc",
"common_runtime/gpu/gpu_id_manager_test.cc",
- "common_runtime/gpu/gpu_event_mgr_test.cc",
"common_runtime/gpu/pool_allocator_test.cc",
],
linkstatic = tf_kernel_tests_linkstatic(),
@@ -3759,6 +3775,23 @@ tf_cc_tests_gpu(
],
)
+tf_cc_test_gpu(
+ name = "gpu_event_mgr_test",
+ srcs = ["common_runtime/gpu/gpu_event_mgr_test.cc"],
+ linkstatic = tf_kernel_tests_linkstatic(),
+ tags = tf_cuda_tests_tags(),
+ deps = [
+ ":framework",
+ ":framework_internal",
+ ":lib",
+ ":lib_internal",
+ ":protos_all_cc",
+ ":test",
+ ":test_main",
+ ":testlib",
+ ],
+)
+
tf_cuda_cc_test(
name = "gpu_device_unified_memory_test",
size = "small",
diff --git a/tensorflow/core/api_def/base_api/api_def_DrawBoundingBoxes.pbtxt b/tensorflow/core/api_def/base_api/api_def_DrawBoundingBoxes.pbtxt
index 6c3ae09f5d..35c916e269 100644
--- a/tensorflow/core/api_def/base_api/api_def_DrawBoundingBoxes.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_DrawBoundingBoxes.pbtxt
@@ -30,7 +30,7 @@ height of the underlying image.
For example, if an image is 100 x 200 pixels (height x width) and the bounding
box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
-the bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates).
+the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
Parts of the bounding box may fall outside the image.
END
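The worked arithmetic behind the corrected coordinates above, treating the box as `[y_min, x_min, y_max, x_max]` relative to image height and width:

```python
height, width = 100, 200
y_min, x_min, y_max, x_max = 0.1, 0.2, 0.5, 0.9
upper_left = (x_min * width, y_min * height)    # (40.0, 10.0)
bottom_right = (x_max * width, y_max * height)  # (180.0, 50.0)
```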
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdAdd.pbtxt
index 3b3a274df5..2b58969da2 100644
--- a/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdAdd.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdAdd.pbtxt
@@ -51,7 +51,7 @@ For example, say we want to update 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
- ref = tfe.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+ ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
update = tf.scatter_nd_add(ref, indices, updates)
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdUpdate.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdUpdate.pbtxt
index b07ee9fda9..17b79ee30c 100644
--- a/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdUpdate.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdUpdate.pbtxt
@@ -51,7 +51,7 @@ For example, say we want to update 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
- ref = tfe.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+ ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
update = tf.scatter_nd_update(ref, indices, updates)
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt
index 58753a651a..ad1c527b01 100644
--- a/tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt
@@ -32,8 +32,12 @@ slices within a tensor (initially zero for numeric, empty for string) of
the given `shape` according to indices. This operator is the inverse of the
@{tf.gather_nd} operator which extracts values or slices from a given tensor.
+If `indices` contains duplicates, then their updates are accumulated (summed).
+
**WARNING**: The order in which updates are applied is nondeterministic, so the
-output will be nondeterministic if `indices` contains duplicates.
+output will be nondeterministic if `indices` contains duplicates: because
+floating-point summation is inexact, numbers summed in a different order
+may yield different results.
`indices` is an integer tensor containing indices into a new tensor of shape
`shape`. The last dimension of `indices` can be at most the rank of `shape`:
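
A small example of the accumulation behaviour described above, assuming the TF 1.x `tf.scatter_nd` Python wrapper; the two updates targeting index 1 are summed, and with floating-point inputs the summation order can affect the low-order bits:

```python
import tensorflow as tf

indices = tf.constant([[1], [1], [3]])        # index 1 appears twice
updates = tf.constant([10.0, 20.0, 5.0])
scattered = tf.scatter_nd(indices, updates, shape=[4])

with tf.Session() as sess:
    print(sess.run(scattered))  # [ 0. 30.  0.  5.] -- duplicate updates are summed
```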
diff --git a/tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt b/tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt
index c80ee77f73..ddde3ee5b4 100644
--- a/tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_SlideDataset.pbtxt
@@ -8,11 +8,18 @@ sliding window.
END
}
in_arg {
- name: "stride"
+ name: "window_shift"
description: <<END
A scalar representing the steps moving the sliding window
forward in one iteration. It must be positive.
END
}
+ in_arg {
+ name: "window_stride"
+ description: <<END
+A scalar representing the stride of the input elements of the sliding window.
+It must be positive.
+END
+ }
summary: "Creates a dataset that passes a sliding window over `input_dataset`."
}
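
To make the distinction between the two parameters concrete, here is a plain-Python sketch (not the kernel implementation) of how `window_shift` moves the window start while `window_stride` subsamples elements within each window; the window size and input values are hypothetical:

```python
def sliding_windows(elements, window_size, window_shift, window_stride):
    start = 0
    while start + (window_size - 1) * window_stride < len(elements):
        yield elements[start : start + window_size * window_stride : window_stride]
        start += window_shift

print(list(sliding_windows(list(range(8)),
                           window_size=3, window_shift=2, window_stride=2)))
# [[0, 2, 4], [2, 4, 6]]
```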
diff --git a/tensorflow/core/common_runtime/bfc_allocator.cc b/tensorflow/core/common_runtime/bfc_allocator.cc
index 9cda17867b..3bf0532491 100644
--- a/tensorflow/core/common_runtime/bfc_allocator.cc
+++ b/tensorflow/core/common_runtime/bfc_allocator.cc
@@ -155,10 +155,6 @@ bool BFCAllocator::Extend(size_t alignment, size_t rounded_bytes) {
region_manager_.set_handle(c->ptr, h);
- // TODO(vrv): Try to merge this new region with an existing region,
- // if the address space is contiguous, to avoid fragmentation
- // across regions.
-
// Insert the chunk into the right bin.
InsertFreeChunkIntoBin(h);
@@ -465,49 +461,33 @@ void BFCAllocator::FreeAndMaybeCoalesce(BFCAllocator::ChunkHandle h) {
Chunk* c = ChunkFromHandle(h);
CHECK(c->in_use() && (c->bin_num == kInvalidBinNum));
- // Mark the chunk as no longer in use
+ // Mark the chunk as no longer in use.
c->allocation_id = -1;
// Updates the stats.
stats_.bytes_in_use -= c->size;
- // This chunk is no longer in-use, consider coalescing the chunk
- // with adjacent chunks.
- ChunkHandle chunk_to_reassign = h;
-
- // If the next chunk is free, coalesce the two
- if (c->next != kInvalidChunkHandle) {
- Chunk* cnext = ChunkFromHandle(c->next);
- if (!cnext->in_use()) {
- // VLOG(8) << "Chunk at " << cnext->ptr << " merging with c " <<
- // c->ptr;
-
- chunk_to_reassign = h;
+ ChunkHandle coalesced_chunk = h;
- // Deletes c->next
- RemoveFreeChunkFromBin(c->next);
- Merge(h, ChunkFromHandle(h)->next);
- }
+ // If the next chunk is free, merge it into c and delete it.
+ if (c->next != kInvalidChunkHandle && !ChunkFromHandle(c->next)->in_use()) {
+ // VLOG(8) << "Merging c->next " << ChunkFromHandle(c->next)->ptr
+ // << " with c " << c->ptr;
+ RemoveFreeChunkFromBin(c->next);
+ Merge(h, c->next);
}
- // If the previous chunk is free, coalesce the two
- c = ChunkFromHandle(h);
- if (c->prev != kInvalidChunkHandle) {
- Chunk* cprev = ChunkFromHandle(c->prev);
- if (!cprev->in_use()) {
- // VLOG(8) << "Chunk at " << c->ptr << " merging into c->prev "
- // << cprev->ptr;
-
- chunk_to_reassign = c->prev;
+ // If the previous chunk is free, merge c into it and delete c.
+ if (c->prev != kInvalidChunkHandle && !ChunkFromHandle(c->prev)->in_use()) {
+ // VLOG(8) << "Merging c " << c->ptr << " into c->prev "
+ // << ChunkFromHandle(c->prev)->ptr;
- // Deletes c
- RemoveFreeChunkFromBin(c->prev);
- Merge(ChunkFromHandle(h)->prev, h);
- c = ChunkFromHandle(h);
- }
+ coalesced_chunk = c->prev;
+ RemoveFreeChunkFromBin(c->prev);
+ Merge(c->prev, h);
}
- InsertFreeChunkIntoBin(chunk_to_reassign);
+ InsertFreeChunkIntoBin(coalesced_chunk);
}
void BFCAllocator::AddAllocVisitor(Visitor visitor) {
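
The rewritten FreeAndMaybeCoalesce above keeps the same behaviour as before: merge a free right neighbour into the chunk, then merge the chunk into a free left neighbour, and reinsert whichever chunk survives. A minimal Python sketch of that logic (illustrative only, not the BFCAllocator code):

```python
class Chunk:
    def __init__(self, size, in_use=False):
        self.size, self.in_use = size, in_use
        self.prev = self.next = None


def free_and_maybe_coalesce(c):
    c.in_use = False
    # If the next chunk is free, merge it into c and unlink it.
    if c.next is not None and not c.next.in_use:
        c.size += c.next.size
        c.next = c.next.next
        if c.next is not None:
            c.next.prev = c
    coalesced = c
    # If the previous chunk is free, merge c into it and unlink c.
    if c.prev is not None and not c.prev.in_use:
        c.prev.size += c.size
        c.prev.next = c.next
        if c.next is not None:
            c.next.prev = c.prev
        coalesced = c.prev
    return coalesced  # the chunk to reinsert into a free-list bin
```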
diff --git a/tensorflow/core/common_runtime/bfc_allocator.h b/tensorflow/core/common_runtime/bfc_allocator.h
index 52aedb1e9c..580e61e2ea 100644
--- a/tensorflow/core/common_runtime/bfc_allocator.h
+++ b/tensorflow/core/common_runtime/bfc_allocator.h
@@ -88,11 +88,20 @@ class BFCAllocator : public VisitableAllocator {
static const int kInvalidBinNum = -1;
static const int kNumBins = 21;
- // Chunks point to memory. Their prev/next pointers form a
- // doubly-linked list of addresses sorted by base address that
- // must be contiguous. Chunks contain information about whether
- // they are in use or whether they are free, and contain a pointer
- // to the bin they are in.
+ // A Chunk points to a piece of memory that's either entirely free or entirely
+ // in use by one user memory allocation.
+ //
+ // An AllocationRegion's memory is split up into one or more disjoint Chunks,
+ // which together cover the whole region without gaps. Chunks participate in
+ // a doubly-linked list, and the prev/next pointers point to the physically
+ // adjacent chunks.
+ //
+ // Since a chunk cannot be partially in use, we may need to split a free chunk
+ // in order to service a user allocation. We always merge adjacent free
+ // chunks.
+ //
+ // Chunks contain information about whether they are in use or whether they
+ // are free, and contain a pointer to the bin they are in.
struct Chunk {
size_t size = 0; // Full size of buffer.
@@ -177,8 +186,12 @@ class BFCAllocator : public VisitableAllocator {
static const size_t kMinAllocationBits = 8;
static const size_t kMinAllocationSize = 1 << kMinAllocationBits;
- // AllocationRegion maps pointers to ChunkHandles for a single
- // contiguous memory region.
+ // BFCAllocator allocates memory into a collection of disjoint
+ // AllocationRegions. Each AllocationRegion corresponds to one call to
+ // SubAllocator::Alloc().
+ //
+ // An AllocationRegion contains one or more Chunks, covering all of its
+ // memory. Its primary job is to map pointers to ChunkHandles.
//
// This class is thread-compatible.
class AllocationRegion {
@@ -191,18 +204,14 @@ class BFCAllocator : public VisitableAllocator {
DCHECK_EQ(0, memory_size % kMinAllocationSize);
const size_t n_handles =
(memory_size + kMinAllocationSize - 1) / kMinAllocationSize;
- handles_ = new ChunkHandle[n_handles];
+ handles_.reset(new ChunkHandle[n_handles]);
for (size_t i = 0; i < n_handles; i++) {
handles_[i] = kInvalidChunkHandle;
}
}
- AllocationRegion() {}
-
- ~AllocationRegion() { delete[] handles_; }
-
+ AllocationRegion() = default;
AllocationRegion(AllocationRegion&& other) { Swap(other); }
-
AllocationRegion& operator=(AllocationRegion&& other) {
Swap(other);
return *this;
@@ -241,7 +250,7 @@ class BFCAllocator : public VisitableAllocator {
// Array of size "memory_size / kMinAllocationSize". It is
// indexed by (p-base) / kMinAllocationSize, contains ChunkHandle
// for the memory allocation represented by "p"
- ChunkHandle* handles_ = nullptr;
+ std::unique_ptr<ChunkHandle[]> handles_;
TF_DISALLOW_COPY_AND_ASSIGN(AllocationRegion);
};
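
The handle array described above is indexed by rounding the pointer's offset from the region base down to `kMinAllocationSize` slots. A short sketch of that lookup, assuming the 256-byte minimum allocation size (`1 << 8`) declared in this header:

```python
K_MIN_ALLOCATION_SIZE = 1 << 8  # 256 bytes, matching kMinAllocationSize above

def handle_index(p, region_base):
    # Each kMinAllocationSize-aligned slot in the region owns one handle entry.
    return (p - region_base) // K_MIN_ALLOCATION_SIZE

print(handle_index(0x10000A00, 0x10000000))  # slot 10 within the region
```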
diff --git a/tensorflow/core/common_runtime/direct_session.cc b/tensorflow/core/common_runtime/direct_session.cc
index f903faf1bd..d1fd930d25 100644
--- a/tensorflow/core/common_runtime/direct_session.cc
+++ b/tensorflow/core/common_runtime/direct_session.cc
@@ -146,18 +146,15 @@ class DirectSessionFactory : public SessionFactory {
return options.target.empty();
}
- Session* NewSession(const SessionOptions& options) override {
+ Status NewSession(const SessionOptions& options,
+ Session** out_session) override {
// Must do this before the CPU allocator is created.
if (options.config.graph_options().build_cost_model() > 0) {
EnableCPUAllocatorFullStats(true);
}
std::vector<Device*> devices;
- const Status s = DeviceFactory::AddDevices(
- options, "/job:localhost/replica:0/task:0", &devices);
- if (!s.ok()) {
- LOG(ERROR) << s;
- return nullptr;
- }
+ TF_RETURN_IF_ERROR(DeviceFactory::AddDevices(
+ options, "/job:localhost/replica:0/task:0", &devices));
DirectSession* session =
new DirectSession(options, new DeviceMgr(devices), this);
@@ -165,7 +162,8 @@ class DirectSessionFactory : public SessionFactory {
mutex_lock l(sessions_lock_);
sessions_.push_back(session);
}
- return session;
+ *out_session = session;
+ return Status::OK();
}
Status Reset(const SessionOptions& options,
@@ -237,7 +235,11 @@ void DirectSession::SchedClosure(thread::ThreadPool* pool,
// safe given the reasoning above.
c();
#else
- pool->Schedule(std::move(c));
+ if (pool != nullptr) {
+ pool->Schedule(std::move(c));
+ } else {
+ c();
+ }
#endif // __ANDROID__
}
@@ -524,8 +526,9 @@ Status DirectSession::RunInternal(int64 step_id, const RunOptions& run_options,
}
}
- if (run_options.inter_op_thread_pool() < 0 ||
- run_options.inter_op_thread_pool() >= thread_pools_.size()) {
+ if (run_options.inter_op_thread_pool() < -1 ||
+ run_options.inter_op_thread_pool() >=
+ static_cast<int32>(thread_pools_.size())) {
run_state.executors_done.Notify();
delete barrier;
return errors::InvalidArgument("Invalid inter_op_thread_pool: ",
@@ -550,7 +553,19 @@ Status DirectSession::RunInternal(int64 step_id, const RunOptions& run_options,
}
thread::ThreadPool* pool =
- thread_pools_[run_options.inter_op_thread_pool()].first;
+ run_options.inter_op_thread_pool() >= 0
+ ? thread_pools_[run_options.inter_op_thread_pool()].first
+ : nullptr;
+
+ if (pool == nullptr) {
+ // We allow using the caller thread only when having a single executor
+ // specified.
+ if (executors_and_keys->items.size() > 1) {
+ pool = thread_pools_[0].first;
+ } else {
+ VLOG(1) << "Executing Session::Run() synchronously!";
+ }
+ }
Executor::Args::Runner default_runner = [this,
pool](Executor::Args::Closure c) {
@@ -702,7 +717,8 @@ Status DirectSession::Run(const RunOptions& run_options,
// Receive outputs.
if (outputs) {
std::vector<Tensor> sorted_outputs;
- const Status s = call_frame.ConsumeRetvals(&sorted_outputs);
+ const Status s = call_frame.ConsumeRetvals(
+ &sorted_outputs, /* allow_dead_tensors = */ false);
if (errors::IsInternal(s)) {
return errors::InvalidArgument(s.error_message());
} else if (!s.ok()) {
@@ -1188,12 +1204,11 @@ Status DirectSession::CreateExecutors(
delete kernel;
}
};
- params.node_outputs_cb = node_outputs_callback_;
optimizer.Optimize(lib, options_.env, device, &iter->second,
/*shape_map=*/nullptr);
- // EXPERIMENTAL: tfdbg inserts debug nodes in the graph.
+ // TensorFlow Debugger (tfdbg) inserts debug nodes in the graph.
const DebugOptions& debug_options =
options.callable_options.run_options().debug_options();
if (!debug_options.debug_tensor_watch_opts().empty()) {
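
With this change, passing `-1` as the inter-op thread pool in `RunOptions` makes DirectSession execute the step in the calling thread (when only a single executor is involved). A hedged TF 1.x Python sketch of opting in:

```python
import tensorflow as tf

run_options = tf.RunOptions(inter_op_thread_pool=-1)  # run synchronously in the caller thread
with tf.Session() as sess:
    x = tf.constant(17)
    print(sess.run(x, options=run_options))
```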
diff --git a/tensorflow/core/common_runtime/direct_session_test.cc b/tensorflow/core/common_runtime/direct_session_test.cc
index 142d613129..4b51b20bb1 100644
--- a/tensorflow/core/common_runtime/direct_session_test.cc
+++ b/tensorflow/core/common_runtime/direct_session_test.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include <map>
#include <memory>
#include <string>
+#include <thread>
#include <unordered_map>
#include <vector>
@@ -896,6 +897,125 @@ TEST(DirectSessionTest, FetchMultipleTimes) {
}
}
+TEST(DirectSessionTest, MultipleFeedTestSomeSyncRun) {
+ GraphDef def;
+ Graph g(OpRegistry::Global());
+ RunOptions run_options;
+ run_options.set_inter_op_thread_pool(-1);
+
+ Tensor first_value(DT_FLOAT, TensorShape({}));
+ first_value.scalar<float>()() = 1.0;
+ Node* first_const = test::graph::Constant(&g, first_value);
+ Node* first_identity = test::graph::Identity(&g, first_const);
+
+ Tensor second_value(DT_FLOAT, TensorShape({}));
+ second_value.scalar<float>()() = 2.0;
+ Node* second_const = test::graph::Constant(&g, second_value);
+ Node* second_identity = test::graph::Identity(&g, second_const);
+
+ test::graph::ToGraphDef(&g, &def);
+
+ auto session = CreateSession();
+ ASSERT_TRUE(session != nullptr);
+ TF_ASSERT_OK(session->Create(def));
+
+ std::vector<Tensor> outputs;
+
+ // Fetch without feeding.
+ Status s = session->Run(
+ run_options, {},
+ {first_identity->name() + ":0", second_identity->name() + ":0"}, {},
+ &outputs, nullptr);
+ TF_ASSERT_OK(s);
+ ASSERT_EQ(2, outputs.size());
+ ASSERT_EQ(1.0, outputs[0].flat<float>()(0));
+ ASSERT_EQ(2.0, outputs[1].flat<float>()(0));
+
+ s = session->Run(
+ {}, {second_identity->name() + ":0", first_identity->name() + ":0"}, {},
+ &outputs);
+ TF_ASSERT_OK(s);
+ ASSERT_EQ(2, outputs.size());
+ ASSERT_EQ(2.0, outputs[0].flat<float>()(0));
+ ASSERT_EQ(1.0, outputs[1].flat<float>()(0));
+
+ Tensor value_11(DT_FLOAT, TensorShape({}));
+ value_11.scalar<float>()() = 11.0;
+ Tensor value_22(DT_FLOAT, TensorShape({}));
+ value_22.scalar<float>()() = 22.0;
+
+ // Feed [first_const, second_const]
+ s = session->Run(
+ {{first_const->name(), value_11}, {second_const->name(), value_22}},
+ {first_identity->name() + ":0", second_identity->name() + ":0"}, {},
+ &outputs);
+ TF_ASSERT_OK(s);
+ ASSERT_EQ(2, outputs.size());
+ ASSERT_EQ(11.0, outputs[0].flat<float>()(0));
+ ASSERT_EQ(22.0, outputs[1].flat<float>()(0));
+
+ // Feed [second_const, first_const]
+ s = session->Run(
+ {{second_const->name(), value_22}, {first_const->name(), value_11}},
+ {first_identity->name() + ":0", second_identity->name() + ":0"}, {},
+ &outputs);
+ TF_ASSERT_OK(s);
+ ASSERT_EQ(2, outputs.size());
+ ASSERT_EQ(11.0, outputs[0].flat<float>()(0));
+ ASSERT_EQ(22.0, outputs[1].flat<float>()(0));
+
+ // Feed [first_const, first_const]
+ s = session->Run(
+ run_options,
+ {{first_const->name(), value_11}, {first_const->name(), value_22}},
+ {first_identity->name() + ":0", second_identity->name() + ":0"}, {},
+ &outputs, nullptr);
+ EXPECT_TRUE(errors::IsInvalidArgument(s));
+ EXPECT_TRUE(str_util::StrContains(s.error_message(), "fed more than once"));
+}
+
+REGISTER_OP("ThreadID").Input("x: int64").Output("y: int64").Doc(R"doc(
+ThreadID returns the thread ID that called compute.
+
+x: int64
+y: int64
+)doc");
+
+// The ThreadID kernel returns the thread ID that executed Compute.
+class ThreadIDOp : public OpKernel {
+ public:
+ explicit ThreadIDOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
+ void Compute(OpKernelContext* ctx) override {
+ Tensor* out_tensor = nullptr;
+ OP_REQUIRES_OK(ctx,
+ ctx->allocate_output("y", TensorShape({}), &out_tensor));
+ std::hash<std::thread::id> hasher;
+ out_tensor->scalar<int64>()() =
+ static_cast<int64>(hasher(std::this_thread::get_id()));
+ }
+};
+REGISTER_KERNEL_BUILDER(Name("ThreadID").Device(DEVICE_CPU), ThreadIDOp);
+
+TEST(DirectSessionTest, SessionSyncRun) {
+ Graph g(OpRegistry::Global());
+ Tensor vx(DT_INT64, TensorShape({}));
+ vx.scalar<int64>()() = 17;
+ Node* x = test::graph::Constant(&g, vx);
+ Node* y = test::graph::Unary(&g, "ThreadID", x);
+ GraphDef def;
+ test::graph::ToGraphDef(&g, &def);
+ auto sess = CreateSession();
+ TF_ASSERT_OK(sess->Create(def));
+ std::vector<Tensor> outputs;
+ RunOptions run_opts;
+ run_opts.set_inter_op_thread_pool(-1);
+ auto s = sess->Run(run_opts, {}, {y->name() + ":0"}, {}, &outputs, nullptr);
+
+ std::hash<std::thread::id> hasher;
+ EXPECT_EQ(static_cast<int64>(hasher(std::this_thread::get_id())),
+ static_cast<int64>(outputs[0].scalar<int64>()()));
+}
+
REGISTER_OP("Darth").Input("x: float").Output("y: float").Doc(R"doc(
Darth promises one return value.
@@ -1400,6 +1520,7 @@ static void TestSessionInterOpThreadsImpl(bool use_function_lib,
p = options.config.add_session_inter_op_thread_pool();
if (use_global_pools) p->set_global_name("small pool");
p->set_num_threads(1);
+ const int kSyncPool = -1;
const int kLargePool = 0;
const int kSmallPool = 1;
@@ -1442,7 +1563,11 @@ static void TestSessionInterOpThreadsImpl(bool use_function_lib,
EXPECT_FLOAT_EQ(1.2, flat(0));
num_done.fetch_add(1);
};
- tp->Schedule(fn);
+ if (tp != nullptr) {
+ tp->Schedule(fn);
+ } else {
+ fn();
+ }
};
// For blocking states:
@@ -1463,9 +1588,10 @@ static void TestSessionInterOpThreadsImpl(bool use_function_lib,
tp1 = new thread::ThreadPool(Env::Default(), "tp1", 5);
- // Launch 2 session run calls. Neither will finish until the blocking op is
+ // Launch a session run call. It will not finish until the blocking op is
// unblocked, because it is using all threads in the small pool.
add_session_run_call(tp1, y, kSmallPool);
+
blocking_op_state->AwaitState(1); // Wait for the blocking op to Compute.
// These will block on <BlockingOpState>.
@@ -1484,10 +1610,15 @@ static void TestSessionInterOpThreadsImpl(bool use_function_lib,
delete tp2;
EXPECT_EQ(kUnblockedThreads, num_done.load());
+ // Launch a session call using this thread. This will finish as it runs
+ // synchronously in this thread.
+ add_session_run_call(nullptr, x, kSyncPool);
+
// Unblock the blocked op and wait for the blocked functions to finish.
blocking_op_state->MoveToState(1, 2);
delete tp1;
- EXPECT_EQ(kUnblockedThreads + kBlockedThreads + 1, num_done.load());
+
+ EXPECT_EQ(kUnblockedThreads + kBlockedThreads + 1 + 1, num_done.load());
delete blocking_op_state;
blocking_op_state = nullptr;
}
@@ -1532,7 +1663,7 @@ TEST(DirectSessionTest, TestSessionInterOpThreadsInvalidOptions) {
{
std::unique_ptr<Session> session(NewSession(options));
TF_ASSERT_OK(session->Create(def));
- for (int pool_num = -1; pool_num <= 1; pool_num += 2) {
+ for (int pool_num = -2; pool_num <= 1; pool_num += 3) {
RunOptions run_options;
run_options.set_inter_op_thread_pool(pool_num);
std::vector<Tensor> outputs;
diff --git a/tensorflow/core/common_runtime/eager/context.cc b/tensorflow/core/common_runtime/eager/context.cc
index 70208fb6d1..5e0f0a45f8 100644
--- a/tensorflow/core/common_runtime/eager/context.cc
+++ b/tensorflow/core/common_runtime/eager/context.cc
@@ -17,8 +17,20 @@ limitations under the License.
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/lib/core/blocking_counter.h"
+#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
+namespace {
+
+bool ReadBoolFromEnvVar(StringPiece env_var_name, bool default_val) {
+ bool val;
+ if (ReadBoolFromEnvVar(env_var_name, default_val, &val).ok()) {
+ return val;
+ }
+ return default_val;
+}
+
+} // namespace
EagerContext::EagerContext(const SessionOptions& opts,
ContextDevicePlacementPolicy default_policy,
@@ -34,8 +46,16 @@ EagerContext::EagerContext(const SessionOptions& opts,
local_device_manager_.get(), opts.env, TF_GRAPH_DEF_VERSION,
&func_lib_def_, {}, thread_pool_.get())),
log_device_placement_(opts.config.log_device_placement()),
- async_default_(async) {
+ async_default_(async),
+ use_send_tensor_rpc_(false) {
InitDeviceMapAndAsync();
+ if (opts.config.inter_op_parallelism_threads() > 0) {
+ runner_ = [this](std::function<void()> closure) {
+ this->thread_pool_->Schedule(closure);
+ };
+ } else {
+ runner_ = [](std::function<void()> closure) { closure(); };
+ }
}
#ifndef __ANDROID__
@@ -59,7 +79,9 @@ EagerContext::EagerContext(
remote_device_manager_(std::move(remote_device_manager)),
server_(std::move(server)),
remote_eager_workers_(std::move(remote_eager_workers)),
- remote_contexts_(remote_contexts) {
+ remote_contexts_(remote_contexts),
+ use_send_tensor_rpc_(
+ ReadBoolFromEnvVar("TF_EAGER_REMOTE_USE_SEND_TENSOR_RPC", false)) {
InitDeviceMapAndAsync();
}
#endif
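
The new `use_send_tensor_rpc_` flag is read once from the environment when the remote eager context is constructed, so the variable has to be set beforehand. A hedged example:

```python
import os

# Must be set before the remote eager context is created.
os.environ["TF_EAGER_REMOTE_USE_SEND_TENSOR_RPC"] = "true"
```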
diff --git a/tensorflow/core/common_runtime/eager/context.h b/tensorflow/core/common_runtime/eager/context.h
index 864f514a19..4a180e074d 100644
--- a/tensorflow/core/common_runtime/eager/context.h
+++ b/tensorflow/core/common_runtime/eager/context.h
@@ -105,6 +105,8 @@ class EagerContext {
EagerExecutor* Executor() { return &executor_; }
+ std::function<void(std::function<void()>)>* runner() { return &runner_; }
+
// Sets whether this thread should run in synchronous or asynchronous mode.
Status SetAsyncForThread(bool async);
@@ -180,6 +182,11 @@ class EagerContext {
#ifndef __ANDROID__
Status GetClientAndContextID(Device* device, eager::EagerClient** client,
uint64* context_id);
+
+ // If true, then tensors should be shipped across processes via the
+ // EagerService.SendTensor RPC. If false, _Send/_Recv ops should be used
+ // instead (which in turn use WorkerService.RecvTensor RPCs).
+ bool UseSendTensorRPC() { return use_send_tensor_rpc_; }
#endif
private:
void InitDeviceMapAndAsync();
@@ -214,6 +221,8 @@ class EagerContext {
// session->devices[i].
const std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
+ std::function<void(std::function<void()>)> runner_;
+
mutex cache_mu_;
std::unordered_map<Fprint128, KernelAndDevice*, Fprint128Hasher> kernel_cache_
GUARDED_BY(cache_mu_);
@@ -235,16 +244,18 @@ class EagerContext {
const std::unique_ptr<DeviceMgr> remote_device_manager_;
+#ifndef __ANDROID__
// The server_ is not const since we release it when the context is destroyed.
// Therefore the server_ object is not marked as const (even though it should
// be).
-#ifndef __ANDROID__
std::unique_ptr<ServerInterface> server_;
const std::unique_ptr<eager::EagerClientCache> remote_eager_workers_;
const gtl::FlatMap<string, uint64> remote_contexts_;
gtl::FlatMap<Device*, std::pair<eager::EagerClient*, uint64>>
device_to_client_cache_;
+
+ const bool use_send_tensor_rpc_;
#endif
};
diff --git a/tensorflow/core/common_runtime/eager/execute.cc b/tensorflow/core/common_runtime/eager/execute.cc
index 7a2b477845..7ea78b63d9 100644
--- a/tensorflow/core/common_runtime/eager/execute.cc
+++ b/tensorflow/core/common_runtime/eager/execute.cc
@@ -88,6 +88,8 @@ Status MaybeCopyInputToExpectedDevice(EagerOperation* op, int i,
TF_RETURN_IF_ERROR((*handle)->Device(&handle_device));
const Device* actual_device =
handle_device == nullptr ? ctx->HostCPU() : handle_device;
+ const Device* op_device =
+ op->Device() == nullptr ? ctx->HostCPU() : op->Device();
if (expected_device != actual_device) {
switch (ctx->GetDevicePlacementPolicy()) {
@@ -106,8 +108,8 @@ Status MaybeCopyInputToExpectedDevice(EagerOperation* op, int i,
" cannot compute ",
op->Name(), " as input #", i, " was expected to be on ",
expected_device->name(), " but is actually on ",
- actual_device->name(), " (operation running on ",
- op->Device()->name(), ")",
+ actual_device->name(), " (operation running on ", op_device->name(),
+ ")",
" Tensors can be copied explicitly using .gpu() or .cpu() "
"methods,"
" or transparently copied by using tf.enable_eager_execution("
@@ -118,7 +120,7 @@ Status MaybeCopyInputToExpectedDevice(EagerOperation* op, int i,
LOG(WARNING) << "before computing " << op->Name() << " input #" << i
<< " was expected to be on " << expected_device->name()
<< " but is actually on " << actual_device->name()
- << " (operation running on " << op->Device()->name()
+ << " (operation running on " << op_device->name()
<< "). This triggers a copy which can be a performance "
"bottleneck.";
break;
@@ -512,7 +514,8 @@ Status EagerLocalExecute(EagerOperation* op,
// See WARNING comment in Execute (before kernel->Run) - would be nice to
// rework to avoid this subtlety.
tf_shared_lock l(*ctx->FunctionsMu());
- status = KernelAndDevice::Init(ndef, ctx->func_lib(device), kernel);
+ status = KernelAndDevice::Init(ndef, ctx->func_lib(device), ctx->runner(),
+ kernel);
if (!status.ok()) {
delete kernel;
return status;
@@ -582,6 +585,87 @@ Status EagerLocalExecute(EagerOperation* op,
return status;
}
+std::function<void()> GetRemoteTensorDestructor(
+ EagerContext* ctx, eager::EagerClient* eager_client, uint64 context_id,
+ uint64 op_id, int output_num) {
+ return [ctx, eager_client, context_id, op_id, output_num]() {
+ std::unique_ptr<eager::EnqueueRequest> request(new eager::EnqueueRequest);
+ request->set_context_id(context_id);
+
+ auto* handle_to_decref = request->add_queue()->mutable_handle_to_decref();
+ handle_to_decref->set_op_id(op_id);
+ handle_to_decref->set_output_num(output_num);
+
+ if (ctx->Async()) {
+ tensorflow::uint64 id = ctx->NextId();
+ auto* node =
+ new eager::RemoteExecuteNode(id, std::move(request), eager_client);
+ ctx->ExecutorAdd(node);
+ } else {
+ eager::EnqueueRequest* actual_request = request.release();
+ eager::EnqueueResponse* response = new eager::EnqueueResponse;
+ eager_client->EnqueueAsync(
+ actual_request, response,
+ [actual_request, response](const tensorflow::Status& s) {
+ delete actual_request;
+ delete response;
+ });
+ }
+
+ return tensorflow::Status::OK();
+ };
+}
+
+// When !ctx->UseSendTensorRPC(), then tensors are shipped between remote
+// devices by the receiver invoking the WorkerService.RecvTensor RPC *on the
+// sender* (Rendezvous::RecvAsync() invoked by the _Recv kernel).
+//
+// However, in some configurations the node that has the tensor to be copied
+// isn't running a server (WorkerService RPC interface). For such cases,
+// this function enables sending tensors using the EagerService.SendTensor RPC
+// *on the receiver*.
+Status EagerRemoteSendTensor(EagerContext* ctx, TensorHandle* h,
+ Device* recv_device, TensorHandle** result) {
+ eager::EagerClient* eager_client;
+ uint64 context_id;
+ TF_RETURN_IF_ERROR(
+ ctx->GetClientAndContextID(recv_device, &eager_client, &context_id));
+
+ eager::SendTensorRequest request;
+ eager::SendTensorResponse response;
+
+ request.set_context_id(context_id);
+ request.set_op_id(ctx->NextId());
+ request.set_device_name(recv_device->name());
+
+ const Tensor* tensor;
+ TF_RETURN_IF_ERROR(h->Tensor(&tensor));
+ tensor->AsProtoTensorContent(request.add_tensors());
+
+ const tensorflow::uint64 id = request.op_id();
+
+ // TODO(nareshmodi): support making this call async.
+ Notification n;
+ Status status;
+ eager_client->SendTensorAsync(&request, &response,
+ [&n, &status](const Status& s) {
+ status = s;
+ n.Notify();
+ });
+ n.WaitForNotification();
+ if (!status.ok()) return status;
+
+ std::function<void()> destructor =
+ GetRemoteTensorDestructor(ctx, eager_client, context_id, id, 0);
+
+ *result = new TensorHandle(id, /*output_num=*/0, /*remote_shape_node_id=*/0,
+ tensor->dtype(), std::move(destructor),
+ recv_device, recv_device, ctx);
+ (*result)->SetRemoteShape(MakeUnique<TensorShape>(tensor->shape()));
+
+ return Status::OK();
+}
+
Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
int* num_retvals) {
#ifdef __ANDROID__
@@ -595,10 +679,12 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
TF_RETURN_IF_ERROR(
ctx->GetClientAndContextID(op->Device(), &eager_client, &context_id));
- eager::EnqueueRequest request;
+ std::unique_ptr<eager::EnqueueRequest> request(new eager::EnqueueRequest);
eager::EnqueueResponse response;
- auto* remote_op = request.add_queue()->mutable_operation();
+ request->set_context_id(context_id);
+
+ auto* remote_op = request->add_queue()->mutable_operation();
for (int i = 0; i < op->Inputs().size(); i++) {
tensorflow::Device* input_device;
@@ -628,8 +714,6 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
op->Attrs().FillAttrValueMap(remote_op->mutable_attrs());
remote_op->set_device(op->Device()->name());
- request.set_context_id(context_id);
-
DataTypeVector output_dtypes;
TF_RETURN_IF_ERROR(GetOutputDTypes(op, &output_dtypes));
@@ -651,32 +735,11 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
for (int i = 0; i < *num_retvals; i++) {
// TODO(nareshmodi): Change the callback to instead add the decref to a list
// of pending decrefs that we can send as a batch with the next execute.
- std::function<void()> callback = [ctx, eager_client, context_id, id, i]() {
- eager::EnqueueRequest request;
- request.set_context_id(context_id);
-
- auto* handle_to_decref = request.add_queue()->mutable_handle_to_decref();
- handle_to_decref->set_op_id(id);
- handle_to_decref->set_output_num(i);
-
- if (ctx->Async()) {
- tensorflow::uint64 id = ctx->NextId();
- auto* node = new eager::RemoteExecuteNode(id, request, eager_client);
- ctx->ExecutorAdd(node);
- } else {
- Notification n;
- eager::EnqueueResponse response;
- eager_client->EnqueueAsync(
- &request, &response,
- [&n](const tensorflow::Status& s) { n.Notify(); });
- n.WaitForNotification();
- }
-
- return tensorflow::Status::OK();
- };
+ std::function<void()> destructor =
+ GetRemoteTensorDestructor(ctx, eager_client, context_id, id, i);
retvals[i] = new TensorHandle(remote_op->id(), i, remote_node_id,
- output_dtypes[i], std::move(callback),
+ output_dtypes[i], std::move(destructor),
op_device, op_device, op->EagerContext());
}
@@ -690,7 +753,7 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
}
// Unable to capture via std::move, so bind instead.
auto* node = new eager::RemoteExecuteNode(
- remote_node_id, request, eager_client, op->Inputs(),
+ remote_node_id, std::move(request), eager_client, op->Inputs(),
std::bind(
[](const gtl::InlinedVector<TensorHandle*, 2>& retvals,
const Status& status, const eager::EnqueueResponse& response) {
@@ -707,7 +770,7 @@ Status EagerRemoteExecute(EagerOperation* op, TensorHandle** retvals,
} else {
Notification n;
Status status;
- eager_client->EnqueueAsync(&request, &response,
+ eager_client->EnqueueAsync(request.get(), &response,
[&n, &status](const Status& s) {
status = s;
n.Notify();
@@ -936,6 +999,8 @@ Status EagerCopyToDevice(TensorHandle* h, EagerContext* ctx,
if (sender_is_local && recver_is_local) {
return LocalEagerCopyToDevice(h, ctx, recv_device, result);
+ } else if (ctx->UseSendTensorRPC() && sender_is_local && !recver_is_local) {
+ return EagerRemoteSendTensor(ctx, h, recv_device, result);
} else {
string wire_id = GetUniqueWireID();
diff --git a/tensorflow/core/common_runtime/eager/kernel_and_device.cc b/tensorflow/core/common_runtime/eager/kernel_and_device.cc
index b410ea175b..dae5d1983f 100644
--- a/tensorflow/core/common_runtime/eager/kernel_and_device.cc
+++ b/tensorflow/core/common_runtime/eager/kernel_and_device.cc
@@ -41,17 +41,22 @@ Status KernelAndDevice::InitOp(Device* device, const NodeDef& ndef,
out->device_ = device;
out->kernel_.reset(k);
out->flib_ = nullptr;
+ out->runner_ = nullptr;
+ out->default_runner_ = [](std::function<void()> f) { f(); };
return s;
}
// static
Status KernelAndDevice::Init(const NodeDef& ndef, FunctionLibraryRuntime* flib,
+ std::function<void(std::function<void()>)>* runner,
KernelAndDevice* out) {
OpKernel* k = nullptr;
Status s = flib->CreateKernel(ndef, &k);
out->device_ = flib->device();
out->kernel_.reset(k);
out->flib_ = flib;
+ out->runner_ = runner;
+ out->default_runner_ = [](std::function<void()> f) { f(); };
return s;
}
@@ -83,10 +88,11 @@ Status KernelAndDevice::Run(std::vector<Tensor>* input_tensors,
if (stats != nullptr) {
params.track_allocations = true;
}
- // TODO(apassos): use a thread pool.
- std::function<void(std::function<void()>)> runner =
- [](std::function<void()> f) { f(); };
- params.runner = &runner;
+ if (runner_ == nullptr) {
+ params.runner = &default_runner_;
+ } else {
+ params.runner = runner_;
+ }
ScopedStepContainer step_container(0, [this](const string& name) {
device_->resource_manager()->Cleanup(name).IgnoreError();
diff --git a/tensorflow/core/common_runtime/eager/kernel_and_device.h b/tensorflow/core/common_runtime/eager/kernel_and_device.h
index c41a0972b1..c0b676b285 100644
--- a/tensorflow/core/common_runtime/eager/kernel_and_device.h
+++ b/tensorflow/core/common_runtime/eager/kernel_and_device.h
@@ -57,6 +57,7 @@ class KernelAndDevice {
// the FunctionLibraryRuntime is pushed on to the caller (see locking in
// c_api.cc).
static Status Init(const NodeDef& ndef, FunctionLibraryRuntime* flib,
+ std::function<void(std::function<void()>)>* runner,
KernelAndDevice* out);
// TODO(ashankar): Remove this
static Status InitOp(Device* device, const NodeDef& ndef,
@@ -88,6 +89,8 @@ class KernelAndDevice {
checkpoint::TensorSliceReaderCacheWrapper slice_reader_cache_;
Rendezvous* rendez_;
DataTypeVector output_dtypes_;
+ std::function<void(std::function<void()>)>* runner_;
+ std::function<void(std::function<void()>)> default_runner_;
};
} // namespace tensorflow
diff --git a/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc b/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc
index b4349e1dee..6abe98f53c 100644
--- a/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc
+++ b/tensorflow/core/common_runtime/eager/kernel_and_device_test.cc
@@ -107,8 +107,8 @@ void BM_KernelAndDeviceInit(int iters) {
KernelAndDevice k(nullptr);
tensorflow::testing::StartTiming();
for (int i = 0; i < iters; ++i) {
- TF_CHECK_OK(
- KernelAndDevice::Init(ndef, env.function_library_runtime(), &k));
+ TF_CHECK_OK(KernelAndDevice::Init(ndef, env.function_library_runtime(),
+ nullptr, &k));
}
}
BENCHMARK(BM_KernelAndDeviceInit);
@@ -128,8 +128,8 @@ void BM_KernelAndDeviceRun(int iters) {
.BuildNodeDef());
TestEnv env;
KernelAndDevice kernel(nullptr);
- TF_CHECK_OK(
- KernelAndDevice::Init(ndef, env.function_library_runtime(), &kernel));
+ TF_CHECK_OK(KernelAndDevice::Init(ndef, env.function_library_runtime(),
+ nullptr, &kernel));
tensorflow::testing::StartTiming();
for (int i = 0; i < iters; ++i) {
TF_CHECK_OK(kernel.Run(&inputs, &outputs, nullptr));
diff --git a/tensorflow/core/common_runtime/eager/tensor_handle.cc b/tensorflow/core/common_runtime/eager/tensor_handle.cc
index f9b9abcc99..85b0b79bce 100644
--- a/tensorflow/core/common_runtime/eager/tensor_handle.cc
+++ b/tensorflow/core/common_runtime/eager/tensor_handle.cc
@@ -109,6 +109,19 @@ Status TensorHandle::TensorAndDevice(const tensorflow::Tensor** tensor,
return Status::OK();
}
+Status TensorHandle::Shape(tensorflow::TensorShape* shape) {
+ if (IsRemote()) {
+ TF_RETURN_IF_ERROR(WaitForNode(remote_shape_node_id_, false));
+ CHECK(remote_shape_ != nullptr);
+ *shape = *(remote_shape_.get());
+ } else {
+ TF_RETURN_IF_ERROR(WaitReady());
+ DCHECK(IsReady());
+ *shape = tensor_.shape();
+ }
+ return Status::OK();
+}
+
Status TensorHandle::NumDims(int* num_dims) {
if (IsRemote()) {
TF_RETURN_IF_ERROR(WaitForNode(remote_shape_node_id_, false));
diff --git a/tensorflow/core/common_runtime/eager/tensor_handle.h b/tensorflow/core/common_runtime/eager/tensor_handle.h
index 46bc94f875..1bc9c6531a 100644
--- a/tensorflow/core/common_runtime/eager/tensor_handle.h
+++ b/tensorflow/core/common_runtime/eager/tensor_handle.h
@@ -109,6 +109,8 @@ class TensorHandle : public core::RefCounted {
tensorflow::Device** device,
tensorflow::Device** op_device);
+ Status Shape(tensorflow::TensorShape* shape);
+
Status NumDims(int* num_dims);
Status Dim(int dim_index, int64* dim);
@@ -138,6 +140,12 @@ class TensorHandle : public core::RefCounted {
remote_shape_ = std::move(remote_shape);
}
+ bool OnHostCPU() {
+ mutex_lock ml(ctx_mutex_);
+ return device_ == nullptr ||
+ (ctx_ == nullptr || ctx_->HostCPU() == device_);
+ }
+
private:
// If the contents of the Tensor pointed to by this handle are yet to be
// computed by an EagerNode, this function will block till that computation is
diff --git a/tensorflow/core/common_runtime/executor.cc b/tensorflow/core/common_runtime/executor.cc
index f7f2cdc14f..8096139d90 100644
--- a/tensorflow/core/common_runtime/executor.cc
+++ b/tensorflow/core/common_runtime/executor.cc
@@ -1966,17 +1966,9 @@ Status ExecutorState::ProcessOutputs(const NodeItem& item, OpKernelContext* ctx,
device_context = device_context_map_[node->id()];
}
- // Experimental: debugger (tfdb) access to intermediate node completion.
- if (item.num_outputs == 0 && impl_->params_.node_outputs_cb != nullptr) {
- // If the node has no output, invoke the callback with output slot set to
- // -1, signifying that this is a no-output node.
- s.Update(impl_->params_.node_outputs_cb(item.node->name(), -1, nullptr,
- false, ctx));
- }
-
for (int i = 0; i < item.num_outputs; ++i) {
const TensorValue val = ctx->release_output(i);
- if (*ctx->is_output_dead() || val.tensor == nullptr) {
+ if (val.tensor == nullptr) {
// Unless it's a Switch or a Recv, the node must produce a
// tensor value at i-th output.
if (!IsSwitch(node) && !IsRecv(node)) {
@@ -2018,13 +2010,6 @@ Status ExecutorState::ProcessOutputs(const NodeItem& item, OpKernelContext* ctx,
LogMemory::RecordTensorOutput(ctx->op_kernel().name(),
ctx->step_id(), i, to_log);
}
-
- // Experimental: debugger (tfdb) access to intermediate node
- // outputs.
- if (impl_->params_.node_outputs_cb != nullptr) {
- s.Update(impl_->params_.node_outputs_cb(item.node->name(), i,
- out->ref, true, ctx));
- }
} else {
// NOTE that std::move is used here, so val.tensor goes to
// uninitialized state (val.tensor->IsInitialized return false).
@@ -2036,12 +2021,6 @@ Status ExecutorState::ProcessOutputs(const NodeItem& item, OpKernelContext* ctx,
LogMemory::RecordTensorOutput(ctx->op_kernel().name(),
ctx->step_id(), i, *out->val);
}
-
- // Experimental: debugger access to intermediate node outputs.
- if (impl_->params_.node_outputs_cb != nullptr) {
- s.Update(impl_->params_.node_outputs_cb(
- item.node->name(), i, out->val.get(), false, ctx));
- }
}
} else {
s.Update(errors::Internal("Output ", i, " of type ",
diff --git a/tensorflow/core/common_runtime/executor.h b/tensorflow/core/common_runtime/executor.h
index e5d7b7c53c..cd01b43aea 100644
--- a/tensorflow/core/common_runtime/executor.h
+++ b/tensorflow/core/common_runtime/executor.h
@@ -103,7 +103,6 @@ class Executor {
const Tensor* tensor, const bool is_ref,
OpKernelContext* ctx)>
NodeOutputsCallback;
- NodeOutputsCallback node_outputs_cb = nullptr;
};
typedef std::function<void(const Status&)> DoneCallback;
virtual void RunAsync(const Args& args, DoneCallback done) = 0;
@@ -139,8 +138,6 @@ struct LocalExecutorParams {
// when the executor is deleted.
std::function<Status(const NodeDef&, OpKernel**)> create_kernel;
std::function<void(OpKernel*)> delete_kernel;
-
- Executor::Args::NodeOutputsCallback node_outputs_cb;
};
::tensorflow::Status NewLocalExecutor(const LocalExecutorParams& params,
std::unique_ptr<const Graph> graph,
diff --git a/tensorflow/core/common_runtime/function.cc b/tensorflow/core/common_runtime/function.cc
index a93cfa2ec5..54bbe84b57 100644
--- a/tensorflow/core/common_runtime/function.cc
+++ b/tensorflow/core/common_runtime/function.cc
@@ -746,6 +746,8 @@ void FunctionLibraryRuntimeImpl::RunRemote(const Options& opts, Handle handle,
rets_alloc_attrs.push_back(ret_alloc_attrs);
}
+ bool allow_dead_tensors = opts.allow_dead_tensors;
+
// The ProcFLR sends the arguments to the function from the source_device to
// the target_device. So here we receive those arguments. Similarly, when the
// computation is done and stored in *rets, we send the return values back
@@ -756,7 +758,7 @@ void FunctionLibraryRuntimeImpl::RunRemote(const Options& opts, Handle handle,
device_context, args_alloc_attrs, rendezvous, remote_args,
[frame, remote_args, item, source_device, target_device,
target_incarnation, rendezvous, device_context, rets, done, exec_args,
- rets_alloc_attrs](const Status& status) {
+ rets_alloc_attrs, allow_dead_tensors](const Status& status) {
Status s = status;
if (s.ok()) {
s = frame->SetArgs(*remote_args);
@@ -769,13 +771,13 @@ void FunctionLibraryRuntimeImpl::RunRemote(const Options& opts, Handle handle,
return;
}
item->exec->RunAsync(
- *exec_args,
- [frame, rets, done, source_device, target_device,
- target_incarnation, rendezvous, device_context, remote_args,
- exec_args, rets_alloc_attrs](const Status& status) {
+ *exec_args, [frame, rets, done, source_device, target_device,
+ target_incarnation, rendezvous, device_context,
+ remote_args, exec_args, rets_alloc_attrs,
+ allow_dead_tensors](const Status& status) {
Status s = status;
if (s.ok()) {
- s = frame->ConsumeRetvals(rets);
+ s = frame->ConsumeRetvals(rets, allow_dead_tensors);
}
delete frame;
if (!s.ok()) {
@@ -859,14 +861,15 @@ void FunctionLibraryRuntimeImpl::Run(const Options& opts, Handle handle,
return;
}
+ bool allow_dead_tensors = opts.allow_dead_tensors;
item->exec->RunAsync(
// Executor args
*exec_args,
// Done callback.
- [frame, rets, done, exec_args](const Status& status) {
+ [frame, rets, done, exec_args, allow_dead_tensors](const Status& status) {
Status s = status;
if (s.ok()) {
- s = frame->ConsumeRetvals(rets);
+ s = frame->ConsumeRetvals(rets, allow_dead_tensors);
}
delete frame;
delete exec_args;
diff --git a/tensorflow/core/common_runtime/gpu/gpu_device.cc b/tensorflow/core/common_runtime/gpu/gpu_device.cc
index 3cb51b0dbc..3292ef2f62 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_device.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_device.cc
@@ -41,6 +41,7 @@ limitations under the License.
#include "tensorflow/core/common_runtime/gpu/gpu_util.h"
#include "tensorflow/core/common_runtime/gpu_device_context.h"
#include "tensorflow/core/common_runtime/local_device.h"
+#include "tensorflow/core/common_runtime/visitable_allocator.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
@@ -224,6 +225,7 @@ class BaseGPUDevice::StreamGroupFactory {
int num_d2d_streams =
options.experimental().num_dev_to_dev_copy_streams();
+ if (num_d2d_streams == 0) num_d2d_streams = 1;
if (num_d2d_streams < 1 || num_d2d_streams > 4) {
LOG(ERROR)
<< "Illegal GPUOptions.experimental.num_dev_to_dev_copy_streams="
@@ -856,7 +858,7 @@ void BaseGPUDevice::ReinitializeDevice(OpKernelContext* context,
static_cast<ConcretePerOpGpuDevice*>(device);
DCHECK(concrete_device);
const cudaStream_t* cuda_stream = reinterpret_cast<const cudaStream_t*>(
- streams_[stream_id]->compute->implementation()->CudaStreamMemberHack());
+ streams_[stream_id]->compute->implementation()->GpuStreamMemberHack());
concrete_device->Reinitialize(context, cuda_stream, tf_gpu_id_, allocator,
scratch_[stream_id]);
}
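
The added guard above treats an unset (zero) `num_dev_to_dev_copy_streams` as 1 and still flags values outside 1-4 as illegal. A hedged sketch of setting the option, assuming the TF 1.x `GPUOptions.Experimental` proto field:

```python
import tensorflow as tf

config = tf.ConfigProto()
config.gpu_options.experimental.num_dev_to_dev_copy_streams = 2  # valid range: 1-4
sess = tf.Session(config=config)
```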
diff --git a/tensorflow/core/common_runtime/gpu/gpu_event_mgr.cc b/tensorflow/core/common_runtime/gpu/gpu_event_mgr.cc
index 4898448476..3c1c31aa73 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_event_mgr.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_event_mgr.cc
@@ -15,11 +15,80 @@ limitations under the License.
#include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h"
+#include "tensorflow/core/platform/stacktrace.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
+namespace {
+// The EventMgr has one thread for the polling loop and one to execute
+// event callback functions. Issues for reconsideration:
+// - Is this the right number of threads?
+// - Should EventMgrs be shared between GPUDevices on a multi-GPU machine?
+static const int kNumThreads = 2;
+} // namespace
+
+namespace gpu_event_mgr {
+class ThreadLabel {
+ public:
+ static const char* GetValue() { return value_; }
+
+ // v must be a static const because value_ will capture and use its value
+ // until reset or thread terminates.
+ static void SetValue(const char* v) { value_ = v; }
+
+ private:
+ static thread_local const char* value_;
+};
+thread_local const char* ThreadLabel::value_ = "";
+
+void WarnIfInCallback(std::function<void()> f) {
+ const char* label = ThreadLabel::GetValue();
+ if (label && !strcmp(label, "gpu_event_mgr")) {
+ if (f) {
+ f();
+ } else {
+ LOG(WARNING) << "Executing inside EventMgr callback thread: "
+ << CurrentStackTrace();
+ }
+ }
+}
+
+void InitThreadpoolLabels(thread::ThreadPool* threadpool) {
+ static const char* label = "gpu_event_mgr";
+ mutex mu;
+ int init_count = 0;
+ condition_variable all_initialized;
+ int exit_count = 0;
+ condition_variable ready_to_exit;
+ const int num_threads = threadpool->NumThreads();
+ for (int i = 0; i < num_threads; ++i) {
+ threadpool->Schedule([num_threads, &mu, &init_count, &all_initialized,
+ &exit_count, &ready_to_exit]() {
+ gpu_event_mgr::ThreadLabel::SetValue(label);
+ mutex_lock l(mu);
+ ++init_count;
+ if (init_count == num_threads) {
+ all_initialized.notify_all();
+ }
+ while (init_count < num_threads) {
+ all_initialized.wait(l);
+ }
+ if (++exit_count == num_threads) {
+ ready_to_exit.notify_all();
+ }
+ });
+ }
+ {
+ mutex_lock l(mu);
+ while (exit_count < num_threads) {
+ ready_to_exit.wait(l);
+ }
+ }
+}
+} // namespace gpu_event_mgr
+
EventMgr::EventMgr(se::StreamExecutor* se, const GPUOptions& gpu_options)
: exec_(se),
deferred_bytes_threshold_(gpu_options.deferred_deletion_bytes()
@@ -31,9 +100,8 @@ EventMgr::EventMgr(se::StreamExecutor* se, const GPUOptions& gpu_options)
accumulated_stream_(nullptr),
accumulated_tensors_(new TensorReferenceVector),
accumulated_tensor_bytes_(0),
- // threadpool_ has 1 thread for the polling loop, and one to execute
- // event callback functions. Maybe we should have more?
- threadpool_(Env::Default(), "GPU_Event_Manager", 2) {
+ threadpool_(Env::Default(), "GPU_Event_Manager", kNumThreads) {
+ gpu_event_mgr::InitThreadpoolLabels(&threadpool_);
StartPollingLoop();
}
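
InitThreadpoolLabels above relies on a barrier-style rendezvous: one closure is scheduled per pool thread, and each closure waits until every closure has started, which guarantees each thread runs exactly one of them and therefore gets labeled. A plain-Python sketch of the same idea (illustrative only, not the EventMgr code):

```python
import threading
from concurrent.futures import ThreadPoolExecutor

def label_all_threads(pool, num_threads, labels):
    barrier = threading.Barrier(num_threads)
    def task():
        labels[threading.get_ident()] = "gpu_event_mgr"
        barrier.wait()  # hold this thread until every pool thread is labeled
    for f in [pool.submit(task) for _ in range(num_threads)]:
        f.result()

labels = {}
with ThreadPoolExecutor(max_workers=2) as pool:
    label_all_threads(pool, 2, labels)
print(len(labels))  # 2 -- one label per pool thread
```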
diff --git a/tensorflow/core/common_runtime/gpu/gpu_event_mgr.h b/tensorflow/core/common_runtime/gpu/gpu_event_mgr.h
index b26f88a201..f0a109cc10 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_event_mgr.h
+++ b/tensorflow/core/common_runtime/gpu/gpu_event_mgr.h
@@ -39,6 +39,25 @@ namespace tensorflow {
class GPUOptions;
+// The callback provided to EventMgr::ThenExecute must not block or take a long
+// time. If it does, performance may be impacted and GPU memory may be
+// exhausted. This macro is for checking that an EventMgr thread is not
+// accidentally entering blocking parts of the code, e.g. the RPC subsystem.
+//
+// Intended use is something like
+//
+// void RespondToAnRPC(Params* params) {
+// WARN_IF_IN_EVENT_MGR_THREAD;
+// if (params->status.ok()) { ...
+//
+namespace gpu_event_mgr {
+// Logs a stack trace if the current thread belongs to this EventMgr
+// object. If f is not nullptr, executes f instead of logging the stack
+// trace.
+void WarnIfInCallback(std::function<void()> f);
+} // namespace gpu_event_mgr
+#define WARN_IF_IN_EVENT_MGR_THREAD gpu_event_mgr::WarnIfInCallback(nullptr)
+
// An object to keep track of pending Events in the StreamExecutor streams
// and associated Tensors that cannot safely be deleted until the associated
// Events are recorded.
@@ -74,6 +93,9 @@ class EventMgr {
FreeMemory(to_free);
}
+ // Execute func when all pending stream actions have completed.
+ // func must be brief and non-blocking since it executes in the one
+ // thread used for all such callbacks and also buffer deletions.
inline void ThenExecute(se::Stream* stream, std::function<void()> func) {
ToFreeVector to_free;
{
diff --git a/tensorflow/core/common_runtime/gpu/gpu_event_mgr_test.cc b/tensorflow/core/common_runtime/gpu/gpu_event_mgr_test.cc
index c5ff6c97a1..d2adf699f5 100644
--- a/tensorflow/core/common_runtime/gpu/gpu_event_mgr_test.cc
+++ b/tensorflow/core/common_runtime/gpu/gpu_event_mgr_test.cc
@@ -19,6 +19,7 @@ limitations under the License.
#include <atomic>
#include "tensorflow/core/common_runtime/gpu/gpu_init.h"
+#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
@@ -243,6 +244,28 @@ TEST(EventMgr, NonEmptyShutdown) {
}
}
+// Tests that WarnIfInCallback() triggers correctly.
+TEST(EventMgr, WarnIfInCallback) {
+ auto stream_exec = GPUMachineManager()->ExecutorForDevice(0).ValueOrDie();
+ EventMgr em(stream_exec, GPUOptions());
+ TEST_EventMgrHelper th(&em);
+ std::unique_ptr<se::Stream> stream(new se::Stream(stream_exec));
+ CHECK(stream);
+ stream->Init();
+ bool hit = false;
+ gpu_event_mgr::WarnIfInCallback([&hit] { hit = true; });
+ EXPECT_FALSE(hit);
+ Notification note;
+ em.ThenExecute(stream.get(), [&hit, &note]() {
+ gpu_event_mgr::WarnIfInCallback([&hit, &note] {
+ hit = true;
+ note.Notify();
+ });
+ });
+ note.WaitForNotification();
+ EXPECT_TRUE(hit);
+}
+
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/core/common_runtime/placer.cc b/tensorflow/core/common_runtime/placer.cc
index 1f0773d387..6781c87f6c 100644
--- a/tensorflow/core/common_runtime/placer.cc
+++ b/tensorflow/core/common_runtime/placer.cc
@@ -30,6 +30,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/util/status_util.h"
namespace tensorflow {
@@ -822,10 +823,10 @@ Status Placer::Run() {
std::vector<Device*>* devices;
Status status = colocation_graph.GetDevicesForNode(node, &devices);
if (!status.ok()) {
- return AttachDef(
- errors::InvalidArgument("Cannot assign a device for operation '",
- node->name(), "': ", status.error_message()),
- *node);
+ return AttachDef(errors::InvalidArgument(
+ "Cannot assign a device for operation ",
+ RichNodeName(node), ": ", status.error_message()),
+ *node);
}
// Returns the first device in sorted devices list so we will always
@@ -869,10 +870,10 @@ Status Placer::Run() {
std::vector<Device*>* devices;
Status status = colocation_graph.GetDevicesForNode(node, &devices);
if (!status.ok()) {
- return AttachDef(
- errors::InvalidArgument("Cannot assign a device for operation '",
- node->name(), "': ", status.error_message()),
- *node);
+ return AttachDef(errors::InvalidArgument(
+ "Cannot assign a device for operation ",
+ RichNodeName(node), ": ", status.error_message()),
+ *node);
}
int assigned_device = -1;
@@ -938,4 +939,22 @@ void Placer::LogDeviceAssignment(const Node* node) const {
}
}
+bool Placer::ClientHandlesErrorFormatting() const {
+ return options_ != nullptr &&
+ options_->config.experimental().client_handles_error_formatting();
+}
+
+// Returns the node name in single quotes. If the client handles error
+// formatting, appends a formatting tag which the client will reformat
+// into, for example, " (defined at filename:123)".
+string Placer::RichNodeName(const Node* node) const {
+ string quoted_name = strings::StrCat("'", node->name(), "'");
+ if (ClientHandlesErrorFormatting()) {
+ string file_and_line = error_format_tag(*node, "${file}:${line}");
+ return strings::StrCat(quoted_name, " (defined at ", file_and_line, ")");
+ } else {
+ return quoted_name;
+ }
+}
+
} // namespace tensorflow
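
The format tag emitted by RichNodeName is only produced when the client opts in via the experimental config option exercised by the tests below. A hedged TF 1.x Python sketch of that opt-in (the client is then responsible for rewriting the `^^node:...^^` tag):

```python
import tensorflow as tf

config = tf.ConfigProto()
config.experimental.client_handles_error_formatting = True
sess = tf.Session(config=config)
```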
diff --git a/tensorflow/core/common_runtime/placer.h b/tensorflow/core/common_runtime/placer.h
index 75dce7c7fe..fce87269c5 100644
--- a/tensorflow/core/common_runtime/placer.h
+++ b/tensorflow/core/common_runtime/placer.h
@@ -87,6 +87,8 @@ class Placer {
// placement if the SessionOptions entry in 'options_' requests it.
void AssignAndLog(int assigned_device, Node* node) const;
void LogDeviceAssignment(const Node* node) const;
+ bool ClientHandlesErrorFormatting() const;
+ string RichNodeName(const Node* node) const;
Graph* const graph_; // Not owned.
const DeviceSet* const devices_; // Not owned.
diff --git a/tensorflow/core/common_runtime/placer_test.cc b/tensorflow/core/common_runtime/placer_test.cc
index 07a7724f16..cede899842 100644
--- a/tensorflow/core/common_runtime/placer_test.cc
+++ b/tensorflow/core/common_runtime/placer_test.cc
@@ -1142,6 +1142,50 @@ TEST_F(PlacerTest, TestNonexistentGpuNoAllowSoftPlacement) {
EXPECT_TRUE(str_util::StrContains(s.error_message(), "/device:fakegpu:11"));
}
+// Test that the "Cannot assign a device" error message contains a format tag
+// when requested.
+TEST_F(PlacerTest, TestNonexistentGpuNoAllowSoftPlacementFormatTag) {
+ Graph g(OpRegistry::Global());
+ { // Scope for temporary variables used to construct g.
+ GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
+ ops::SourceOp("TestDevice",
+ b.opts().WithName("in").WithDevice("/device:fakegpu:11"));
+ TF_EXPECT_OK(BuildGraph(b, &g));
+ }
+
+ SessionOptions options;
+ options.config.mutable_experimental()->set_client_handles_error_formatting(
+ true);
+ Status s = Place(&g, &options);
+ EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
+ EXPECT_TRUE(
+ str_util::StrContains(s.error_message(),
+ "Cannot assign a device for operation 'in'"
+ " (defined at ^^node:in:${file}:${line}^^)"));
+}
+
+// Test that the "Cannot assign a device" error message does not contain a
+// format tag when the client has not requested error formatting.
+TEST_F(PlacerTest, TestNonexistentGpuNoAllowSoftPlacementNoFormatTag) {
+ Graph g(OpRegistry::Global());
+ { // Scope for temporary variables used to construct g.
+ GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
+ ops::SourceOp("TestDevice",
+ b.opts().WithName("in").WithDevice("/device:fakegpu:11"));
+ TF_EXPECT_OK(BuildGraph(b, &g));
+ }
+
+ SessionOptions options;
+ options.config.mutable_experimental()->set_client_handles_error_formatting(
+ false);
+ Status s = Place(&g, &options);
+ EXPECT_EQ(error::INVALID_ARGUMENT, s.code());
+ EXPECT_TRUE(str_util::StrContains(
+ s.error_message(), "Cannot assign a device for operation 'in'"));
+ EXPECT_FALSE(str_util::StrContains(
+ s.error_message(), "'in' (defined at ^^node:in:${file}:${line}^^)"));
+}
+
// Test that placement fails when a node requests an explicit device that is not
// supported by the registered kernels if allow_soft_placement is no set.
TEST_F(PlacerTest, TestUnsupportedDeviceNoAllowSoftPlacement) {
diff --git a/tensorflow/core/common_runtime/process_state.cc b/tensorflow/core/common_runtime/process_state.cc
index 4d83b25ce6..447338e7bd 100644
--- a/tensorflow/core/common_runtime/process_state.cc
+++ b/tensorflow/core/common_runtime/process_state.cc
@@ -71,7 +71,7 @@ ProcessState::MemDesc ProcessState::PtrType(const void* ptr) {
return MemDesc();
}
-Allocator* ProcessState::GetCPUAllocator(int numa_node) {
+VisitableAllocator* ProcessState::GetCPUAllocator(int numa_node) {
CHECK_GE(numa_node, 0);
if (!numa_enabled_) numa_node = 0;
mutex_lock lock(mu_);
diff --git a/tensorflow/core/common_runtime/process_state.h b/tensorflow/core/common_runtime/process_state.h
index 0f4ae230bb..2892677333 100644
--- a/tensorflow/core/common_runtime/process_state.h
+++ b/tensorflow/core/common_runtime/process_state.h
@@ -65,7 +65,7 @@ class ProcessState {
// Returns the one CPUAllocator used for the given numa_node.
// TEMPORARY: ignores numa_node.
- Allocator* GetCPUAllocator(int numa_node);
+ VisitableAllocator* GetCPUAllocator(int numa_node);
typedef std::unordered_map<const void*, MemDesc> MDMap;
@@ -87,7 +87,7 @@ class ProcessState {
mutex mu_;
- std::vector<Allocator*> cpu_allocators_ GUARDED_BY(mu_);
+ std::vector<VisitableAllocator*> cpu_allocators_ GUARDED_BY(mu_);
virtual ~ProcessState();
diff --git a/tensorflow/core/common_runtime/session.cc b/tensorflow/core/common_runtime/session.cc
index 4a9248171b..8c30beeec2 100644
--- a/tensorflow/core/common_runtime/session.cc
+++ b/tensorflow/core/common_runtime/session.cc
@@ -53,27 +53,33 @@ Status Session::PRun(const string& handle,
Session* NewSession(const SessionOptions& options) {
SessionFactory* factory;
- const Status s = SessionFactory::GetFactory(options, &factory);
+ Status s = SessionFactory::GetFactory(options, &factory);
if (!s.ok()) {
LOG(ERROR) << s;
return nullptr;
}
- return factory->NewSession(options);
+ Session* out_session;
+ s = NewSession(options, &out_session);
+ if (!s.ok()) {
+ LOG(ERROR) << "Failed to create session: " << s;
+ return nullptr;
+ }
+ return out_session;
}
Status NewSession(const SessionOptions& options, Session** out_session) {
SessionFactory* factory;
- const Status s = SessionFactory::GetFactory(options, &factory);
+ Status s = SessionFactory::GetFactory(options, &factory);
if (!s.ok()) {
*out_session = nullptr;
LOG(ERROR) << s;
return s;
}
- *out_session = factory->NewSession(options);
- if (!*out_session) {
- return errors::Internal("Failed to create session.");
+ s = factory->NewSession(options, out_session);
+ if (!s.ok()) {
+ *out_session = nullptr;
}
- return Status::OK();
+ return s;
}
Status Reset(const SessionOptions& options,
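With this change, both NewSession overloads propagate the factory's own error status instead of collapsing every failure into a generic internal error. A minimal caller-side sketch, assuming only the public NewSession(options, &session) entry point shown above; the wrapper function and its name are illustrative, not part of this patch:

    #include <memory>
    #include "tensorflow/core/public/session.h"

    tensorflow::Status MakeSession(const tensorflow::SessionOptions& options,
                                   std::unique_ptr<tensorflow::Session>* out) {
      tensorflow::Session* raw = nullptr;
      tensorflow::Status s = tensorflow::NewSession(options, &raw);
      if (!s.ok()) {
        return s;  // Now carries the factory's specific error, not a catch-all.
      }
      out->reset(raw);  // Per the new contract, the caller owns the session on OK.
      return tensorflow::Status::OK();
    }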
diff --git a/tensorflow/core/common_runtime/session_factory.h b/tensorflow/core/common_runtime/session_factory.h
index df3198a70d..81c172c6ae 100644
--- a/tensorflow/core/common_runtime/session_factory.h
+++ b/tensorflow/core/common_runtime/session_factory.h
@@ -30,7 +30,12 @@ struct SessionOptions;
class SessionFactory {
public:
- virtual Session* NewSession(const SessionOptions& options) = 0;
+ // Creates a new session and stores it in *out_session, or fails with an error
+ // status if the Session could not be created. Caller takes ownership of
+ // *out_session if this returns Status::OK().
+ virtual Status NewSession(const SessionOptions& options,
+ Session** out_session) = 0;
+
virtual bool AcceptsOptions(const SessionOptions& options) = 0;
// Abort and close all existing sessions, disconnecting their resources from
diff --git a/tensorflow/core/common_runtime/session_test.cc b/tensorflow/core/common_runtime/session_test.cc
index feaf29c7bb..1fa5aad60c 100644
--- a/tensorflow/core/common_runtime/session_test.cc
+++ b/tensorflow/core/common_runtime/session_test.cc
@@ -47,8 +47,10 @@ class FakeSessionFactory : public SessionFactory {
return str_util::StartsWith(options.target, "fake");
}
- Session* NewSession(const SessionOptions& options) override {
- return nullptr;
+ Status NewSession(const SessionOptions& options,
+ Session** out_session) override {
+ *out_session = nullptr;
+ return Status::OK();
}
};
class FakeSessionRegistrar {
diff --git a/tensorflow/core/common_runtime/threadpool_device.cc b/tensorflow/core/common_runtime/threadpool_device.cc
index 74a87215e1..7406ecf4f8 100644
--- a/tensorflow/core/common_runtime/threadpool_device.cc
+++ b/tensorflow/core/common_runtime/threadpool_device.cc
@@ -111,7 +111,21 @@ Status ThreadPoolDevice::MakeTensorFromProto(
}
#ifdef INTEL_MKL
-REGISTER_MEM_ALLOCATOR("MklCPUAllocator", 200, MklCPUAllocator);
+namespace {
+class MklCPUAllocatorFactory : public AllocatorFactory {
+ public:
+ bool NumaEnabled() override { return false; }
+
+ Allocator* CreateAllocator() override { return new MklCPUAllocator; }
+
+ // Note: Ignores numa_node, for now.
+ virtual SubAllocator* CreateSubAllocator(int numa_node) {
+ return new MklSubAllocator;
+ }
+};
+
+REGISTER_MEM_ALLOCATOR("MklCPUAllocator", 200, MklCPUAllocatorFactory);
+} // namespace
#endif
} // namespace tensorflow
diff --git a/tensorflow/core/debug/BUILD b/tensorflow/core/debug/BUILD
index 36e9b3455a..591c22b8f6 100644
--- a/tensorflow/core/debug/BUILD
+++ b/tensorflow/core/debug/BUILD
@@ -82,25 +82,6 @@ cc_library(
)
tf_cuda_library(
- name = "debug_gateway_internal",
- srcs = ["debug_gateway.cc"],
- hdrs = ["debug_gateway.h"],
- copts = tf_copts(),
- linkstatic = 1,
- deps = [
- ":debug",
- "//tensorflow/core:core_cpu_internal",
- "//tensorflow/core:direct_session_internal",
- "//tensorflow/core:framework",
- "//tensorflow/core:lib",
- "//tensorflow/core:lib_internal",
- "//tensorflow/core:proto_text",
- "//tensorflow/core:protos_all_cc",
- ],
- alwayslink = 1,
-)
-
-tf_cuda_library(
name = "debugger_state_impl",
srcs = ["debugger_state_impl.cc"],
hdrs = ["debugger_state_impl.h"],
@@ -187,42 +168,6 @@ tf_cuda_library(
],
)
-# TODO(cais): Fix flakiness on GPU and change this back to a tf_cc_test_gpu.
-# See b/34081273.
-tf_cc_test(
- name = "debug_gateway_test",
- size = "small",
- srcs = ["debug_gateway_test.cc"],
- args = ["--heap_check=local"],
- linkstatic = tf_kernel_tests_linkstatic(),
- tags = [
- "no_cuda_on_cpu_tap",
- "no_gpu",
- ],
- deps = [
- ":debug",
- ":debug_gateway_internal",
- ":debug_graph_utils",
- "//tensorflow/cc:cc_ops",
- "//tensorflow/core:all_kernels",
- "//tensorflow/core:core_cpu",
- "//tensorflow/core:core_cpu_internal",
- "//tensorflow/core:direct_session",
- "//tensorflow/core:direct_session_internal",
- "//tensorflow/core:framework",
- "//tensorflow/core:framework_internal",
- "//tensorflow/core:gpu_runtime",
- "//tensorflow/core:lib",
- "//tensorflow/core:lib_internal",
- "//tensorflow/core:protos_all_cc",
- "//tensorflow/core:test",
- "//tensorflow/core:test_main",
- "//tensorflow/core:testlib",
- "//tensorflow/core/kernels:debug_ops",
- "//tensorflow/core/kernels:ops_util",
- ],
-)
-
tf_cc_test(
name = "debug_io_utils_test",
size = "small",
diff --git a/tensorflow/core/debug/debug_gateway.cc b/tensorflow/core/debug/debug_gateway.cc
deleted file mode 100644
index 2e1aabd1cc..0000000000
--- a/tensorflow/core/debug/debug_gateway.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/core/debug/debug_gateway.h"
-
-#include <utility>
-
-#include "tensorflow/core/common_runtime/device_factory.h"
-#include "tensorflow/core/common_runtime/session_factory.h"
-#include "tensorflow/core/framework/tensor.h"
-
-namespace tensorflow {
-
-DebugGateway::DebugGateway(DirectSession* session) : session_(session) {
- session_->node_outputs_callback_ =
- [this](const string& node_name, const int output_slot,
- const Tensor* tensor, const bool is_ref, OpKernelContext* ctx) {
- if (comp_cb_ != nullptr && output_slot <= 0) {
- // The node completion callback is invoked once for a node regardless
- // of whether the node has zero, one or more outputs.
- // The output_slot can be negative (-1, or kControlSlot) if
- // node_outputs_callback_ is invoked for a node with no output. If
- // that is the case, notify the callback that the node in question has
- // no output.
- comp_cb_(node_name, output_slot == 0);
- }
-
- // Copy tensor values (e.g., from GPU to host) only if the
- // value callback is not nullptr.
- if (val_cb_ != nullptr && output_slot >= 0) {
- CopyTensor(node_name, output_slot, tensor, ctx,
- [this, node_name, output_slot,
- is_ref](const Tensor* copied_tensor) {
- val_cb_(node_name, output_slot, *copied_tensor, is_ref);
- });
- }
-
- return Status::OK();
- };
-}
-
-DebugGateway::~DebugGateway() {
- if (session_ != nullptr) {
- session_->node_outputs_callback_ = nullptr;
- }
-}
-
-void DebugGateway::SetNodeCompletionCallback(NodeCompletionCallback callback) {
- comp_cb_ = std::move(callback);
-}
-
-void DebugGateway::SetNodeValueCallback(NodeValueCallback callback) {
- val_cb_ = std::move(callback);
-}
-
-void DebugGateway::CopyTensor(const string& node_name, const int output_slot,
- const Tensor* src_tensor, OpKernelContext* ctx,
- CopyDoneCallback copy_done_cb) {
- Device* device = static_cast<Device*>(ctx->device());
-
- // Determine if the tensor is initialized properly.
- // The second part of the check is necessary because in some cases, a
- // tensor can pass the IsInitialized() check, but the dtype is not set,
- // e.g., tf.FIFOQueue.
- if (src_tensor->IsInitialized() && DataTypeSize(src_tensor->dtype()) > 0) {
- // Tensor is initialized.
-
- string tensor_tag = strings::StrCat(node_name, ":", output_slot);
-
- // Create copied tensor on host
- Allocator* cpu_allocator = tensorflow::cpu_allocator();
- Tensor cpu_tensor(cpu_allocator, src_tensor->dtype(), src_tensor->shape());
-
- // Determine if the tensor is on device (GPU) or host (CPU).
- // The second part of the check is necessary because even an OpKernel on
- // may have output tensors allocated on CPU.
- if ((device->name().find("GPU:") != string::npos ||
- device->name().find("SYCL:") != string::npos) &&
- !ctx->output_alloc_attr(output_slot).on_host()) {
- // GPU tensors: Copy it to host (CPU).
- DeviceContext* device_ctxt = ctx->op_device_context();
-
- // Copy device (e.g., GPU) tensor to host and when done, invoke the
- // callback.
- device_ctxt->CopyDeviceTensorToCPU(
- src_tensor, "TensorCopy", device, &cpu_tensor,
- [node_name, cpu_tensor, copy_done_cb](const Status& s) {
- if (s.ok()) {
- copy_done_cb(&cpu_tensor);
- } else {
- LOG(ERROR) << "Copying of device Tensor " << node_name
- << " to CPU for debugging failed.";
- }
- });
- } else {
- // For CPU tensors, copy the source tensor and own the copy, because the
- // value callback may outlive the life time of the tensor and the tensor
- // may shared the underlying buffer with other tensors.
- cpu_tensor.UnsafeCopyFromInternal(*src_tensor, src_tensor->dtype(),
- src_tensor->shape());
-
- copy_done_cb(&cpu_tensor);
- }
- } else {
- // Tensor is not initialized: No need to copy.
- copy_done_cb(src_tensor);
- }
-}
-
-} // namespace tensorflow
diff --git a/tensorflow/core/debug/debug_gateway.h b/tensorflow/core/debug/debug_gateway.h
deleted file mode 100644
index bf5b6e08db..0000000000
--- a/tensorflow/core/debug/debug_gateway.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_DEBUG_DEBUG_SESSION_H_
-#define TENSORFLOW_DEBUG_DEBUG_SESSION_H_
-
-#include <unordered_map>
-
-#include "tensorflow/core/common_runtime/direct_session.h"
-#include "tensorflow/core/common_runtime/executor.h"
-
-namespace tensorflow {
-
-// Experimental. tfdb (TensorFlow Debugger): Gateway to intermediate node
-// outputs during Session Run calls. Currently limited to DirectSession.
-class DebugGateway {
- public:
- DebugGateway(DirectSession* session);
- virtual ~DebugGateway();
-
- // Callback for node completion. This callback is invoked only once for
- // a node regardless of whether it has one or more outputs. The value(s) of
- // the output tensor(s) are not necessarily available when this callback is
- // invoked. They may need to be asynchronously copied from device (e.g.,
- // GPU) to host, hence the need for the NodeValueCallback below.
- //
- // Args:
- // node_name: Name of the node that has just completed execution
- // any_output: Whether the node has any output(s)
- typedef std::function<void(const string& node_name, const bool any_output)>
- NodeCompletionCallback;
- void SetNodeCompletionCallback(NodeCompletionCallback callback);
-
- // Callback for node value. This is invoked when the value of a node's
- // output tensor is available on the host, possibly after copying from
- // a device (e.g., GPU).
- //
- // Args:
- // node_name: Name of the node of which the output has become available
- // output_slot: Output slot number of the output Tensor
- // tensor_value: Reference to the tensor value
- // is_ref: Whether the output of the reference type
- typedef std::function<void(const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref)>
- NodeValueCallback;
- void SetNodeValueCallback(NodeValueCallback callback);
-
- // TODO(cais): Add whitelists for ops/tensors (e.g., {"A:0", "B:0"})
- // for node completion callback (whitelist_comp_) and node value callback
- // (whitelist_val_). If whitelist_comp_ is non-empty, the gateway will
- // invoke the NodeCompletionCallback only for the nodes specified in the
- // whitelist. And so forth for whitelist_val_.
-
- private:
- DirectSession* session_;
- // TODO(cais): DebugGateway currently supports only DirectSession. Add
- // support for GrpcSession.
-
- NodeCompletionCallback comp_cb_ = nullptr;
- NodeValueCallback val_cb_ = nullptr;
-
- typedef std::function<void(const Tensor* dst_tensor)> CopyDoneCallback;
-
- void CopyTensor(const string& node_name, const int output_slot,
- const Tensor* src_tensor, OpKernelContext* ctx,
- CopyDoneCallback copy_done_cb);
-};
-
-} // end namespace tensorflow
-
-#endif // TENSORFLOW_DEBUG_DEBUG_SESSION_H_
diff --git a/tensorflow/core/debug/debug_gateway_test.cc b/tensorflow/core/debug/debug_gateway_test.cc
deleted file mode 100644
index b1bbd3f698..0000000000
--- a/tensorflow/core/debug/debug_gateway_test.cc
+++ /dev/null
@@ -1,1011 +0,0 @@
-/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "tensorflow/core/debug/debug_gateway.h"
-
-#include <algorithm>
-#include <cstdlib>
-#include <memory>
-#include <unordered_map>
-
-#include "tensorflow/core/debug/debug_graph_utils.h"
-#include "tensorflow/core/framework/tensor_testutil.h"
-#include "tensorflow/core/graph/testlib.h"
-#include "tensorflow/core/lib/core/notification.h"
-#include "tensorflow/core/lib/core/status_test_util.h"
-#include "tensorflow/core/lib/core/threadpool.h"
-#include "tensorflow/core/protobuf/rewriter_config.pb.h"
-
-namespace tensorflow {
-namespace {
-
-std::unique_ptr<DirectSession> CreateSession() {
- SessionOptions options;
- // Turn off graph optimizer so we can observe intermediate node states.
- options.config.mutable_graph_options()
- ->mutable_optimizer_options()
- ->set_opt_level(OptimizerOptions_Level_L0);
- options.config.mutable_graph_options()
- ->mutable_rewrite_options()
- ->set_constant_folding(RewriterConfig::OFF);
- options.config.mutable_graph_options()
- ->mutable_rewrite_options()
- ->set_dependency_optimization(RewriterConfig::OFF);
-
- return std::unique_ptr<DirectSession>(
- dynamic_cast<DirectSession*>(NewSession(options)));
-}
-
-class SessionDebugMinusAXTest : public ::testing::Test {
- public:
- void Initialize(std::initializer_list<float> a_values) {
- Graph graph(OpRegistry::Global());
-
-#if GOOGLE_CUDA
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:GPU:0";
-#elif defined(TENSORFLOW_USE_SYCL)
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:SYCL:0";
-#else
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:CPU:0";
-#endif
-
- Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
- test::FillValues<float>(&a_tensor, a_values);
- Node* a = test::graph::Constant(&graph, a_tensor);
- a->set_assigned_device_name(kDeviceName);
- a_ = a->name();
-
- Tensor x_tensor(DT_FLOAT, TensorShape({2, 1}));
- test::FillValues<float>(&x_tensor, {1, 1});
- Node* x = test::graph::Constant(&graph, x_tensor);
- x->set_assigned_device_name(kDeviceName);
- x_ = x->name();
-
- // y = A * x
- Node* y = test::graph::Matmul(&graph, a, x, false, false);
- y->set_assigned_device_name(kDeviceName);
- y_ = y->name();
-
- Node* y_neg = test::graph::Unary(&graph, "Neg", y);
- y_neg_ = y_neg->name();
- y_neg->set_assigned_device_name(kDeviceName);
-
- test::graph::ToGraphDef(&graph, &def_);
- }
-
- string a_;
- string x_;
- string y_;
- string y_neg_;
- GraphDef def_;
-};
-
-TEST_F(SessionDebugMinusAXTest, RunSimpleNetwork) {
- Initialize({3, 2, -1, 0});
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
-
- DebugGateway debug_gateway(session.get());
-
- // Supply completion and value callbacks
- mutex mu;
- // Completed nodes with and without outputs
- std::vector<string> completed_nodes_w_outputs;
- std::vector<string> completed_nodes_wo_outputs;
-
- Notification callbacks_done;
- debug_gateway.SetNodeCompletionCallback(
- [&mu, &completed_nodes_w_outputs, &completed_nodes_wo_outputs](
- const string& node_name, const bool any_output) {
- mutex_lock l(mu);
- if (any_output) {
- completed_nodes_w_outputs.push_back(node_name);
- } else {
- completed_nodes_wo_outputs.push_back(node_name);
- }
- });
-
- std::vector<bool> tensors_initialized;
- std::unordered_map<string, Tensor> tensor_vals;
- // output_slot values recorded in value callbacks
- std::vector<int> output_slots_val;
- // is_ref values recorded in value callbacks
- std::vector<bool> is_refs_val;
-
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &tensors_initialized, &tensor_vals, &output_slots_val,
- &is_refs_val,
- &callbacks_done](const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
- tensors_initialized.push_back(tensor_value.IsInitialized());
- tensor_vals.insert(std::make_pair(node_name, tensor_value));
- output_slots_val.push_back(output_slot);
- is_refs_val.push_back(is_ref);
-
- // Set the notification once we have the value from the target node.
- if (node_name == y_neg_ && !callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- });
-
- TF_ASSERT_OK(session->Create(def_));
-
- std::vector<std::pair<string, Tensor>> inputs;
-
- // Request two targets: one fetch output and one non-fetched output.
- std::vector<string> output_names = {y_ + ":0"};
- std::vector<string> target_nodes = {y_neg_};
- std::vector<Tensor> outputs;
- Status s = session->Run(inputs, output_names, target_nodes, &outputs);
- TF_ASSERT_OK(s);
-
- // Wait for callbacks to complete.
- callbacks_done.WaitForNotification();
-
- ASSERT_EQ(1, outputs.size());
- // The first output should be initialized and have the correct
- // output.
- auto mat = outputs[0].matrix<float>();
- ASSERT_TRUE(outputs[0].IsInitialized());
- EXPECT_FLOAT_EQ(5.0, mat(0, 0));
-
- // Verify the calling history of the completion callback
- // The following verifies each node with output(s) invoked the callback
- // exactly once.
- ASSERT_GE(completed_nodes_w_outputs.size(), 4); // There may be added nodes.
-
- ASSERT_EQ(1, std::count(completed_nodes_w_outputs.begin(),
- completed_nodes_w_outputs.end(), a_));
- ASSERT_EQ(1, std::count(completed_nodes_w_outputs.begin(),
- completed_nodes_w_outputs.end(), x_));
- ASSERT_EQ(1, std::count(completed_nodes_w_outputs.begin(),
- completed_nodes_w_outputs.end(), y_));
- ASSERT_EQ(1, std::count(completed_nodes_w_outputs.begin(),
- completed_nodes_w_outputs.end(), y_neg_));
-
- // Apart from nodes with outputs, there are also no-output (control) nodes.
- // They ought to be captured by the DebugGateway through
- // NodeOutputCallback as well.
- ASSERT_GT(completed_nodes_wo_outputs.size(), 0);
-
- // The DebugGateway should have captured the _SOURCE node.
- ASSERT_LE(1, std::count(completed_nodes_wo_outputs.begin(),
- completed_nodes_wo_outputs.end(), "_SOURCE"));
-
- // Verify the calling history of the value callabck
- ASSERT_EQ(completed_nodes_w_outputs.size(), tensors_initialized.size());
-
- // In this graph, there is no uninitialized node value.
- ASSERT_EQ(
- tensors_initialized.end(),
- std::find(tensors_initialized.begin(), tensors_initialized.end(), false));
-
- ASSERT_EQ(completed_nodes_w_outputs.size(), tensor_vals.size());
- ASSERT_EQ(completed_nodes_w_outputs.size(), output_slots_val.size());
- ASSERT_EQ(completed_nodes_w_outputs.size(), is_refs_val.size());
-
- // Verify the intermediate tensor values captured through the value callback
- auto mat_a = tensor_vals[a_].matrix<float>();
- ASSERT_EQ(3.0, mat_a(0, 0));
- ASSERT_EQ(2.0, mat_a(0, 1));
- ASSERT_EQ(-1.0, mat_a(1, 0));
- ASSERT_EQ(0.0, mat_a(1, 1));
-
- auto mat_x = tensor_vals[x_].matrix<float>();
- ASSERT_EQ(1.0, mat_x(0, 0));
- ASSERT_EQ(1.0, mat_x(1, 0));
-
- auto mat_y = tensor_vals[y_].matrix<float>();
- ASSERT_EQ(5.0, mat_y(0, 0));
- ASSERT_EQ(-1.0, mat_y(1, 0));
-
- auto mat_y_neg = tensor_vals[y_neg_].matrix<float>();
- ASSERT_EQ(-5.0, mat_y_neg(0, 0));
- ASSERT_EQ(1.0, mat_y_neg(1, 0));
-
- // In this graph, all outputs are on the first slot
- ASSERT_EQ(output_slots_val.size(),
- std::count_if(output_slots_val.begin(), output_slots_val.end(),
- [](int slot) { return slot == 0; }));
-
- // In this graph, there is no ref-type tensor.
- ASSERT_EQ(is_refs_val.end(),
- std::find(is_refs_val.begin(), is_refs_val.end(), true));
-}
-
-TEST_F(SessionDebugMinusAXTest, RunSimpleNetworkWithTwoDebugNodesInserted) {
- // Tensor contains one count of NaN
- Initialize({3, std::numeric_limits<float>::quiet_NaN(), -1, 0});
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
-
- DebugGateway debug_gateway(session.get());
-
- // Create debug tensor watch options with two debug ops:
- // DebugIdentity and DebugNanCount
- RunOptions run_opts;
- run_opts.set_output_partition_graphs(true);
-
- const string debug_identity = "DebugIdentity";
- const string debug_nan_count = "DebugNanCount";
- DebugTensorWatch* tensor_watch_opts =
- run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
- tensor_watch_opts->set_node_name(y_);
- tensor_watch_opts->set_output_slot(0);
- tensor_watch_opts->add_debug_ops(debug_identity);
- tensor_watch_opts->add_debug_ops(debug_nan_count);
-
- // Expected name of the inserted debug node
- string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(y_, ":", 0), 0, debug_identity);
- string debug_nan_count_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(y_, ":", 0), 1, debug_nan_count);
-
- // Supply completion and value callbacks
- mutex mu;
- // Completed nodes with and without outputs
- std::vector<string> completed_debug_nodes;
-
- Notification callbacks_done;
- debug_gateway.SetNodeCompletionCallback(
- [&mu, &debug_identity_node_name, &debug_nan_count_node_name,
- &completed_debug_nodes](const string& node_name, const bool any_output) {
- mutex_lock l(mu);
- if (any_output && (node_name == debug_identity_node_name ||
- node_name == debug_nan_count_node_name)) {
- completed_debug_nodes.push_back(node_name);
- }
- });
-
- std::vector<Tensor> watched_tensor_vals;
- std::vector<Tensor> debug_identity_tensor_vals;
- std::vector<Tensor> debug_nan_count_tensor_vals;
-
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &debug_identity_node_name, &debug_nan_count_node_name,
- &watched_tensor_vals, &debug_identity_tensor_vals,
- &debug_nan_count_tensor_vals,
- &callbacks_done](const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
- if (node_name == y_) {
- watched_tensor_vals.push_back(tensor_value);
- } else if (node_name == debug_identity_node_name && output_slot == 0) {
- // output_slot == 0 carries the debug signal. Same below.
- debug_identity_tensor_vals.push_back(tensor_value);
- } else if (node_name == debug_nan_count_node_name && output_slot == 0) {
- debug_nan_count_tensor_vals.push_back(tensor_value);
- }
-
- // Set the notification once we have the value from the target node.
- if (node_name == y_neg_ && !callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- });
-
- TF_ASSERT_OK(session->Create(def_));
-
- std::vector<std::pair<string, Tensor>> inputs;
-
- // Request two targets: one fetch output and one non-fetched output.
- std::vector<string> output_names = {y_ + ":0"};
- std::vector<string> target_nodes = {y_neg_};
- std::vector<Tensor> outputs;
-
- RunMetadata run_metadata;
- Status s = session->Run(run_opts, inputs, output_names, target_nodes,
- &outputs, &run_metadata);
- TF_ASSERT_OK(s);
-
-// Verify the correct number of partition graphs (GraphDefs) outputted
-// through RunMetadata, given whether GPU is involved.
-#if GOOGLE_CUDA
- ASSERT_EQ(2, run_metadata.partition_graphs().size());
-#elif defined(TENSORFLOW_USE_SYCL)
- ASSERT_EQ(2, run_metadata.partition_graphs().size());
-#else
- ASSERT_EQ(1, run_metadata.partition_graphs().size());
-#endif
-
- // Wait for callbacks to complete.
- callbacks_done.WaitForNotification();
-
- // Verify that each of the two debug nodes has completed exactly once.
- ASSERT_EQ(2, completed_debug_nodes.size());
- ASSERT_EQ(
- 1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
- debug_identity_node_name));
- ASSERT_EQ(
- 1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
- debug_nan_count_node_name));
-
- // Verify that the tensor values from the watched node and the identity
- // debug node are received and they are equal (owing to the debug op being
- // "DebugIdentity")
- ASSERT_EQ(1, watched_tensor_vals.size());
- ASSERT_EQ(1, debug_identity_tensor_vals.size());
- auto mat_y = watched_tensor_vals[0].matrix<float>();
- auto mat_identity = debug_identity_tensor_vals[0].matrix<float>();
- // ASSERT_EQ doesn't work for nan == nan
- ASSERT_TRUE(std::isnan(mat_y(0, 0)));
- ASSERT_TRUE(std::isnan(mat_identity(0, 0)));
- ASSERT_EQ(-1, mat_identity(1, 0));
-
- // Verify that the output from the NaN-count debug node indicates exactly
- // one NaN.
- ASSERT_EQ(1, debug_nan_count_tensor_vals.size());
- ASSERT_EQ(1, debug_nan_count_tensor_vals[0].scalar<int64>()());
-}
-
-#if !defined(GOOGLE_CUDA) && !defined(TENSORFLOW_USE_SYCL)
-// TODO(cais): Reinstate the following test for concurrent debugged runs on
-// a GPU once the root cause of the ~0.5% flakiness has been addressed.
-// (b/34081273)
-TEST_F(SessionDebugMinusAXTest,
- RunSimpleNetworkConcurrentlyWithDifferentDebugTensorWatches) {
- // Test concurrent Run() calls on a graph with different debug watches.
-
- Initialize({3, 2, -1, 0});
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
- TF_ASSERT_OK(session->Create(def_));
-
- // Number of concurrent Run() calls to launch.
- const int kConcurrentRuns = 3;
- thread::ThreadPool* tp =
- new thread::ThreadPool(Env::Default(), "test", kConcurrentRuns);
-
- std::vector<string> output_names = {y_ + ":0"};
- std::vector<string> target_nodes = {y_neg_};
-
- mutex mu;
- DebugGateway debug_gateway(session.get());
- std::unordered_map<string, Tensor> debug_identity_tensor_vals;
-
- const string debug_identity = "DebugIdentity";
-
- const string a_debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(a_, ":", 0), 0, debug_identity);
- const string x_debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(x_, ":", 0), 0, debug_identity);
- const string y_debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(y_, ":", 0), 0, debug_identity);
-
- Notification callbacks_done;
- volatile int val_callback_count = 0;
-
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &val_callback_count, &a_debug_identity_node_name,
- &x_debug_identity_node_name, &y_debug_identity_node_name,
- &debug_identity_tensor_vals, &callbacks_done,
- &kConcurrentRuns](const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
-
- if (node_name == a_debug_identity_node_name && output_slot == 0) {
- debug_identity_tensor_vals["a"] = tensor_value;
- val_callback_count++;
- } else if (node_name == x_debug_identity_node_name &&
- output_slot == 0) {
- // output_slot == 0 carries the debug signal.
- debug_identity_tensor_vals["x"] = tensor_value;
- val_callback_count++;
- } else if (node_name == y_debug_identity_node_name &&
- output_slot == 0) {
- debug_identity_tensor_vals["y"] = tensor_value;
- val_callback_count++;
- }
-
- // Set the notification once we have the value from the callbacks from
- // all the concurrent Run() calls.
- if (val_callback_count == kConcurrentRuns &&
- !callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- });
-
- int run_counter = 0;
- mutex run_lock;
-
- // Function to be executed concurrently.
- auto fn = [this, &run_lock, &run_counter, &session, output_names,
- target_nodes, &debug_identity]() {
- // Create unique debug tensor watch options for each of the concurrent
- // run calls.
- RunOptions run_opts;
- run_opts.set_output_partition_graphs(true);
-
- DebugTensorWatch* tensor_watch_opts =
- run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
- tensor_watch_opts->set_output_slot(0);
- tensor_watch_opts->add_debug_ops(debug_identity);
-
- {
- // Let the concurrent runs watch different tensors.
-
- mutex_lock l(run_lock);
-
- if (run_counter == 0) {
- // Let the 1st concurrent run watch a.
- tensor_watch_opts->set_node_name(a_);
- } else if (run_counter == 1) {
- // Let the 2nd concurrent watch x.
- tensor_watch_opts->set_node_name(x_);
- } else if (run_counter == 2) {
- // Let the 3rd concurrent watch y.
- tensor_watch_opts->set_node_name(y_);
- }
-
- run_counter++;
- }
-
- // Run the graph.
- RunMetadata run_metadata;
- std::vector<std::pair<string, Tensor>> inputs;
- std::vector<Tensor> outputs;
- Status s = session->Run(run_opts, inputs, output_names, target_nodes,
- &outputs, &run_metadata);
- TF_ASSERT_OK(s);
-
- ASSERT_EQ(1, run_metadata.partition_graphs().size());
-
- ASSERT_EQ(1, outputs.size());
- ASSERT_TRUE(outputs[0].IsInitialized());
- ASSERT_EQ(TensorShape({2, 1}), outputs[0].shape());
- auto mat = outputs[0].matrix<float>();
- EXPECT_FLOAT_EQ(5.0, mat(0, 0));
- EXPECT_FLOAT_EQ(-1.0, mat(1, 0));
- };
-
- for (int i = 0; i < kConcurrentRuns; ++i) {
- tp->Schedule(fn);
- }
-
- // Wait for the debug callbacks to finish.
- callbacks_done.WaitForNotification();
-
- // Wait for the concurrent functions with Run() calls to finish.
- delete tp;
-
- {
- mutex_lock l(mu);
-
- ASSERT_EQ(kConcurrentRuns, val_callback_count);
- ASSERT_EQ(kConcurrentRuns, debug_identity_tensor_vals.size());
-
- ASSERT_EQ(TensorShape({2, 2}), debug_identity_tensor_vals["a"].shape());
- auto a_mat_identity = debug_identity_tensor_vals["a"].matrix<float>();
- ASSERT_EQ(3.0, a_mat_identity(0, 0));
- ASSERT_EQ(2.0, a_mat_identity(0, 1));
- ASSERT_EQ(-1.0, a_mat_identity(1, 0));
- ASSERT_EQ(0.0, a_mat_identity(1, 1));
-
- ASSERT_EQ(TensorShape({2, 1}), debug_identity_tensor_vals["x"].shape());
- auto x_mat_identity = debug_identity_tensor_vals["x"].matrix<float>();
- ASSERT_EQ(1.0, x_mat_identity(0, 0));
- ASSERT_EQ(1.0, x_mat_identity(1, 0));
-
- ASSERT_EQ(TensorShape({2, 1}), debug_identity_tensor_vals["y"].shape());
- auto y_mat_identity = debug_identity_tensor_vals["y"].matrix<float>();
- ASSERT_EQ(5.0, y_mat_identity(0, 0));
- ASSERT_EQ(-1.0, y_mat_identity(1, 0));
- }
-}
-#endif
-
-class SessionDebugOutputSlotWithoutOutgoingEdgeTest : public ::testing::Test {
- public:
- void Initialize() {
- Graph graph(OpRegistry::Global());
-
-#if GOOGLE_CUDA
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:GPU:0";
-#elif defined(TENSORFLOW_USE_SYCL)
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:SYCL:0";
-#else
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:CPU:0";
-#endif
-
- Tensor a_tensor(DT_FLOAT, TensorShape({1, 1}));
- test::FillValues<float>(&a_tensor, {42.0});
- Node* a = test::graph::Constant(&graph, a_tensor);
- a->set_assigned_device_name(kDeviceName);
-
- Node* c = test::graph::Constant(&graph, a_tensor);
- c->set_assigned_device_name(kDeviceName);
- c_ = c->name();
-
- // Node c will be executed only because of the control edge from c to y.
- // Its output slot (slot 0) does not have an outgoing edge. This test
- // is for testing that the debugger can watch that slot properly.
- Node* y = test::graph::NoOp(&graph, {c});
- y->set_assigned_device_name(kDeviceName);
- y_ = y->name();
-
- test::graph::ToGraphDef(&graph, &def_);
- }
-
- string c_;
- string y_;
- GraphDef def_;
-};
-
-TEST_F(SessionDebugOutputSlotWithoutOutgoingEdgeTest,
- WatchSlotWithoutOutgoingEdge) {
- Initialize();
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
-
- DebugGateway debug_gateway(session.get());
-
- // Supply completion and value callbacks
- mutex mu;
-
- string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(c_, ":", 0), 0, "DebugIdentity");
-
- Notification callbacks_done;
-
- std::vector<Tensor> debug_identity_tensor_vals;
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &callbacks_done, &debug_identity_node_name,
- &debug_identity_tensor_vals](
- const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
-
- if (node_name == debug_identity_node_name && output_slot == 0) {
- debug_identity_tensor_vals.push_back(tensor_value);
-
- if (!callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- }
- });
-
- // Add DebugIdentity watch on c:0, which does not have an outgoing edge.
- RunOptions run_opts;
- run_opts.set_output_partition_graphs(true);
-
- DebugTensorWatch* tensor_watch_opts =
- run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
- tensor_watch_opts->set_node_name(c_);
- tensor_watch_opts->set_output_slot(0);
- tensor_watch_opts->add_debug_ops("DebugIdentity");
-
- TF_ASSERT_OK(session->Create(def_));
-
- // Invoke Session::Run() on y.
- std::vector<std::pair<string, Tensor>> inputs;
- std::vector<string> output_names;
- std::vector<string> target_nodes = {y_};
- std::vector<Tensor> outputs;
-
- RunMetadata run_metadata;
- Status s = session->Run(run_opts, inputs, output_names, target_nodes,
- &outputs, &run_metadata);
- TF_ASSERT_OK(s);
-
- // Wait for callbacks to complete.
- callbacks_done.WaitForNotification();
-
- // Assert that DebugIdentity node watching the control edge has been run.
- ASSERT_EQ(1, debug_identity_tensor_vals.size());
- auto mat_identity = debug_identity_tensor_vals[0].matrix<float>();
- ASSERT_EQ(42.0, mat_identity(0, 0));
-}
-
-class SessionDebugVariableTest : public ::testing::Test {
- public:
- void Initialize() {
- Graph graph(OpRegistry::Global());
-
-#if GOOGLE_CUDA
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:GPU:0";
-#elif defined(TENSORFLOW_USE_SYCL)
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:SYCL:0";
-#else
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:CPU:0";
-#endif
-
- // Define variable node.
- var_node_name_ = "var";
- Node* var =
- test::graph::Var(&graph, DT_FLOAT, TensorShape({3}), var_node_name_);
- var->set_assigned_device_name(kDeviceName);
-
- // Define the initial value and the initial-value node.
- Tensor nan_nan_seven(DT_FLOAT, TensorShape({3}));
- nan_nan_seven.flat<float>()(0) = std::numeric_limits<float>::quiet_NaN();
- nan_nan_seven.flat<float>()(1) = std::numeric_limits<float>::quiet_NaN();
- nan_nan_seven.flat<float>()(2) = 7.0;
-
- init_val_node_name_ = "init_val";
- Node* init_val =
- test::graph::Constant(&graph, nan_nan_seven, init_val_node_name_);
- init_val->set_assigned_device_name(kDeviceName);
-
- // Define node for variable value initialization
- Node* init = test::graph::Assign(&graph, var, init_val);
- init->set_assigned_device_name(kDeviceName);
- init_node_name_ = init->name();
-
- // Define new value node
- Tensor nan_eight_eight(DT_FLOAT, TensorShape({3}));
- nan_eight_eight.flat<float>()(0) = std::numeric_limits<float>::quiet_NaN();
- nan_eight_eight.flat<float>()(1) = 8.0;
- nan_eight_eight.flat<float>()(2) = 8.0;
-
- Node* new_val = test::graph::Constant(&graph, nan_eight_eight);
- new_val->set_assigned_device_name(kDeviceName);
- new_val_node_name_ = new_val->name();
-
- // Define node for assigning new value
- Node* assign = test::graph::Assign(&graph, var, new_val);
- assign->set_assigned_device_name(kDeviceName);
- assign_node_name_ = assign->name();
-
- test::graph::ToGraphDef(&graph, &def_);
- }
-
- string var_node_name_;
- string init_val_node_name_;
- string init_node_name_;
- string new_val_node_name_;
- string assign_node_name_;
- GraphDef def_;
-};
-
-TEST_F(SessionDebugVariableTest, WatchUninitializedVariableWithDebugOps) {
- Initialize();
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
-
- DebugGateway debug_gateway(session.get());
-
- TF_ASSERT_OK(session->Create(def_));
-
- // Set up DebugTensorWatch for an uninitialized tensor (in node var).
- RunOptions run_opts;
- const string debug_identity = "DebugIdentity";
- DebugTensorWatch* tensor_watch_opts =
- run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
- tensor_watch_opts->set_node_name(var_node_name_);
- tensor_watch_opts->set_output_slot(0);
- tensor_watch_opts->add_debug_ops(debug_identity);
-
- // Expected name of the inserted debug node
- string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(var_node_name_, ":", 0), 0, debug_identity);
-
- // Supply completion and value callbacks
- mutex mu;
- // Completed nodes with and without outputs
- std::vector<string> completed_debug_nodes;
-
- Notification callbacks_done;
- debug_gateway.SetNodeCompletionCallback(
- [this, &mu, &debug_identity_node_name, &completed_debug_nodes,
- &callbacks_done](const string& node_name, const bool any_output) {
- mutex_lock l(mu);
- if (any_output && (node_name == debug_identity_node_name)) {
- completed_debug_nodes.push_back(node_name);
- }
- });
-
- std::vector<Tensor> debug_identity_tensor_vals;
-
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &debug_identity_node_name, &debug_identity_tensor_vals,
- &callbacks_done](const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
- if (node_name == debug_identity_node_name && output_slot == 0) {
- // output_slot == 0 carries the debug signal. Same below.
- debug_identity_tensor_vals.push_back(tensor_value);
- }
-
- // Set the notification once we have the value from the target node.
- if (node_name == init_node_name_ && !callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- });
-
- // First run the initialization op
- std::vector<std::pair<string, Tensor>> inputs_init;
- std::vector<Tensor> outputs_init;
-
- RunMetadata run_metadata;
- Status s = session->Run(run_opts, inputs_init, {init_node_name_}, {},
- &outputs_init, &run_metadata);
- TF_ASSERT_OK(s);
-
- callbacks_done.WaitForNotification();
-
- ASSERT_EQ(1, completed_debug_nodes.size());
- ASSERT_EQ(
- 1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
- debug_identity_node_name));
-
- // Assert the output reflects the uninitialized nature of var's tensor.
- ASSERT_EQ(1, debug_identity_tensor_vals.size());
- ASSERT_FALSE(debug_identity_tensor_vals[0].IsInitialized());
- ASSERT_EQ(DT_FLOAT, debug_identity_tensor_vals[0].dtype());
- ASSERT_EQ(TensorShape({3}), debug_identity_tensor_vals[0].shape());
-}
-
-TEST_F(SessionDebugVariableTest, VariableAssignWithDebugOps) {
- // Tensor contains one count of NaN
- Initialize();
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
-
- DebugGateway debug_gateway(session.get());
-
- TF_ASSERT_OK(session->Create(def_));
-
- // First run the initialization op
- std::vector<std::pair<string, Tensor>> inputs_init;
- std::vector<Tensor> outputs_init;
- Status s = session->Run(inputs_init, {init_node_name_}, {}, &outputs_init);
- TF_ASSERT_OK(s);
-
- // Create debug tensor watch options with two ref-type debug ops:
- // DebugIdentity and DebugNanCount
- RunOptions run_opts;
- run_opts.set_output_partition_graphs(true);
- const string debug_identity = "DebugIdentity";
- const string debug_nan_count = "DebugNanCount";
- DebugTensorWatch* tensor_watch_opts =
- run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
- tensor_watch_opts->set_node_name(var_node_name_);
- tensor_watch_opts->set_output_slot(0);
- tensor_watch_opts->add_debug_ops(debug_identity);
- tensor_watch_opts->add_debug_ops(debug_nan_count);
-
- char tempdir_template[] = "/tmp/tfdbg_XXXXXX";
- string temp_dir(mkdtemp(tempdir_template));
- tensor_watch_opts->add_debug_urls(strings::StrCat("file://", temp_dir));
-
- // Expected name of the inserted debug node
- string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(var_node_name_, ":", 0), 0, debug_identity);
- string debug_nan_count_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(var_node_name_, ":", 0), 1, debug_nan_count);
-
- // Supply completion and value callbacks
- mutex mu;
- // Completed nodes with and without outputs
- std::vector<string> completed_debug_nodes;
-
- Notification callbacks_done;
- debug_gateway.SetNodeCompletionCallback(
- [this, &mu, &debug_identity_node_name, &debug_nan_count_node_name,
- &completed_debug_nodes,
- &callbacks_done](const string& node_name, const bool any_output) {
- mutex_lock l(mu);
- if (any_output && (node_name == debug_identity_node_name ||
- node_name == debug_nan_count_node_name)) {
- completed_debug_nodes.push_back(node_name);
- }
- });
-
- std::vector<Tensor> debug_identity_tensor_vals;
- std::vector<Tensor> debug_nan_count_tensor_vals;
-
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &debug_identity_node_name, &debug_nan_count_node_name,
- &debug_identity_tensor_vals, &debug_nan_count_tensor_vals,
- &callbacks_done](const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
- if (node_name == debug_identity_node_name && output_slot == 0) {
- // output_slot == 0 carries the debug signal. Same below.
- debug_identity_tensor_vals.push_back(tensor_value);
- } else if (node_name == debug_nan_count_node_name && output_slot == 0) {
- debug_nan_count_tensor_vals.push_back(tensor_value);
- }
-
- // Set the notification once we have the value from the target node.
- if (node_name == assign_node_name_ &&
- !callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- });
-
- // // Request two targets: one fetch output and one non-fetched output.
- std::vector<std::pair<string, Tensor>> inputs;
- std::vector<string> output_names = {assign_node_name_ + ":0"};
- std::vector<string> target_nodes = {assign_node_name_};
- std::vector<Tensor> outputs;
-
- // Run with RunOptions that has tensor watches
- RunMetadata run_metadata;
- s = session->Run(run_opts, inputs, output_names, target_nodes, &outputs,
- &run_metadata);
- TF_ASSERT_OK(s);
-
-#if GOOGLE_CUDA
- ASSERT_EQ(2, run_metadata.partition_graphs().size());
-#elif defined(TENSORFLOW_USE_SYCL)
- ASSERT_EQ(2, run_metadata.partition_graphs().size());
-#else
- ASSERT_EQ(1, run_metadata.partition_graphs().size());
-#endif
-
- // Wait for callbacks to complete.
- callbacks_done.WaitForNotification();
-
- // Verify that the update has happened properly.
- ASSERT_EQ(1, outputs.size());
- ASSERT_TRUE(std::isnan(outputs[0].vec<float>()(0)));
- ASSERT_EQ(8.0, outputs[0].vec<float>()(1)); // Expect new value
- ASSERT_EQ(8.0, outputs[0].vec<float>()(2)); // Expect new value
-
- // Verify that each of the two debug nodes has completed exactly once.
- ASSERT_EQ(2, completed_debug_nodes.size());
- ASSERT_EQ(
- 1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
- debug_identity_node_name));
- ASSERT_EQ(
- 1, std::count(completed_debug_nodes.begin(), completed_debug_nodes.end(),
- debug_nan_count_node_name));
-
- // Verify that the values from the ref identity node reflects the value
- // before the new assign.
- ASSERT_EQ(1, debug_identity_tensor_vals.size());
-
- auto vec_identity = debug_identity_tensor_vals[0].vec<float>();
- ASSERT_TRUE(std::isnan(vec_identity(0)));
- ASSERT_TRUE(std::isnan(vec_identity(1)));
- ASSERT_EQ(7.0, vec_identity(2));
-
- // Verify that the output from the NaN-count debug node indicates exactly
- // two NaNs, i.e., reflecting the value before the new assign.
- ASSERT_EQ(1, debug_nan_count_tensor_vals.size());
- ASSERT_EQ(2, debug_nan_count_tensor_vals[0].scalar<int64>()());
-}
-
-#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_SYCL)
-class SessionDebugGPUSwitchTest : public ::testing::Test {
- public:
- void Initialize() {
- Graph graph(OpRegistry::Global());
-
-#ifdef GOOGLE_CUDA
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:GPU:0";
-#elif TENSORFLOW_USE_SYCL
- const string kDeviceName = "/job:localhost/replica:0/task:0/device:SYCL:0";
-#endif
-
- Tensor vb(DT_BOOL, TensorShape({}));
- vb.scalar<bool>()() = true;
- Tensor vi(DT_INT64, TensorShape({}));
- vi.scalar<int>()() = 42;
- // So vi is expected to be forwarded to the second output port of sw.
-
- Node* pred = test::graph::Constant(&graph, vb);
- pred->set_assigned_device_name(kDeviceName);
- pred_node_name_ = pred->name();
-
- Node* value = test::graph::Constant(&graph, vi);
- pred->set_assigned_device_name(kDeviceName);
- value_node_name_ = value->name();
-
- Node* sw = test::graph::Switch(&graph, value, pred);
- sw->set_assigned_device_name(kDeviceName);
- sw_node_name_ = sw->name();
-
- Node* z = test::graph::Identity(&graph, sw, 1);
- sw->set_assigned_device_name(kDeviceName);
- z_node_name_ = z->name();
-
- test::graph::ToGraphDef(&graph, &def_);
- }
-
- string pred_node_name_;
- string value_node_name_;
- string sw_node_name_;
- string z_node_name_;
- GraphDef def_;
-};
-
-// Test for debug-watching tensors marked as HOST_MEMORY on GPU.
-TEST_F(SessionDebugGPUSwitchTest, RunSwitchWithHostMemoryDebugOp) {
- Initialize();
- auto session = CreateSession();
- ASSERT_TRUE(session != nullptr);
-
- DebugGateway debug_gateway(session.get());
-
- RunOptions run_opts;
- run_opts.set_output_partition_graphs(true);
- // This is the name of the boolean tensor fed as pred to the Switch node.
- // On GPU, this edge is HOST_MEMORY.
- const string watched_tensor = strings::StrCat(pred_node_name_, "/_1");
-
- const string debug_identity = "DebugIdentity";
- DebugTensorWatch* tensor_watch_opts =
- run_opts.mutable_debug_options()->add_debug_tensor_watch_opts();
- tensor_watch_opts->set_node_name(watched_tensor);
- tensor_watch_opts->set_output_slot(0);
- tensor_watch_opts->add_debug_ops(debug_identity);
-
- // Expected name of the inserted debug node
- string debug_identity_node_name = DebugNodeInserter::GetDebugNodeName(
- strings::StrCat(watched_tensor, ":", 0), 0, debug_identity);
-
- // Supply completion and value callbacks
- mutex mu;
- // Completed nodes with and without outputs
- std::vector<string> completed_nodes_w_outputs;
- std::vector<string> completed_nodes_wo_outputs;
-
- Notification callbacks_done;
- debug_gateway.SetNodeCompletionCallback(
- [&mu, &completed_nodes_w_outputs, &completed_nodes_wo_outputs](
- const string& node_name, const bool any_output) {
- mutex_lock l(mu);
- if (any_output) {
- completed_nodes_w_outputs.push_back(node_name);
- } else {
- completed_nodes_wo_outputs.push_back(node_name);
- }
- });
-
- std::vector<Tensor> debug_identity_tensor_vals;
-
- debug_gateway.SetNodeValueCallback(
- [this, &mu, &debug_identity_node_name, &debug_identity_tensor_vals,
- &callbacks_done](const string& node_name, const int output_slot,
- const Tensor& tensor_value, const bool is_ref) {
- mutex_lock l(mu);
- if (node_name == debug_identity_node_name && output_slot == 0) {
- debug_identity_tensor_vals.push_back(tensor_value);
- }
-
- // Set the notification once we have the value from the target node.
- if (node_name == z_node_name_ && !callbacks_done.HasBeenNotified()) {
- callbacks_done.Notify();
- }
- });
-
- TF_ASSERT_OK(session->Create(def_));
-
- std::vector<std::pair<string, Tensor>> inputs;
-
- // Request two targets: one fetch output and one non-fetched output.
- std::vector<string> output_names = {z_node_name_ + ":0"};
- std::vector<string> target_nodes = {z_node_name_};
- std::vector<Tensor> outputs;
-
- RunMetadata run_metadata;
- Status s = session->Run(run_opts, inputs, output_names, target_nodes,
- &outputs, &run_metadata);
- TF_ASSERT_OK(s);
-
- ASSERT_EQ(2, run_metadata.partition_graphs().size());
-
- // Wait for callbacks to complete.
- callbacks_done.WaitForNotification();
-
- ASSERT_EQ(1, debug_identity_tensor_vals.size());
- ASSERT_TRUE(debug_identity_tensor_vals[0].scalar<bool>()());
-}
-#endif // GOOGLE_CUDA
-
-} // end namespace
-} // end namespace tensorflow
diff --git a/tensorflow/core/distributed_runtime/eager/eager_client.h b/tensorflow/core/distributed_runtime/eager/eager_client.h
index 9ba8c8d80c..707f3234b9 100644
--- a/tensorflow/core/distributed_runtime/eager/eager_client.h
+++ b/tensorflow/core/distributed_runtime/eager/eager_client.h
@@ -39,6 +39,7 @@ class EagerClient {
CLIENT_METHOD(KeepAlive);
CLIENT_METHOD(CloseContext);
CLIENT_METHOD(RegisterFunction);
+ CLIENT_METHOD(SendTensor);
#undef CLIENT_METHOD
};
diff --git a/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc b/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc
index 466e779fab..916c8720f0 100644
--- a/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc
+++ b/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc
@@ -81,10 +81,11 @@ Status GetNumRetvals(tensorflow::EagerContext* context, const string& op_name,
Status EagerServiceImpl::CreateContext(const CreateContextRequest* request,
CreateContextResponse* response) {
- //make sure env_ , env_->rendezvous_mgr available
+ // Make sure env_ and env_->rendezvous_mgr are available.
if (env_ == nullptr || env_->rendezvous_mgr == nullptr) {
- return tensorflow::errors::Internal("invalid eager env_ or env_->rendezvous_mgr.");
- }
+ return tensorflow::errors::Internal(
+ "invalid eager env_ or env_->rendezvous_mgr.");
+ }
std::vector<tensorflow::Device*> devices;
TF_RETURN_IF_ERROR(tensorflow::DeviceFactory::AddDevices(
@@ -266,6 +267,35 @@ Status EagerServiceImpl::RegisterFunction(
return context->Context()->AddFunctionDef(request->function_def());
}
+Status EagerServiceImpl::SendTensor(const SendTensorRequest* request,
+ SendTensorResponse* response) {
+ ServerContext* context = nullptr;
+ TF_RETURN_IF_ERROR(GetServerContext(request->context_id(), &context));
+ core::ScopedUnref context_unref(context);
+
+ tensorflow::gtl::InlinedVector<tensorflow::TensorHandle*, 2> tensors;
+ for (const auto& tensor_proto : request->tensors()) {
+ Tensor tensor;
+ if (!tensor.FromProto(tensor_proto)) {
+ return errors::InvalidArgument("Unable to parse tensor proto");
+ }
+
+ TensorHandle* tensor_handle =
+ new TensorHandle(tensor, nullptr, nullptr, nullptr);
+
+ TensorHandle* copied_handle = nullptr;
+ TF_RETURN_IF_ERROR(EagerCopyToDevice(tensor_handle, context->Context(),
+ request->device_name().c_str(),
+ &copied_handle));
+ tensors.push_back(copied_handle);
+ tensor_handle->Unref();
+ }
+
+ context->AddOperationOutputs(tensors, request->op_id());
+
+ return Status::OK();
+}
+
tensorflow::Status EagerServiceImpl::GetServerContext(
uint64 context_id, ServerContext** server_context) {
mutex_lock l(contexts_mu_);
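The new SendTensor handler expects each request to name an existing context, supply a client-chosen op id, carry one or more serialized tensors, and give a target device. A hedged client-side sketch of composing such a request; the setters are inferred from the fields the handler reads (context_id, op_id, tensors, device_name), and the tensor contents are illustrative:

    tensorflow::eager::SendTensorRequest request;
    request.set_context_id(context_id);  // id previously returned by CreateContext
    request.set_op_id(1);                // id used later to reference the copied tensor
    tensorflow::Tensor t(tensorflow::DT_FLOAT, tensorflow::TensorShape({2, 2}));
    t.flat<float>().setConstant(1.0f);
    t.AsProtoTensorContent(request.add_tensors());
    request.set_device_name("/job:localhost/replica:0/task:0/device:CPU:0");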
diff --git a/tensorflow/core/distributed_runtime/eager/eager_service_impl.h b/tensorflow/core/distributed_runtime/eager/eager_service_impl.h
index b0e4aa84b9..718b4e2457 100644
--- a/tensorflow/core/distributed_runtime/eager/eager_service_impl.h
+++ b/tensorflow/core/distributed_runtime/eager/eager_service_impl.h
@@ -62,6 +62,9 @@ class EagerServiceImpl {
Status RegisterFunction(const RegisterFunctionRequest* request,
RegisterFunctionResponse* response);
+ Status SendTensor(const SendTensorRequest* request,
+ SendTensorResponse* response);
+
protected:
// This is the server-side execution context. All state regarding execution of
// a client's ops is held in this server-side context (all generated tensors,
diff --git a/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc b/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc
index b98386ba86..d1f2a6da8f 100644
--- a/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc
+++ b/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc
@@ -84,7 +84,7 @@ class EagerServiceImplTest : public ::testing::Test {
std::unique_ptr<DeviceMgr> device_mgr_;
};
-void SetTensorProto(AttrValue* val) {
+void SetTensorProto(TensorProto* tensor_proto) {
int64_t dims[] = {2, 2};
float data[] = {1.0f, 2.0f, 3.0f, 4.0f};
TF_Tensor* t = TF_AllocateTensor(
@@ -92,7 +92,7 @@ void SetTensorProto(AttrValue* val) {
memcpy(TF_TensorData(t), &data[0], TF_TensorByteSize(t));
tensorflow::Tensor tensor;
TF_ASSERT_OK(tensorflow::TF_TensorToTensor(t, &tensor));
- tensor.AsProtoTensorContent(val->mutable_tensor());
+ tensor.AsProtoTensorContent(tensor_proto);
TF_DeleteTensor(t);
}
@@ -175,7 +175,7 @@ TEST_F(EagerServiceImplTest, BasicTest) {
val.set_type(tensorflow::DataType::DT_FLOAT);
const_attrs.insert({"dtype", val});
val.Clear();
- SetTensorProto(&val);
+ SetTensorProto(val.mutable_tensor());
const_attrs.insert({"value", val});
AddOperationToEnqueueRequest(1, "Const", {}, const_attrs,
@@ -260,7 +260,7 @@ TEST_F(EagerServiceImplTest, BasicFunctionTest) {
const_attrs.insert({"dtype", val});
val.Clear();
- SetTensorProto(&val);
+ SetTensorProto(val.mutable_tensor());
const_attrs.insert({"value", val});
AddOperationToEnqueueRequest(1, "Const", {}, const_attrs,
@@ -294,6 +294,77 @@ TEST_F(EagerServiceImplTest, BasicFunctionTest) {
&close_context_response));
}
+// Test creates a context and attempts to send a tensor (using the RPC), and
+// then uses the tensor.
+TEST_F(EagerServiceImplTest, SendTensorTest) {
+ TestEagerServiceImpl eager_service_impl(&worker_env_);
+
+ CreateContextRequest request;
+ request.mutable_server_def()->set_job_name("localhost");
+ request.mutable_server_def()->set_task_index(0);
+ request.set_rendezvous_id(random::New64());
+ CreateContextResponse response;
+
+ TF_ASSERT_OK(eager_service_impl.CreateContext(&request, &response));
+
+ uint64 context_id = response.context_id();
+
+ SendTensorRequest send_tensor_request;
+ send_tensor_request.set_context_id(context_id);
+ send_tensor_request.set_op_id(1);
+ SetTensorProto(send_tensor_request.add_tensors());
+ SendTensorResponse send_tensor_response;
+
+ TF_ASSERT_OK(eager_service_impl.SendTensor(&send_tensor_request,
+ &send_tensor_response));
+
+ EnqueueRequest remote_enqueue_request;
+ remote_enqueue_request.set_context_id(context_id);
+ EnqueueResponse remote_enqueue_response;
+
+ std::unordered_map<string, AttrValue> attrs;
+ AttrValue val;
+ val.Clear();
+ val.set_type(tensorflow::DataType::DT_FLOAT);
+ attrs.insert({"T", val});
+ val.Clear();
+ val.set_b(false);
+ attrs.insert({"transpose_a", val});
+ attrs.insert({"transpose_b", val});
+
+ AddOperationToEnqueueRequest(2, "MatMul", {{1, 0}, {1, 0}}, attrs,
+ "/job:localhost/replica:0/task:0/device:CPU:0",
+ &remote_enqueue_request);
+
+ TF_ASSERT_OK(eager_service_impl.Enqueue(&remote_enqueue_request,
+ &remote_enqueue_response));
+
+ const tensorflow::Tensor* t = nullptr;
+ tensorflow::TensorHandle* tensor_handle;
+ TF_ASSERT_OK(eager_service_impl.GetTensorHandle(
+ response.context_id(), RemoteTensorHandleInternal(2, 0), &tensor_handle));
+ TF_ASSERT_OK(tensor_handle->Tensor(&t));
+
+ Device* device = nullptr;
+ TF_ASSERT_OK(tensor_handle->Device(&device));
+ EXPECT_NE(device, nullptr);
+ EXPECT_EQ(device->name(), "/job:localhost/replica:0/task:0/device:CPU:0");
+
+ auto actual = t->flat<float>();
+ EXPECT_EQ(4, actual.size());
+
+ EXPECT_EQ(7, actual(0));
+ EXPECT_EQ(10, actual(1));
+ EXPECT_EQ(15, actual(2));
+ EXPECT_EQ(22, actual(3));
+
+ CloseContextRequest close_context_request;
+ close_context_request.set_context_id(context_id);
+ CloseContextResponse close_context_response;
+ TF_ASSERT_OK(eager_service_impl.CloseContext(&close_context_request,
+ &close_context_response));
+}
+
} // namespace
} // namespace eager
} // namespace tensorflow
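For reference, the MatMul expectations in SendTensorTest follow directly from the 2x2 tensor that SetTensorProto serializes ({1, 2, 3, 4} in row-major order) being multiplied by itself:

    [[1 2]   [[1 2]   [[1*1+2*3  1*2+2*4]   [[ 7 10]
     [3 4]] x [3 4]] = [3*1+4*3  3*2+4*4]] = [15 22]]

which matches the EXPECT_EQ checks of 7, 10, 15 and 22 above.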
diff --git a/tensorflow/core/distributed_runtime/eager/remote_execute_node.h b/tensorflow/core/distributed_runtime/eager/remote_execute_node.h
index 28b68c3b88..0e3a68c4d8 100644
--- a/tensorflow/core/distributed_runtime/eager/remote_execute_node.h
+++ b/tensorflow/core/distributed_runtime/eager/remote_execute_node.h
@@ -29,8 +29,8 @@ namespace eager {
class RemoteExecuteNode : public tensorflow::EagerNode {
public:
RemoteExecuteNode(
- tensorflow::uint64 id, const tensorflow::eager::EnqueueRequest& request,
- tensorflow::eager::EagerClient* eager_client,
+ tensorflow::uint64 id, std::unique_ptr<EnqueueRequest> request,
+ EagerClient* eager_client,
const gtl::InlinedVector<TensorHandle*, 4>& inputs,
std::function<void(const Status& status, const EnqueueResponse& response)>
done_callback)
@@ -45,8 +45,8 @@ class RemoteExecuteNode : public tensorflow::EagerNode {
}
RemoteExecuteNode(tensorflow::uint64 id,
- const tensorflow::eager::EnqueueRequest& request,
- tensorflow::eager::EagerClient* eager_client)
+ std::unique_ptr<EnqueueRequest> request,
+ EagerClient* eager_client)
: tensorflow::EagerNode(id),
request_(std::move(request)),
eager_client_(eager_client) {}
@@ -58,10 +58,10 @@ class RemoteExecuteNode : public tensorflow::EagerNode {
}
tensorflow::Status Run() override {
- tensorflow::eager::EnqueueResponse response;
- tensorflow::Status status;
+ EnqueueResponse response;
+ Status status;
Notification n;
- eager_client_->EnqueueAsync(&request_, &response,
+ eager_client_->EnqueueAsync(request_.get(), &response,
[&n, &status](const tensorflow::Status& s) {
status.Update(s);
n.Notify();
@@ -76,9 +76,8 @@ class RemoteExecuteNode : public tensorflow::EagerNode {
}
private:
- EnqueueRequest request_;
- tensorflow::eager::EagerClient*
- eager_client_; // Not owned, and must outlive the RemoteExecuteNode.
+ std::unique_ptr<EnqueueRequest> request_;
+ EagerClient* eager_client_; // Not owned, and must outlive this node.
// This is required to ensure that the tensor handles stay alive across the
// execution.
diff --git a/tensorflow/core/distributed_runtime/graph_mgr.cc b/tensorflow/core/distributed_runtime/graph_mgr.cc
index e2f13df19f..6c146036ae 100644
--- a/tensorflow/core/distributed_runtime/graph_mgr.cc
+++ b/tensorflow/core/distributed_runtime/graph_mgr.cc
@@ -261,7 +261,7 @@ Status GraphMgr::InitItem(const string& session, const GraphDef& gdef,
optimizer.Optimize(lib, worker_env_->env, params.device, &subgraph,
/*shape_map=*/nullptr);
- // EXPERIMENTAL: tfdbg inserts debug nodes (i.e., probes) to the graph.
+ // TensorFlow Debugger (tfdbg) inserts debug nodes in the graph.
if (!debug_options.debug_tensor_watch_opts().empty()) {
TF_RETURN_IF_ERROR(DecorateAndPublishGraphForDebug(
debug_options, subgraph.get(), params.device));
diff --git a/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc b/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc
index b23466037f..181422118c 100644
--- a/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc
+++ b/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc
@@ -49,6 +49,7 @@ class GrpcEagerClient : public EagerClient {
CLIENT_METHOD(KeepAlive);
CLIENT_METHOD(CloseContext);
CLIENT_METHOD(RegisterFunction);
+ CLIENT_METHOD(SendTensor);
#undef CLIENT_METHOD
diff --git a/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.cc b/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.cc
index 39ab6856c5..ab3aa3fd1d 100644
--- a/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.cc
+++ b/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.cc
@@ -36,6 +36,7 @@ static const char* grpcEagerService_method_names[] = {
"/tensorflow.eager.EagerService/KeepAlive",
"/tensorflow.eager.EagerService/CloseContext",
"/tensorflow.eager.EagerService/RegisterFunction",
+ "/tensorflow.eager.EagerService/SendTensor",
};
std::unique_ptr<EagerService::Stub> EagerService::NewStub(
@@ -62,7 +63,9 @@ EagerService::Stub::Stub(
::grpc::internal::RpcMethod::NORMAL_RPC, channel),
rpcmethod_RegisterFunction_(grpcEagerService_method_names[5],
::grpc::internal::RpcMethod::NORMAL_RPC,
- channel) {}
+ channel),
+ rpcmethod_SendTensor_(grpcEagerService_method_names[6],
+ ::grpc::internal::RpcMethod::NORMAL_RPC, channel) {}
::grpc::Status EagerService::Stub::CreateContext(
::grpc::ClientContext* context, const CreateContextRequest& request,
@@ -106,8 +109,15 @@ EagerService::Stub::Stub(
channel_.get(), rpcmethod_RegisterFunction_, context, request, response);
}
+::grpc::Status EagerService::Stub::SendTensor(::grpc::ClientContext* context,
+ const SendTensorRequest& request,
+ SendTensorResponse* response) {
+ return ::grpc::internal::BlockingUnaryCall(
+ channel_.get(), rpcmethod_SendTensor_, context, request, response);
+}
+
EagerService::AsyncService::AsyncService() {
- for (int i = 0; i < 6; ++i) {
+ for (int i = 0; i < 7; ++i) {
AddMethod(new ::grpc::internal::RpcServiceMethod(
grpcEagerService_method_names[i],
::grpc::internal::RpcMethod::NORMAL_RPC, nullptr));
diff --git a/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.h b/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.h
index 66458186ad..521e0ac4fa 100644
--- a/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.h
+++ b/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service.h
@@ -69,6 +69,9 @@ class EagerService final {
virtual ::grpc::Status RegisterFunction(
::grpc::ClientContext* context, const RegisterFunctionRequest& request,
RegisterFunctionResponse* response) = 0;
+ virtual ::grpc::Status SendTensor(::grpc::ClientContext* context,
+ const SendTensorRequest& request,
+ SendTensorResponse* response) = 0;
};
class Stub final : public StubInterface {
public:
@@ -91,6 +94,9 @@ class EagerService final {
::grpc::Status RegisterFunction(
::grpc::ClientContext* context, const RegisterFunctionRequest& request,
RegisterFunctionResponse* response) override;
+ ::grpc::Status SendTensor(::grpc::ClientContext* context,
+ const SendTensorRequest& request,
+ SendTensorResponse* response) override;
private:
std::shared_ptr< ::grpc::ChannelInterface> channel_;
@@ -100,6 +106,7 @@ class EagerService final {
const ::grpc::internal::RpcMethod rpcmethod_KeepAlive_;
const ::grpc::internal::RpcMethod rpcmethod_CloseContext_;
const ::grpc::internal::RpcMethod rpcmethod_RegisterFunction_;
+ const ::grpc::internal::RpcMethod rpcmethod_SendTensor_;
};
static std::unique_ptr<Stub> NewStub(
const std::shared_ptr< ::grpc::ChannelInterface>& channel,
@@ -157,6 +164,14 @@ class EagerService final {
::grpc::Service::RequestAsyncUnary(5, context, request, response,
new_call_cq, notification_cq, tag);
}
+ void RequestSendTensor(
+ ::grpc::ServerContext* context, SendTensorRequest* request,
+ ::grpc::ServerAsyncResponseWriter<SendTensorResponse>* response,
+ ::grpc::CompletionQueue* new_call_cq,
+ ::grpc::ServerCompletionQueue* notification_cq, void* tag) {
+ ::grpc::Service::RequestAsyncUnary(6, context, request, response,
+ new_call_cq, notification_cq, tag);
+ }
};
};
diff --git a/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service_impl.cc b/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service_impl.cc
index 52e06c263d..f511674e1f 100644
--- a/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service_impl.cc
+++ b/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service_impl.cc
@@ -27,9 +27,7 @@ namespace eager {
GrpcEagerServiceImpl::GrpcEagerServiceImpl(
const WorkerEnv* env, ::grpc::ServerBuilder* server_builder)
- : local_impl_(env) {
- request_handler_threadpool_ =
- MakeUnique<thread::ThreadPool>(env->env, "EagerServiceRequestHandler", 4);
+ : env_(env), local_impl_(env) {
server_builder->RegisterService(&service_);
cq_ = server_builder->AddCompletionQueue();
}
@@ -50,6 +48,7 @@ void GrpcEagerServiceImpl::HandleRPCsLoop() {
ENQUEUE_REQUEST(KeepAlive);
ENQUEUE_REQUEST(CloseContext);
ENQUEUE_REQUEST(RegisterFunction);
+ ENQUEUE_REQUEST(SendTensor);
#undef ENQUEUE_REQUEST
void* tag; // Matches the operation started against this cq_.
diff --git a/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service_impl.h b/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service_impl.h
index 9a94026342..537e9043bd 100644
--- a/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service_impl.h
+++ b/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_service_impl.h
@@ -45,7 +45,7 @@ class GrpcEagerServiceImpl : public AsyncServiceInterface {
private:
#define HANDLER(method) \
void method##Handler(EagerCall<method##Request, method##Response>* call) { \
- request_handler_threadpool_->Schedule([this, call]() { \
+ env_->compute_pool->Schedule([this, call]() { \
call->SendResponse( \
ToGrpcStatus(local_impl_.method(&call->request, &call->response))); \
}); \
@@ -62,8 +62,10 @@ class GrpcEagerServiceImpl : public AsyncServiceInterface {
HANDLER(KeepAlive);
HANDLER(CloseContext);
HANDLER(RegisterFunction);
+ HANDLER(SendTensor);
#undef HANDLER
+ const WorkerEnv* const env_; // Not owned.
EagerServiceImpl local_impl_;
std::unique_ptr<::grpc::Alarm> shutdown_alarm_;
@@ -71,8 +73,6 @@ class GrpcEagerServiceImpl : public AsyncServiceInterface {
std::unique_ptr<::grpc::ServerCompletionQueue> cq_;
tensorflow::eager::grpc::EagerService::AsyncService service_;
- std::unique_ptr<thread::ThreadPool> request_handler_threadpool_;
-
TF_DISALLOW_COPY_AND_ASSIGN(GrpcEagerServiceImpl);
};
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_session.cc b/tensorflow/core/distributed_runtime/rpc/grpc_session.cc
index fd1c150fa7..fdce1b10e0 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_session.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_session.cc
@@ -452,15 +452,12 @@ class GrpcSessionFactory : public SessionFactory {
return str_util::StartsWith(options.target, kSchemePrefix);
}
- Session* NewSession(const SessionOptions& options) override {
- std::unique_ptr<GrpcSession> ret;
- Status s = GrpcSession::Create(options, &ret);
- if (s.ok()) {
- return ret.release();
- } else {
- LOG(ERROR) << "Error during session construction: " << s.ToString();
- return nullptr;
- }
+ Status NewSession(const SessionOptions& options,
+ Session** out_session) override {
+ std::unique_ptr<GrpcSession> session;
+ TF_RETURN_IF_ERROR(GrpcSession::Create(options, &session));
+ *out_session = session.release();
+ return Status::OK();
}
// Invokes the session specific static method to reset containers.
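A minimal sketch of the caller side of the new Status-returning factory API; the `factory` and `options` variables here are assumed to exist in the caller's scope:

  Session* session = nullptr;
  Status s = factory->NewSession(options, &session);
  if (!s.ok()) {
    LOG(ERROR) << "Error during session construction: " << s.ToString();
  } else {
    // On success the caller owns the returned session.
    delete session;
  }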
diff --git a/tensorflow/core/framework/allocator.cc b/tensorflow/core/framework/allocator.cc
index 1c62d37955..888ed0c57b 100644
--- a/tensorflow/core/framework/allocator.cc
+++ b/tensorflow/core/framework/allocator.cc
@@ -91,6 +91,11 @@ void EnableCPUAllocatorFullStats(bool enable) {
cpu_allocator_collect_full_stats = enable;
}
+namespace {
+// A default Allocator for CPU devices. ProcessState::GetCPUAllocator() will
+// return a different version that may perform better, but may also lack the
+// optional stats triggered by the functions above. TODO(tucker): migrate all
+// uses of cpu_allocator() except tests to use ProcessState instead.
class CPUAllocator : public Allocator {
public:
CPUAllocator()
@@ -170,14 +175,42 @@ class CPUAllocator : public Allocator {
TF_DISALLOW_COPY_AND_ASSIGN(CPUAllocator);
};
+class CPUAllocatorFactory : public AllocatorFactory {
+ public:
+ Allocator* CreateAllocator() override { return new CPUAllocator; }
+
+ SubAllocator* CreateSubAllocator(int numa_node) override {
+ return new CPUSubAllocator(new CPUAllocator);
+ }
+
+ private:
+ class CPUSubAllocator : public SubAllocator {
+ public:
+ explicit CPUSubAllocator(CPUAllocator* cpu_allocator)
+ : cpu_allocator_(cpu_allocator) {}
+
+ void* Alloc(size_t alignment, size_t num_bytes) override {
+ return cpu_allocator_->AllocateRaw(alignment, num_bytes);
+ }
+
+ void Free(void* ptr, size_t num_bytes) override {
+ cpu_allocator_->DeallocateRaw(ptr);
+ }
+
+ private:
+ CPUAllocator* cpu_allocator_;
+ };
+};
+
+REGISTER_MEM_ALLOCATOR("DefaultCPUAllocator", 100, CPUAllocatorFactory);
+} // namespace
+
Allocator* cpu_allocator() {
- static Allocator* cpu_alloc = AllocatorRegistry::Global()->GetAllocator();
+ static Allocator* cpu_alloc =
+ AllocatorFactoryRegistry::singleton()->GetAllocator();
if (cpu_allocator_collect_full_stats && !cpu_alloc->TracksAllocationSizes()) {
cpu_alloc = new TrackingAllocator(cpu_alloc, true);
}
return cpu_alloc;
}
-
-REGISTER_MEM_ALLOCATOR("DefaultCPUAllocator", 100, CPUAllocator);
-
} // namespace tensorflow
diff --git a/tensorflow/core/framework/allocator.h b/tensorflow/core/framework/allocator.h
index 2bb4d32d57..774b1fe137 100644
--- a/tensorflow/core/framework/allocator.h
+++ b/tensorflow/core/framework/allocator.h
@@ -376,16 +376,18 @@ struct AllocatorAttributes {
int32 scope_id = 0;
};
-// Returns a trivial implementation of Allocator which uses the system
-// default malloc. The returned allocator is a process singleton.
+// Returns a trivial implementation of Allocator, which is a process singleton.
+// Access through this function is only intended for use in tests and auxiliary
+// processing. Performance sensitive uses should always obtain allocators from
+// ProcessState.
Allocator* cpu_allocator();
-// If 'enable' is true, the process-wide cpu allocator collects
+// If 'enable' is true, the default CPU allocator implementation will collect
// AllocatorStats. By default, it's disabled.
void EnableCPUAllocatorStats(bool enable);
-// If 'enable' is true, the process-wide cpu allocator collects full
-// statistics. By default, it's disabled.
+// If 'enable' is true, the default CPU allocator implementation will collect
+// full statistics. By default, it's disabled.
void EnableCPUAllocatorFullStats(bool enable);
// Abstract interface of an object that does the underlying suballoc/free of
diff --git a/tensorflow/core/framework/allocator_registry.cc b/tensorflow/core/framework/allocator_registry.cc
index 486be39ae3..099c4bacc8 100644
--- a/tensorflow/core/framework/allocator_registry.cc
+++ b/tensorflow/core/framework/allocator_registry.cc
@@ -21,60 +21,110 @@ limitations under the License.
namespace tensorflow {
// static
-AllocatorRegistry* AllocatorRegistry::Global() {
- static AllocatorRegistry* global_allocator_registry = new AllocatorRegistry;
- return global_allocator_registry;
+AllocatorFactoryRegistry* AllocatorFactoryRegistry::singleton() {
+ static AllocatorFactoryRegistry* singleton = new AllocatorFactoryRegistry;
+ return singleton;
}
-Allocator* AllocatorRegistry::GetRegisteredAllocator(const string& name,
- int priority) {
- for (auto entry : allocators_) {
+const AllocatorFactoryRegistry::FactoryEntry*
+AllocatorFactoryRegistry::FindEntry(const string& name, int priority) const {
+ for (auto& entry : factories_) {
if (!name.compare(entry.name) && priority == entry.priority) {
- return entry.allocator;
+ return &entry;
}
}
return nullptr;
}
-void AllocatorRegistry::Register(const string& name, int priority,
- Allocator* allocator) {
+void AllocatorFactoryRegistry::Register(const char* source_file,
+ int source_line, const string& name,
+ int priority,
+ AllocatorFactory* factory) {
+ mutex_lock l(mu_);
+ CHECK(!first_alloc_made_) << "Attempt to register an AllocatorFactory "
+ << "after call to GetAllocator()";
CHECK(!name.empty()) << "Need a valid name for Allocator";
CHECK_GE(priority, 0) << "Priority needs to be non-negative";
- Allocator* existing = GetRegisteredAllocator(name, priority);
+ const FactoryEntry* existing = FindEntry(name, priority);
if (existing != nullptr) {
- // A duplicate is if the registration name and priority match
- // but the Allocator::Name()'s don't match.
- CHECK_EQ(existing->Name(), allocator->Name())
- << "Allocator with name: [" << name << "], type [" << existing->Name()
- << "], priority: [" << priority
- << "] already registered. Choose a different name to register "
- << "an allocator of type " << allocator->Name();
-
- // The allocator names match, so we can just return.
- // It should be safe to delete the allocator since the caller
- // gives up ownership of it.
- delete allocator;
- return;
+ // Duplicate registration is a hard failure.
+ LOG(FATAL) << "New registration for AllocatorFactory with name=" << name
+ << " priority=" << priority << " at location " << source_file
+ << ":" << source_line
+ << " conflicts with previous registration at location "
+ << existing->source_file << ":" << existing->source_line;
}
- AllocatorRegistryEntry tmp_entry;
- tmp_entry.name = name;
- tmp_entry.priority = priority;
- tmp_entry.allocator = allocator;
+ FactoryEntry entry;
+ entry.source_file = source_file;
+ entry.source_line = source_line;
+ entry.name = name;
+ entry.priority = priority;
+ entry.factory.reset(factory);
+ factories_.push_back(std::move(entry));
+}
- allocators_.push_back(tmp_entry);
- int high_pri = -1;
- for (auto entry : allocators_) {
- if (high_pri < entry.priority) {
- m_curr_allocator_ = entry.allocator;
- high_pri = entry.priority;
+Allocator* AllocatorFactoryRegistry::GetAllocator() {
+ mutex_lock l(mu_);
+ first_alloc_made_ = true;
+ FactoryEntry* best_entry = nullptr;
+ for (auto& entry : factories_) {
+ if (best_entry == nullptr) {
+ best_entry = &entry;
+ } else if (entry.priority > best_entry->priority) {
+ best_entry = &entry;
}
}
+ if (best_entry) {
+ if (!best_entry->allocator) {
+ best_entry->allocator.reset(best_entry->factory->CreateAllocator());
+ }
+ return best_entry->allocator.get();
+ } else {
+ LOG(FATAL) << "No registered CPU AllocatorFactory";
+ return nullptr;
+ }
}
-Allocator* AllocatorRegistry::GetAllocator() {
- return CHECK_NOTNULL(m_curr_allocator_);
+SubAllocator* AllocatorFactoryRegistry::GetSubAllocator(int numa_node) {
+ mutex_lock l(mu_);
+ first_alloc_made_ = true;
+ FactoryEntry* best_entry = nullptr;
+ for (auto& entry : factories_) {
+ if (best_entry == nullptr) {
+ best_entry = &entry;
+ } else if (best_entry->factory->NumaEnabled()) {
+ if (entry.factory->NumaEnabled() &&
+ (entry.priority > best_entry->priority)) {
+ best_entry = &entry;
+ }
+ } else {
+ DCHECK(!best_entry->factory->NumaEnabled());
+ if (entry.factory->NumaEnabled() ||
+ (entry.priority > best_entry->priority)) {
+ best_entry = &entry;
+ }
+ }
+ }
+ if (best_entry) {
+ int index = 0;
+ if (numa_node != port::kNUMANoAffinity) {
+ CHECK_LE(numa_node, port::NUMANumNodes());
+ index = 1 + numa_node;
+ }
+ if (best_entry->sub_allocators.size() < (index + 1)) {
+ best_entry->sub_allocators.resize(index + 1);
+ }
+ if (!best_entry->sub_allocators[index].get()) {
+ best_entry->sub_allocators[index].reset(
+ best_entry->factory->CreateSubAllocator(numa_node));
+ }
+ return best_entry->sub_allocators[index].get();
+ } else {
+ LOG(FATAL) << "No registered CPU AllocatorFactory";
+ return nullptr;
+ }
}
} // namespace tensorflow
diff --git a/tensorflow/core/framework/allocator_registry.h b/tensorflow/core/framework/allocator_registry.h
index b26e79ac3b..24f282ce84 100644
--- a/tensorflow/core/framework/allocator_registry.h
+++ b/tensorflow/core/framework/allocator_registry.h
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-// Classes to maintain a static registry of memory allocators
+// Classes to maintain a static registry of memory allocator factories.
#ifndef TENSORFLOW_CORE_FRAMEWORK_ALLOCATOR_REGISTRY_H_
#define TENSORFLOW_CORE_FRAMEWORK_ALLOCATOR_REGISTRY_H_
@@ -21,59 +21,100 @@ limitations under the License.
#include <vector>
#include "tensorflow/core/framework/allocator.h"
+#include "tensorflow/core/platform/numa.h"
namespace tensorflow {
-// A global AllocatorRegistry is used to hold allocators for CPU backends
-class AllocatorRegistry {
+class AllocatorFactory {
public:
- // Add an allocator to the registry. Caller releases ownership of
- // 'allocator'.
- void Register(const string& name, int priority, Allocator* allocator);
+ virtual ~AllocatorFactory() {}
- // Return allocator with highest priority
- // If multiple allocators have the same high priority, return one of them
+ // Returns true if the factory will create a functionally different
+ // SubAllocator for different (legal) values of numa_node.
+ virtual bool NumaEnabled() { return false; }
+
+ // Create an Allocator.
+ virtual Allocator* CreateAllocator() = 0;
+
+  // Create a SubAllocator. If NumaEnabled() is true, the returned SubAllocator
+  // will allocate memory local to numa_node. If numa_node == kNUMANoAffinity,
+  // the allocated memory is not specific to any NUMA node.
+ virtual SubAllocator* CreateSubAllocator(int numa_node) = 0;
+};
+
+// A singleton registry of AllocatorFactories.
+//
+// Allocators should be obtained through ProcessState or cpu_allocator()
+// (deprecated), not directly through this interface. The purpose of this
+// registry is to allow link-time discovery of multiple AllocatorFactories among
+// which ProcessState will obtain the best fit at startup.
+class AllocatorFactoryRegistry {
+ public:
+ AllocatorFactoryRegistry() {}
+ ~AllocatorFactoryRegistry() {}
+
+ void Register(const char* source_file, int source_line, const string& name,
+ int priority, AllocatorFactory* factory);
+
+ // Returns 'best fit' Allocator. Find the factory with the highest priority
+ // and return an allocator constructed by it. If multiple factories have
+ // been registered with the same priority, picks one by unspecified criteria.
Allocator* GetAllocator();
- // Returns the global registry of allocators.
- static AllocatorRegistry* Global();
+ // Returns 'best fit' SubAllocator. First look for the highest priority
+ // factory that is NUMA-enabled. If none is registered, fall back to the
+ // highest priority non-NUMA-enabled factory. If NUMA-enabled, return a
+ // SubAllocator specific to numa_node, otherwise return a NUMA-insensitive
+ // SubAllocator.
+ SubAllocator* GetSubAllocator(int numa_node);
+
+ // Returns the singleton value.
+ static AllocatorFactoryRegistry* singleton();
private:
- typedef struct {
+ mutex mu_;
+ bool first_alloc_made_ = false;
+ struct FactoryEntry {
+ const char* source_file;
+ int source_line;
string name;
int priority;
- Allocator* allocator; // not owned
- } AllocatorRegistryEntry;
-
- // Returns the Allocator registered for 'name' and 'priority',
- // or 'nullptr' if not found.
- Allocator* GetRegisteredAllocator(const string& name, int priority);
-
- std::vector<AllocatorRegistryEntry> allocators_;
- Allocator* m_curr_allocator_; // not owned
+ std::unique_ptr<AllocatorFactory> factory;
+ std::unique_ptr<Allocator> allocator;
+ // Index 0 corresponds to kNUMANoAffinity, other indices are (numa_node +
+ // 1).
+ std::vector<std::unique_ptr<SubAllocator>> sub_allocators;
+ };
+ std::vector<FactoryEntry> factories_ GUARDED_BY(mu_);
+
+ // Returns any FactoryEntry registered under 'name' and 'priority',
+ // or 'nullptr' if none found.
+ const FactoryEntry* FindEntry(const string& name, int priority) const
+ EXCLUSIVE_LOCKS_REQUIRED(mu_);
+
+ TF_DISALLOW_COPY_AND_ASSIGN(AllocatorFactoryRegistry);
};
-namespace allocator_registration {
-
-class AllocatorRegistration {
+class AllocatorFactoryRegistration {
public:
- AllocatorRegistration(const string& name, int priority,
- Allocator* allocator) {
- AllocatorRegistry::Global()->Register(name, priority, allocator);
+ AllocatorFactoryRegistration(const char* file, int line, const string& name,
+ int priority, AllocatorFactory* factory) {
+ AllocatorFactoryRegistry::singleton()->Register(file, line, name, priority,
+ factory);
}
};
-} // namespace allocator_registration
-
-#define REGISTER_MEM_ALLOCATOR(name, priority, allocator) \
- REGISTER_MEM_ALLOCATOR_UNIQ_HELPER(__COUNTER__, name, priority, allocator)
+#define REGISTER_MEM_ALLOCATOR(name, priority, factory) \
+ REGISTER_MEM_ALLOCATOR_UNIQ_HELPER(__COUNTER__, __FILE__, __LINE__, name, \
+ priority, factory)
-#define REGISTER_MEM_ALLOCATOR_UNIQ_HELPER(ctr, name, priority, allocator) \
- REGISTER_MEM_ALLOCATOR_UNIQ(ctr, name, priority, allocator)
+#define REGISTER_MEM_ALLOCATOR_UNIQ_HELPER(ctr, file, line, name, priority, \
+ factory) \
+ REGISTER_MEM_ALLOCATOR_UNIQ(ctr, file, line, name, priority, factory)
-#define REGISTER_MEM_ALLOCATOR_UNIQ(ctr, name, priority, allocator) \
- static allocator_registration::AllocatorRegistration \
- register_allocator_##ctr(name, priority, new allocator)
+#define REGISTER_MEM_ALLOCATOR_UNIQ(ctr, file, line, name, priority, factory) \
+ static AllocatorFactoryRegistration allocator_factory_reg_##ctr( \
+ file, line, name, priority, new factory)
} // namespace tensorflow
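As an illustration of the new registration path, a minimal sketch of a custom factory; MyAllocator is a hypothetical Allocator subclass, the priority 150 is arbitrary, and the nested SubAllocator mirrors the CPUSubAllocator pattern above:

  class MyAllocatorFactory : public AllocatorFactory {
   public:
    Allocator* CreateAllocator() override { return new MyAllocator; }

    SubAllocator* CreateSubAllocator(int numa_node) override {
      // NumaEnabled() keeps its default of false, so numa_node is ignored here.
      return new MySubAllocator(new MyAllocator);
    }

   private:
    class MySubAllocator : public SubAllocator {
     public:
      explicit MySubAllocator(MyAllocator* a) : allocator_(a) {}
      void* Alloc(size_t alignment, size_t num_bytes) override {
        return allocator_->AllocateRaw(alignment, num_bytes);
      }
      void Free(void* ptr, size_t num_bytes) override {
        allocator_->DeallocateRaw(ptr);
      }
     private:
      MyAllocator* allocator_;  // Raw pointer kept for the process lifetime,
                                // mirroring CPUSubAllocator above.
    };
  };

  // A priority above DefaultCPUAllocator (100), so GetAllocator() prefers it.
  REGISTER_MEM_ALLOCATOR("MyAllocator", 150, MyAllocatorFactory);

Note that the registry now treats registration after the first GetAllocator() call as a fatal error, so factories must be registered during static initialization, which the macro above does.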
diff --git a/tensorflow/core/framework/device_base.h b/tensorflow/core/framework/device_base.h
index 922d34fac9..b184fd91e1 100644
--- a/tensorflow/core/framework/device_base.h
+++ b/tensorflow/core/framework/device_base.h
@@ -184,9 +184,7 @@ class DeviceBase {
virtual ScopedAllocatorMgr* GetScopedAllocatorMgr() const { return nullptr; }
- const bool has_eigen_cpu_device() const {
- return !eigen_cpu_devices_.empty();
- }
+ bool has_eigen_cpu_device() const { return !eigen_cpu_devices_.empty(); }
virtual const Eigen::ThreadPoolDevice* eigen_cpu_device();
diff --git a/tensorflow/core/framework/function.cc b/tensorflow/core/framework/function.cc
index 88d9d65f5a..57bcc0f513 100644
--- a/tensorflow/core/framework/function.cc
+++ b/tensorflow/core/framework/function.cc
@@ -865,12 +865,15 @@ Status FunctionCallFrame::GetRetvals(std::vector<Tensor>* rets) const {
return Status::OK();
}
-Status FunctionCallFrame::ConsumeRetvals(std::vector<Tensor>* rets) {
+Status FunctionCallFrame::ConsumeRetvals(std::vector<Tensor>* rets,
+ bool allow_dead_tensors) {
rets->clear();
rets->reserve(rets_.size());
for (size_t i = 0; i < rets_.size(); ++i) {
if (rets_[i].has_val) {
rets->emplace_back(std::move(rets_[i].val));
+ } else if (allow_dead_tensors) {
+ rets->emplace_back();
} else {
return errors::Internal("Retval[", i, "] does not have value");
}
diff --git a/tensorflow/core/framework/function.h b/tensorflow/core/framework/function.h
index 8e607b927c..5da9af7db3 100644
--- a/tensorflow/core/framework/function.h
+++ b/tensorflow/core/framework/function.h
@@ -261,7 +261,10 @@ class FunctionCallFrame : public CallFrameInterface {
// Caller methods.
Status SetArgs(gtl::ArraySlice<Tensor> args);
Status GetRetvals(std::vector<Tensor>* rets) const;
- Status ConsumeRetvals(std::vector<Tensor>* rets);
+
+ // Moves the return values from the frame to rets. If allow_dead_tensors is
+  // false, it will fail if any of the retvals do not have a value.
+ Status ConsumeRetvals(std::vector<Tensor>* rets, bool allow_dead_tensors);
size_t num_args() const override { return arg_types_.size(); }
size_t num_retvals() const override { return ret_types_.size(); }
@@ -510,6 +513,9 @@ class FunctionLibraryRuntime {
// If true, we create a new IntraProcessRendezvous, else use the existing
// one.
bool create_rendezvous = false;
+
+    // If true, allow returning dead tensors.
+ bool allow_dead_tensors = false;
};
typedef std::function<void(const Status&)> DoneCallback;
virtual void Run(const Options& opts, Handle handle,
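A small sketch of the new flag from the caller's point of view; `frame` is assumed to be a FunctionCallFrame whose function body has already run:

  std::vector<Tensor> rets;
  // With allow_dead_tensors = true, retvals that were never produced come back
  // as empty Tensors instead of producing an Internal error.
  Status s = frame.ConsumeRetvals(&rets, /*allow_dead_tensors=*/true);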
diff --git a/tensorflow/core/framework/op_kernel.cc b/tensorflow/core/framework/op_kernel.cc
index 58feec90f0..507aa9e447 100644
--- a/tensorflow/core/framework/op_kernel.cc
+++ b/tensorflow/core/framework/op_kernel.cc
@@ -1061,40 +1061,51 @@ Status SupportedDeviceTypesForNode(
}
void LogAllRegisteredKernels() {
- for (const auto& key_registration : *GlobalKernelRegistryTyped()) {
- const KernelDef& kernel_def(key_registration.second.def);
+ KernelList kernel_list = GetAllRegisteredKernels();
+ for (const auto& kernel_def : kernel_list.kernel()) {
LOG(INFO) << "OpKernel ('" << ProtoShortDebugString(kernel_def) << "')";
}
}
KernelList GetAllRegisteredKernels() {
+ return GetFilteredRegisteredKernels([](const KernelDef& k) { return true; });
+}
+
+KernelList GetFilteredRegisteredKernels(
+ const std::function<bool(const KernelDef&)>& predicate) {
const KernelRegistry* const typed_registry = GlobalKernelRegistryTyped();
KernelList kernel_list;
kernel_list.mutable_kernel()->Reserve(typed_registry->size());
for (const auto& p : *typed_registry) {
- *kernel_list.add_kernel() = p.second.def;
+ const KernelDef& kernel_def = p.second.def;
+ if (predicate(kernel_def)) {
+ *kernel_list.add_kernel() = kernel_def;
+ }
}
return kernel_list;
}
+KernelList GetRegisteredKernelsForOp(StringPiece op_name) {
+ auto op_pred = [op_name](const KernelDef& k) { return k.op() == op_name; };
+ return GetFilteredRegisteredKernels(op_pred);
+}
+
string KernelsRegisteredForOp(StringPiece op_name) {
+ KernelList kernel_list = GetRegisteredKernelsForOp(op_name);
+ if (kernel_list.kernel_size() == 0) return " <no registered kernels>\n";
string ret;
- for (const auto& key_registration : *GlobalKernelRegistryTyped()) {
- const KernelDef& kernel_def(key_registration.second.def);
- if (kernel_def.op() == op_name) {
- strings::StrAppend(&ret, " device='", kernel_def.device_type(), "'");
- if (!kernel_def.label().empty()) {
- strings::StrAppend(&ret, "; label='", kernel_def.label(), "'");
- }
- for (int i = 0; i < kernel_def.constraint_size(); ++i) {
- strings::StrAppend(
- &ret, "; ", kernel_def.constraint(i).name(), " in ",
- SummarizeAttrValue(kernel_def.constraint(i).allowed_values()));
- }
- strings::StrAppend(&ret, "\n");
+ for (const auto& kernel_def : kernel_list.kernel()) {
+ strings::StrAppend(&ret, " device='", kernel_def.device_type(), "'");
+ if (!kernel_def.label().empty()) {
+ strings::StrAppend(&ret, "; label='", kernel_def.label(), "'");
+ }
+ for (int i = 0; i < kernel_def.constraint_size(); ++i) {
+ strings::StrAppend(
+ &ret, "; ", kernel_def.constraint(i).name(), " in ",
+ SummarizeAttrValue(kernel_def.constraint(i).allowed_values()));
}
+ strings::StrAppend(&ret, "\n");
}
- if (ret.empty()) return " <no registered kernels>\n";
return ret;
}
diff --git a/tensorflow/core/framework/op_kernel.h b/tensorflow/core/framework/op_kernel.h
index 6c4c3a2ac1..1fc5e9908e 100644
--- a/tensorflow/core/framework/op_kernel.h
+++ b/tensorflow/core/framework/op_kernel.h
@@ -1044,7 +1044,6 @@ class OpKernelContext {
// For control flow.
FrameAndIter frame_iter() const { return params_->frame_iter; }
bool is_input_dead() const { return params_->is_input_dead; }
- bool* is_output_dead() { return &is_output_dead_; }
// May be used, e.g., to get GPU handles, etc.
// TODO(tucker): Add example usage.
@@ -1143,8 +1142,6 @@ class OpKernelContext {
// Constructed only if <params->record_tensor_accesses>.
ManualConstructor<UniqueTensorReferences> referenced_tensors_ GUARDED_BY(mu_);
- bool is_output_dead_ = false;
-
// The following data members are only used when allocation tracking is
// enabled.
mutable mutex stats_mu_;
@@ -1307,6 +1304,13 @@ void LogAllRegisteredKernels();
// Gets a list of all registered kernels.
KernelList GetAllRegisteredKernels();
+// Gets a list of all registered kernels for which predicate returns true
+KernelList GetFilteredRegisteredKernels(
+ const std::function<bool(const KernelDef&)>& predicate);
+
+// Gets a list of all registered kernels for a given op
+KernelList GetRegisteredKernelsForOp(StringPiece op_name);
+
namespace kernel_factory {
class OpKernelRegistrar {
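A minimal sketch of the new query helpers; the op name "MatMul" is just an arbitrary example:

  // Log every kernel registered for MatMul, e.g. while debugging a
  // "no kernel registered" error.
  KernelList kernels = GetRegisteredKernelsForOp("MatMul");
  for (const auto& kernel_def : kernels.kernel()) {
    LOG(INFO) << kernel_def.op() << " on device " << kernel_def.device_type();
  }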
diff --git a/tensorflow/core/framework/op_kernel_test.cc b/tensorflow/core/framework/op_kernel_test.cc
index b76a3400a8..83dda6579b 100644
--- a/tensorflow/core/framework/op_kernel_test.cc
+++ b/tensorflow/core/framework/op_kernel_test.cc
@@ -965,7 +965,8 @@ BENCHMARK(BM_ConcatInputRange);
BENCHMARK(BM_SelectInputRange);
TEST(RegisteredKernels, CanCallGetAllRegisteredKernels) {
- auto all_registered_kernels = GetAllRegisteredKernels().kernel();
+ auto kernel_list = GetAllRegisteredKernels();
+ auto all_registered_kernels = kernel_list.kernel();
auto has_name_test1 = [](const KernelDef& k) { return k.op() == "Test1"; };
// Verify we can find the "Test1" op registered above
@@ -986,5 +987,20 @@ TEST(RegisteredKernels, CanLogAllRegisteredKernels) {
tensorflow::LogAllRegisteredKernels();
}
+TEST(RegisteredKernels, GetFilteredRegisteredKernels) {
+ auto has_name_test1 = [](const KernelDef& k) { return k.op() == "Test1"; };
+ auto kernel_list = GetFilteredRegisteredKernels(has_name_test1);
+ ASSERT_EQ(kernel_list.kernel_size(), 1);
+ EXPECT_EQ(kernel_list.kernel(0).op(), "Test1");
+ EXPECT_EQ(kernel_list.kernel(0).device_type(), "CPU");
+}
+
+TEST(RegisteredKernels, GetRegisteredKernelsForOp) {
+ auto kernel_list = GetRegisteredKernelsForOp("Test1");
+ ASSERT_EQ(kernel_list.kernel_size(), 1);
+ EXPECT_EQ(kernel_list.kernel(0).op(), "Test1");
+ EXPECT_EQ(kernel_list.kernel(0).device_type(), "CPU");
+}
+
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/core/framework/register_types.h b/tensorflow/core/framework/register_types.h
index e90596980f..f1cd37ecda 100644
--- a/tensorflow/core/framework/register_types.h
+++ b/tensorflow/core/framework/register_types.h
@@ -151,6 +151,12 @@ limitations under the License.
// Defines for sets of types.
+// TODO(b/111604096): Add uint32 and uint64 to TF_CALL_INTEGRAL_TYPES.
+//
+// The uint32 and uint64 types were introduced in 10/2017 to be used via XLA and
+// thus were not included in TF_CALL_INTEGRAL_TYPES. Including them in
+// TF_CALL_INTEGRAL_TYPES should only happen after evaluating the effect on the
+// TF binary size and performance.
#define TF_CALL_INTEGRAL_TYPES(m) \
TF_CALL_int64(m) TF_CALL_int32(m) TF_CALL_uint16(m) TF_CALL_int16(m) \
TF_CALL_uint8(m) TF_CALL_int8(m)
diff --git a/tensorflow/core/framework/tensor.h b/tensorflow/core/framework/tensor.h
index d2f2609d3b..1b19ab5da3 100644
--- a/tensorflow/core/framework/tensor.h
+++ b/tensorflow/core/framework/tensor.h
@@ -482,6 +482,7 @@ class Tensor {
friend class VariableOp; // For access to set_shape
friend class AutoReloadVariableOp; // For access to set_shape
friend class TensorTestHelper; // For access to set_shape
+ friend class CastOpBase; // For access to set_dtype;
friend class OpKernelContext; // For access to RefCountIsOne().
friend class ScopedAllocator; // For access to buf_.
friend class XlaTensor; // For access to RefCountIsOne().
diff --git a/tensorflow/core/graph/algorithm.cc b/tensorflow/core/graph/algorithm.cc
index 4652fbe406..9b4200e0b4 100644
--- a/tensorflow/core/graph/algorithm.cc
+++ b/tensorflow/core/graph/algorithm.cc
@@ -25,7 +25,8 @@ namespace tensorflow {
void DFS(const Graph& g, const std::function<void(Node*)>& enter,
const std::function<void(Node*)>& leave,
- const NodeComparator& stable_comparator) {
+ const NodeComparator& stable_comparator,
+ const EdgeFilter& edge_filter) {
// Stack of work to do.
struct Work {
Node* node;
@@ -52,7 +53,6 @@ void DFS(const Graph& g, const std::function<void(Node*)>& enter,
// Arrange to call leave(n) when all done with descendants.
if (leave) stack.push_back(Work{n, true});
- gtl::iterator_range<NeighborIter> nodes = n->out_nodes();
auto add_work = [&visited, &stack](Node* out) {
if (!visited[out->id()]) {
// Note; we must not mark as visited until we actually process it.
@@ -62,16 +62,20 @@ void DFS(const Graph& g, const std::function<void(Node*)>& enter,
if (stable_comparator) {
std::vector<Node*> nodes_sorted;
- for (Node* out : nodes) {
- nodes_sorted.emplace_back(out);
+ for (const Edge* out_edge : n->out_edges()) {
+ if (!edge_filter || edge_filter(*out_edge)) {
+ nodes_sorted.emplace_back(out_edge->dst());
+ }
}
std::sort(nodes_sorted.begin(), nodes_sorted.end(), stable_comparator);
for (Node* out : nodes_sorted) {
add_work(out);
}
} else {
- for (Node* out : nodes) {
- add_work(out);
+ for (const Edge* out_edge : n->out_edges()) {
+ if (!edge_filter || edge_filter(*out_edge)) {
+ add_work(out_edge->dst());
+ }
}
}
}
@@ -118,8 +122,6 @@ void ReverseDFSFromHelper(const Graph& g, gtl::ArraySlice<T> start,
// Arrange to call leave(n) when all done with descendants.
if (leave) stack.push_back(Work{n, true});
- gtl::iterator_range<NeighborIter> nodes = n->in_nodes();
-
auto add_work = [&visited, &stack](T out) {
if (!visited[out->id()]) {
// Note; we must not mark as visited until we actually process it.
@@ -129,16 +131,16 @@ void ReverseDFSFromHelper(const Graph& g, gtl::ArraySlice<T> start,
if (stable_comparator) {
std::vector<T> nodes_sorted;
- for (T in : nodes) {
- nodes_sorted.emplace_back(in);
+ for (const Edge* in_edge : n->in_edges()) {
+ nodes_sorted.emplace_back(in_edge->src());
}
std::sort(nodes_sorted.begin(), nodes_sorted.end(), stable_comparator);
for (T in : nodes_sorted) {
add_work(in);
}
} else {
- for (T in : nodes) {
- add_work(in);
+ for (const Edge* in_edge : n->in_edges()) {
+ add_work(in_edge->src());
}
}
}
@@ -161,14 +163,17 @@ void ReverseDFSFrom(const Graph& g, gtl::ArraySlice<Node*> start,
}
void GetPostOrder(const Graph& g, std::vector<Node*>* order,
- const NodeComparator& stable_comparator) {
+ const NodeComparator& stable_comparator,
+ const EdgeFilter& edge_filter) {
order->clear();
- DFS(g, nullptr, [order](Node* n) { order->push_back(n); }, stable_comparator);
+ DFS(g, nullptr, [order](Node* n) { order->push_back(n); }, stable_comparator,
+ edge_filter);
}
void GetReversePostOrder(const Graph& g, std::vector<Node*>* order,
- const NodeComparator& stable_comparator) {
- GetPostOrder(g, order, stable_comparator);
+ const NodeComparator& stable_comparator,
+ const EdgeFilter& edge_filter) {
+ GetPostOrder(g, order, stable_comparator, edge_filter);
std::reverse(order->begin(), order->end());
}
diff --git a/tensorflow/core/graph/algorithm.h b/tensorflow/core/graph/algorithm.h
index ac4a099013..5bbbc6f6dc 100644
--- a/tensorflow/core/graph/algorithm.h
+++ b/tensorflow/core/graph/algorithm.h
@@ -28,6 +28,8 @@ namespace tensorflow {
// Comparator for two nodes. This is used in order to get a stable ordering.
using NodeComparator = std::function<bool(const Node*, const Node*)>;
+using EdgeFilter = std::function<bool(const Edge&)>;
+
// Compares two nodes based on their ids.
struct NodeComparatorID {
bool operator()(const Node* n1, const Node* n2) const {
@@ -47,9 +49,11 @@ struct NodeComparatorName {
// If leave is not empty, calls leave(n) after visiting all children of n.
// If stable_comparator is set, a stable ordering of visit is achieved by
// sorting a node's neighbors first before visiting them.
+// If edge_filter is set, edges for which edge_filter returns false are ignored.
extern void DFS(const Graph& g, const std::function<void(Node*)>& enter,
const std::function<void(Node*)>& leave,
- const NodeComparator& stable_comparator = {});
+ const NodeComparator& stable_comparator = {},
+ const EdgeFilter& edge_filter = {});
// Perform a reverse depth-first-search on g starting at the sink node.
// If enter is not empty, calls enter(n) before visiting any parents of n.
@@ -83,15 +87,21 @@ extern void ReverseDFSFrom(const Graph& g, gtl::ArraySlice<const Node*> start,
// If stable_comparator is set, a stable ordering of visit is achieved by
// sorting a node's neighbors first before visiting them.
//
+// If edge_filter is set, edges for which edge_filter returns false are ignored.
+//
// REQUIRES: order is not NULL.
void GetPostOrder(const Graph& g, std::vector<Node*>* order,
- const NodeComparator& stable_comparator = {});
+ const NodeComparator& stable_comparator = {},
+ const EdgeFilter& edge_filter = {});
// Stores in *order the reverse post-order numbering of all nodes
// If stable_comparator is set, a stable ordering of visit is achieved by
// sorting a node's neighbors first before visiting them.
+//
+// If edge_filter is set, edges for which edge_filter returns false are ignored.
void GetReversePostOrder(const Graph& g, std::vector<Node*>* order,
- const NodeComparator& stable_comparator = {});
+ const NodeComparator& stable_comparator = {},
+ const EdgeFilter& edge_filter = {});
// Prune nodes in "g" that are not in some path from the source node
// to any node in 'nodes'. Returns true if changes were made to the graph.
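A minimal sketch of the new edge_filter parameter, computing a post-order that skips control edges; `graph` is assumed to be an existing Graph and Edge::IsControlEdge() is the usual accessor:

  std::vector<Node*> order;
  GetPostOrder(graph, &order, /*stable_comparator=*/{},
               /*edge_filter=*/[](const Edge& e) { return !e.IsControlEdge(); });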
diff --git a/tensorflow/core/graph/algorithm_test.cc b/tensorflow/core/graph/algorithm_test.cc
index f67d5a2fd2..60a3e66aa1 100644
--- a/tensorflow/core/graph/algorithm_test.cc
+++ b/tensorflow/core/graph/algorithm_test.cc
@@ -36,6 +36,11 @@ namespace {
REGISTER_OP("TestParams").Output("o: float");
REGISTER_OP("TestInput").Output("a: float").Output("b: float");
REGISTER_OP("TestMul").Input("a: float").Input("b: float").Output("o: float");
+REGISTER_OP("TestUnary").Input("a: float").Output("o: float");
+REGISTER_OP("TestBinary")
+ .Input("a: float")
+ .Input("b: float")
+ .Output("o: float");
// Compares that the order of nodes in 'inputs' respects the
// pair orders described in 'ordered_pairs'.
@@ -148,5 +153,52 @@ TEST(AlgorithmTest, ReversePostOrderStable) {
EXPECT_TRUE(ExpectBefore({{"t2", "t3"}}, order, &error));
}
}
+
+TEST(AlgorithmTest, PostOrderWithEdgeFilter) {
+ GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
+ string error;
+ Node* n0 = ops::SourceOp("TestParams", b.opts().WithName("n0"));
+ Node* n1 = ops::UnaryOp("TestUnary", n0, b.opts().WithName("n1"));
+ Node* n2 = ops::UnaryOp("TestUnary", n1, b.opts().WithName("n2"));
+ Node* n3 = ops::BinaryOp("TestBinary", n2, n0, b.opts().WithName("n3"));
+
+ Graph g(OpRegistry::Global());
+ TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
+
+ g.AddEdge(g.FindNodeId(n3->id()), 0, g.FindNodeId(n1->id()), 1);
+
+ std::vector<Node*> post_order;
+ auto edge_filter = [&](const Edge& e) {
+ return !(e.src()->id() == n3->id() && e.dst()->id() == n1->id());
+ };
+
+ std::vector<Node*> expected_post_order = {
+ g.sink_node(), g.FindNodeId(n3->id()), g.FindNodeId(n2->id()),
+ g.FindNodeId(n1->id()), g.FindNodeId(n0->id()), g.source_node()};
+
+ std::vector<Node*> expected_reverse_post_order = expected_post_order;
+ std::reverse(expected_reverse_post_order.begin(),
+ expected_reverse_post_order.end());
+
+ GetPostOrder(g, &post_order, /*stable_comparator=*/{},
+ /*edge_filter=*/edge_filter);
+
+ ASSERT_EQ(expected_post_order.size(), post_order.size());
+ for (int i = 0; i < post_order.size(); i++) {
+ CHECK_EQ(post_order[i], expected_post_order[i])
+ << post_order[i]->name() << " vs. " << expected_post_order[i]->name();
+ }
+
+ std::vector<Node*> reverse_post_order;
+ GetReversePostOrder(g, &reverse_post_order, /*stable_comparator=*/{},
+ /*edge_filter=*/edge_filter);
+
+ ASSERT_EQ(expected_reverse_post_order.size(), reverse_post_order.size());
+ for (int i = 0; i < reverse_post_order.size(); i++) {
+ CHECK_EQ(reverse_post_order[i], expected_reverse_post_order[i])
+ << reverse_post_order[i]->name() << " vs. "
+ << expected_reverse_post_order[i]->name();
+ }
+}
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/core/graph/graph_constructor.cc b/tensorflow/core/graph/graph_constructor.cc
index add26f3b71..8c73f8f712 100644
--- a/tensorflow/core/graph/graph_constructor.cc
+++ b/tensorflow/core/graph/graph_constructor.cc
@@ -1042,6 +1042,14 @@ Status GraphConstructor::Convert() {
}
if (processed < node_defs_.size()) {
+    LOG(WARNING) << "IN " << __func__ << ": "
+                 << (node_defs_.size() - processed) << " NODES IN A CYCLE";
+ for (int64 i = 0; i < node_defs_.size(); i++) {
+ if (pending_count_[i] != 0) {
+        LOG(WARNING) << "PENDING: " << SummarizeNodeDef(*node_defs_[i])
+                     << " WITH PENDING COUNT = " << pending_count_[i];
+ }
+ }
return errors::InvalidArgument(node_defs_.size() - processed,
" nodes in a cycle");
}
diff --git a/tensorflow/core/grappler/clusters/cluster.cc b/tensorflow/core/grappler/clusters/cluster.cc
index 8d8c6084ec..6d84283e68 100644
--- a/tensorflow/core/grappler/clusters/cluster.cc
+++ b/tensorflow/core/grappler/clusters/cluster.cc
@@ -29,6 +29,14 @@ void Cluster::AllowSoftPlacement(bool soft_placement_state) {
options_.config.set_allow_soft_placement(soft_placement_state);
}
+void Cluster::SetNumInterOpThreads(int num_threads) {
+ for (int i = 0; i < options_.config.session_inter_op_thread_pool_size();
+ ++i) {
+ options_.config.mutable_session_inter_op_thread_pool(i)->set_num_threads(
+ num_threads);
+ }
+}
+
void Cluster::SetNumWarmupSteps(int num_steps) {
options_.config.mutable_graph_options()->set_build_cost_model_after(
num_steps);
diff --git a/tensorflow/core/grappler/clusters/cluster.h b/tensorflow/core/grappler/clusters/cluster.h
index 06db36b3aa..e94fb900c0 100644
--- a/tensorflow/core/grappler/clusters/cluster.h
+++ b/tensorflow/core/grappler/clusters/cluster.h
@@ -65,6 +65,9 @@ class Cluster {
// with reftype input(s) which are from CPU.
void AllowSoftPlacement(bool soft_placement_state);
+ // Update the number of inter-op threads for each per-session threadpool
+ void SetNumInterOpThreads(int num_threads);
+
// Set the number of steps required to warmup TensorFlow. Must be called
// before Provision().
void SetNumWarmupSteps(int num_steps);
diff --git a/tensorflow/core/grappler/costs/graph_properties.cc b/tensorflow/core/grappler/costs/graph_properties.cc
index 83a8326e79..231c7c63be 100644
--- a/tensorflow/core/grappler/costs/graph_properties.cc
+++ b/tensorflow/core/grappler/costs/graph_properties.cc
@@ -496,18 +496,11 @@ class SymbolicShapeRefiner {
"supported.");
}
+ // It is guaranteed that output_tensors does not contain any control
+ // inputs, so port_id >= 0.
string out_tensor = out_arg.output_tensors[0];
- auto out_tensor_pieces = str_util::Split(out_tensor, ",");
- string node_name = out_tensor_pieces[0];
int port_id;
-
- // Check if port_id was included in out_tensor
- if (out_tensor_pieces.size() <= 1) {
- port_id = 0;
- } else if (!strings::safe_strto32(out_tensor_pieces[1], &port_id)) {
- return errors::FailedPrecondition(
- "Failed string to integer conversion for ", out_tensor_pieces[1]);
- }
+ string node_name = ParseNodeName(out_tensor, &port_id);
const NodeDef* retnode = gv.GetNode(node_name);
if (retnode == nullptr) {
@@ -516,6 +509,11 @@ class SymbolicShapeRefiner {
}
auto output_properties = gp.GetOutputProperties(retnode->name());
+ if (port_id >= output_properties.size()) {
+ return errors::InvalidArgument(
+ out_tensor, " has invalid position ", port_id,
+ " (output_properties.size() = ", output_properties.size(), ").");
+ }
auto const& outprop = output_properties[port_id];
const TensorShapeProto& shape = outprop.shape();
ShapeHandle out;
diff --git a/tensorflow/core/grappler/costs/graph_properties_test.cc b/tensorflow/core/grappler/costs/graph_properties_test.cc
index 1be19d291a..5acfb56b05 100644
--- a/tensorflow/core/grappler/costs/graph_properties_test.cc
+++ b/tensorflow/core/grappler/costs/graph_properties_test.cc
@@ -887,6 +887,44 @@ TEST_F(GraphPropertiesTest, LargeFunctionStaticShapeInference) {
EXPECT_EQ(8, in_prop3.shape().dim(3).size());
}
+TEST_F(GraphPropertiesTest, LargeFunctionWithMultipleOutputs) {
+ // Test graph produced in python using:
+ /*
+ @function.Defun(noinline=True)
+ def MyFunc():
+ @function.Defun(*[tf.float32] * 2)
+ def Cond(n, unused_x):
+ return n > 0
+
+ @function.Defun(*[tf.float32] * 2)
+ def Body(n, x):
+ return n - 1, x + n
+
+ i = tf.constant(10)
+ return functional_ops.While([i, 0.], Cond, Body)
+
+ with tf.Graph().as_default():
+ z = MyFunc()
+ */
+ GrapplerItem item;
+ string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
+ "function_functional_while.pbtxt");
+ TF_CHECK_OK(ReadGraphDefFromFile(filename, &item.graph));
+ GraphProperties properties(item);
+ TF_CHECK_OK(properties.InferStatically(false));
+
+ const auto out_props = properties.GetOutputProperties("MyFunc_AenMyWWx1Us");
+ EXPECT_EQ(2, out_props.size());
+
+ const OpInfo::TensorProperties& out_prop0 = out_props[0];
+ EXPECT_EQ(DT_INT32, out_prop0.dtype());
+ EXPECT_FALSE(out_prop0.shape().unknown_rank());
+
+ const OpInfo::TensorProperties& out_prop1 = out_props[1];
+ EXPECT_EQ(DT_FLOAT, out_prop1.dtype());
+ EXPECT_FALSE(out_prop1.shape().unknown_rank());
+}
+
TEST_F(GraphPropertiesTest, FunctionWithErrorStaticShapeInference) {
GrapplerItem item;
string filename = io::JoinPath(testing::TensorFlowSrcRoot(), kTestDataPath,
diff --git a/tensorflow/core/grappler/costs/graph_properties_testdata/function_functional_while.pbtxt b/tensorflow/core/grappler/costs/graph_properties_testdata/function_functional_while.pbtxt
new file mode 100644
index 0000000000..c94ee2f227
--- /dev/null
+++ b/tensorflow/core/grappler/costs/graph_properties_testdata/function_functional_while.pbtxt
@@ -0,0 +1,239 @@
+node {
+ name: "MyFunc_AenMyWWx1Us"
+ op: "MyFunc_AenMyWWx1Us"
+}
+library {
+ function {
+ signature {
+ name: "MyFunc_AenMyWWx1Us"
+ output_arg {
+ name: "while"
+ type: DT_INT32
+ }
+ output_arg {
+ name: "while_0"
+ type: DT_FLOAT
+ }
+ is_stateful: true
+ }
+ node_def {
+ name: "Const"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_INT32
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_INT32
+ tensor_shape {
+ }
+ int_val: 10
+ }
+ }
+ }
+ }
+ node_def {
+ name: "While/input_1"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node_def {
+ name: "While"
+ op: "While"
+ input: "Const:output:0"
+ input: "While/input_1:output:0"
+ attr {
+ key: "T"
+ value {
+ list {
+ type: DT_INT32
+ type: DT_FLOAT
+ }
+ }
+ }
+ attr {
+ key: "body"
+ value {
+ func {
+ name: "Body_8GOMGeZeK5c"
+ }
+ }
+ }
+ attr {
+ key: "cond"
+ value {
+ func {
+ name: "Cond_Xf5ttAHgUCg"
+ }
+ }
+ }
+ }
+ ret {
+ key: "while"
+ value: "While:output:0"
+ }
+ ret {
+ key: "while_0"
+ value: "While:output:1"
+ }
+ attr {
+ key: "_noinline"
+ value {
+ b: true
+ }
+ }
+ }
+ function {
+ signature {
+ name: "Body_8GOMGeZeK5c"
+ input_arg {
+ name: "n"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "x"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "sub"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "add"
+ type: DT_FLOAT
+ }
+ }
+ node_def {
+ name: "sub/y"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 1.0
+ }
+ }
+ }
+ }
+ node_def {
+ name: "sub_0"
+ op: "Sub"
+ input: "n"
+ input: "sub/y:output:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ node_def {
+ name: "add_0"
+ op: "Add"
+ input: "x"
+ input: "n"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ ret {
+ key: "add"
+ value: "add_0:z:0"
+ }
+ ret {
+ key: "sub"
+ value: "sub_0:z:0"
+ }
+ }
+ function {
+ signature {
+ name: "Cond_Xf5ttAHgUCg"
+ input_arg {
+ name: "n"
+ type: DT_FLOAT
+ }
+ input_arg {
+ name: "unused_x"
+ type: DT_FLOAT
+ }
+ output_arg {
+ name: "greater"
+ type: DT_BOOL
+ }
+ }
+ node_def {
+ name: "Greater/y"
+ op: "Const"
+ attr {
+ key: "dtype"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ attr {
+ key: "value"
+ value {
+ tensor {
+ dtype: DT_FLOAT
+ tensor_shape {
+ }
+ float_val: 0.0
+ }
+ }
+ }
+ }
+ node_def {
+ name: "Greater"
+ op: "Greater"
+ input: "n"
+ input: "Greater/y:output:0"
+ attr {
+ key: "T"
+ value {
+ type: DT_FLOAT
+ }
+ }
+ }
+ ret {
+ key: "greater"
+ value: "Greater:z:0"
+ }
+ }
+}
+versions {
+ producer: 26
+ min_consumer: 12
+}
diff --git a/tensorflow/core/grappler/costs/op_level_cost_estimator.cc b/tensorflow/core/grappler/costs/op_level_cost_estimator.cc
index d34eecd009..5b303f6ccb 100644
--- a/tensorflow/core/grappler/costs/op_level_cost_estimator.cc
+++ b/tensorflow/core/grappler/costs/op_level_cost_estimator.cc
@@ -65,6 +65,7 @@ constexpr char kAvgPool[] = "AvgPool";
constexpr char kAvgPoolGrad[] = "AvgPoolGrad";
constexpr char kFusedBatchNorm[] = "FusedBatchNorm";
constexpr char kFusedBatchNormGrad[] = "FusedBatchNormGrad";
+constexpr char kQuantizedMatMulV2[] = "QuantizedMatMulV2";
static const Costs::Duration kMinComputeTime(1);
@@ -226,6 +227,7 @@ OpLevelCostEstimator::OpLevelCostEstimator() {
{kMatMul, wrap(&OpLevelCostEstimator::PredictMatMul)},
{kSparseMatMul, wrap(&OpLevelCostEstimator::PredictMatMul)},
{kBatchMatMul, wrap(&OpLevelCostEstimator::PredictBatchMatMul)},
+ {kQuantizedMatMulV2, wrap(&OpLevelCostEstimator::PredictMatMul)},
{kNoOp, wrap(&OpLevelCostEstimator::PredictNoOp)},
{kGuaranteeConst, wrap(&OpLevelCostEstimator::PredictNoOp)},
@@ -268,67 +270,70 @@ OpLevelCostEstimator::OpLevelCostEstimator() {
EIGEN_COST(scalar_product_op<float>) + EIGEN_COST(scalar_max_op<float>) +
EIGEN_COST(scalar_min_op<float>) + EIGEN_COST(scalar_round_op<float>);
- elementwise_ops_ = {// Unary ops alphabetically sorted
- {"Acos", EIGEN_COST(scalar_acos_op<float>)},
- {"Asin", EIGEN_COST(scalar_asin_op<float>)},
- {"Atan", EIGEN_COST(scalar_atan_op<float>)},
- {"Atan2", EIGEN_COST(scalar_quotient_op<float>) +
- EIGEN_COST(scalar_atan_op<float>)},
- {"Ceil", EIGEN_COST(scalar_ceil_op<float>)},
- {"Cos", EIGEN_COST(scalar_cos_op<float>)},
- {"Dequantize", EIGEN_COST(scalar_product_op<float>)},
- {"Erf", 1},
- {"Erfc", 1},
- {"Exp", EIGEN_COST(scalar_exp_op<float>)},
- {"Expm1", EIGEN_COST(scalar_expm1_op<float>)},
- {"Floor", EIGEN_COST(scalar_floor_op<float>)},
- {"Inv", EIGEN_COST(scalar_inverse_op<float>)},
- {"InvGrad", 1},
- {"Lgamma", 1},
- {"Log", EIGEN_COST(scalar_log_op<float>)},
- {"Log1p", EIGEN_COST(scalar_log1p_op<float>)},
- {"Neg", EIGEN_COST(scalar_opposite_op<float>)},
- {"QuantizeV2", quantize_v2_cost},
- {"Reciprocal", EIGEN_COST(scalar_inverse_op<float>)},
- {"Rint", 1},
- {"Round", EIGEN_COST(scalar_round_op<float>)},
- {"Rsqrt", EIGEN_COST(scalar_rsqrt_op<float>)},
- {"Sqrt", EIGEN_COST(scalar_sqrt_op<float>)},
- {"Square", EIGEN_COST(scalar_square_op<float>)},
- {"Tanh", EIGEN_COST(scalar_tanh_op<float>)},
- {"Relu", EIGEN_COST(scalar_max_op<float>)},
- {"Sigmoid", EIGEN_COST(scalar_sigmoid_op<float>)},
- {"Sign", EIGEN_COST(scalar_sign_op<float>)},
- {"Sin", EIGEN_COST(scalar_sin_op<float>)},
- {"Tan", EIGEN_COST(scalar_tan_op<float>)},
- // Binary ops alphabetically sorted
- {"Add", EIGEN_COST(scalar_sum_op<float>)},
- {"ApproximateEqual", 1},
- {"BiasAdd", EIGEN_COST(scalar_sum_op<float>)},
- {"Div", EIGEN_COST(scalar_quotient_op<float>)},
- {"Equal", 1},
- {"FloorDiv", EIGEN_COST(scalar_quotient_op<float>)},
- {"FloorMod", EIGEN_COST(scalar_mod_op<float>)},
- {"Greater", 1},
- {"GreaterEqual", 1},
- {"Less", 1},
- {"LessEqual", 1},
- {"LogicalAnd", EIGEN_COST(scalar_boolean_and_op)},
- {"LogicalNot", 1},
- {"LogicalOr", EIGEN_COST(scalar_boolean_or_op)},
- {"Maximum", EIGEN_COST(scalar_max_op<float>)},
- {"Minimum", EIGEN_COST(scalar_min_op<float>)},
- {"Mod", EIGEN_COST(scalar_mod_op<float>)},
- {"Mul", EIGEN_COST(scalar_product_op<float>)},
- {"NotEqual", 1},
- {"QuantizedAdd", EIGEN_COST(scalar_sum_op<float>)},
- {"QuantizedMul", EIGEN_COST(scalar_product_op<float>)},
- {"RealDiv", EIGEN_COST(scalar_quotient_op<float>)},
- {"ReluGrad", EIGEN_COST(scalar_max_op<float>)},
- {"SquareDifference", 1},
- {"Sub", EIGEN_COST(scalar_difference_op<float>)},
- {"TruncateDiv", EIGEN_COST(scalar_quotient_op<float>)},
- {"TruncateMod", EIGEN_COST(scalar_mod_op<float>)}};
+ elementwise_ops_ = {
+ // Unary ops alphabetically sorted
+ {"Acos", EIGEN_COST(scalar_acos_op<float>)},
+ {"Asin", EIGEN_COST(scalar_asin_op<float>)},
+ {"Atan", EIGEN_COST(scalar_atan_op<float>)},
+ {"Atan2", EIGEN_COST(scalar_quotient_op<float>) +
+ EIGEN_COST(scalar_atan_op<float>)},
+ {"Ceil", EIGEN_COST(scalar_ceil_op<float>)},
+ {"Cos", EIGEN_COST(scalar_cos_op<float>)},
+ {"Dequantize", EIGEN_COST(scalar_product_op<float>)},
+ {"Erf", 1},
+ {"Erfc", 1},
+ {"Exp", EIGEN_COST(scalar_exp_op<float>)},
+ {"Expm1", EIGEN_COST(scalar_expm1_op<float>)},
+ {"Floor", EIGEN_COST(scalar_floor_op<float>)},
+ {"Inv", EIGEN_COST(scalar_inverse_op<float>)},
+ {"InvGrad", 1},
+ {"Lgamma", 1},
+ {"Log", EIGEN_COST(scalar_log_op<float>)},
+ {"Log1p", EIGEN_COST(scalar_log1p_op<float>)},
+ {"Neg", EIGEN_COST(scalar_opposite_op<float>)},
+ {"QuantizeV2", quantize_v2_cost},
+ {"Reciprocal", EIGEN_COST(scalar_inverse_op<float>)},
+ {"Rint", 1},
+ {"Round", EIGEN_COST(scalar_round_op<float>)},
+ {"Rsqrt", EIGEN_COST(scalar_rsqrt_op<float>)},
+ {"Sqrt", EIGEN_COST(scalar_sqrt_op<float>)},
+ {"Square", EIGEN_COST(scalar_square_op<float>)},
+ {"Tanh", EIGEN_COST(scalar_tanh_op<float>)},
+ {"Relu", EIGEN_COST(scalar_max_op<float>)},
+ {"Sigmoid", EIGEN_COST(scalar_sigmoid_op<float>)},
+ {"QuantizedSigmoid", EIGEN_COST(scalar_sigmoid_op<float>)},
+ {"Sign", EIGEN_COST(scalar_sign_op<float>)},
+ {"Sin", EIGEN_COST(scalar_sin_op<float>)},
+ {"Tan", EIGEN_COST(scalar_tan_op<float>)},
+ // Binary ops alphabetically sorted
+ {"Add", EIGEN_COST(scalar_sum_op<float>)},
+ {"ApproximateEqual", 1},
+ {"BiasAdd", EIGEN_COST(scalar_sum_op<float>)},
+ {"QuantizedBiasAdd", EIGEN_COST(scalar_sum_op<float>)},
+ {"Div", EIGEN_COST(scalar_quotient_op<float>)},
+ {"Equal", 1},
+ {"FloorDiv", EIGEN_COST(scalar_quotient_op<float>)},
+ {"FloorMod", EIGEN_COST(scalar_mod_op<float>)},
+ {"Greater", 1},
+ {"GreaterEqual", 1},
+ {"Less", 1},
+ {"LessEqual", 1},
+ {"LogicalAnd", EIGEN_COST(scalar_boolean_and_op)},
+ {"LogicalNot", 1},
+ {"LogicalOr", EIGEN_COST(scalar_boolean_or_op)},
+ {"Maximum", EIGEN_COST(scalar_max_op<float>)},
+ {"Minimum", EIGEN_COST(scalar_min_op<float>)},
+ {"Mod", EIGEN_COST(scalar_mod_op<float>)},
+ {"Mul", EIGEN_COST(scalar_product_op<float>)},
+ {"NotEqual", 1},
+ {"QuantizedAdd", EIGEN_COST(scalar_sum_op<float>)},
+ {"QuantizedMul", EIGEN_COST(scalar_product_op<float>)},
+ {"RealDiv", EIGEN_COST(scalar_quotient_op<float>)},
+ {"ReluGrad", EIGEN_COST(scalar_max_op<float>)},
+ {"SquareDifference", 1},
+ {"Sub", EIGEN_COST(scalar_difference_op<float>)},
+ {"TruncateDiv", EIGEN_COST(scalar_quotient_op<float>)},
+ {"TruncateMod", EIGEN_COST(scalar_mod_op<float>)}};
#undef EIGEN_COST
@@ -675,7 +680,7 @@ int64 OpLevelCostEstimator::CountMatMulOperations(
}
ops = m_dim * n_dim * k_dim * 2;
- VLOG(1) << "Operations for Matmul" << ops;
+ VLOG(1) << "Operations for Matmul: " << ops;
if (mat_mul != nullptr) {
mat_mul->m = m_dim;
@@ -972,8 +977,10 @@ int64 OpLevelCostEstimator::CalculateTensorElementCount(
int64 OpLevelCostEstimator::CalculateTensorSize(
const OpInfo::TensorProperties& tensor, bool* found_unknown_shapes) const {
- return CalculateTensorElementCount(tensor, found_unknown_shapes) *
- DataTypeSize(BaseType(tensor.dtype()));
+ int64 count = CalculateTensorElementCount(tensor, found_unknown_shapes);
+ int size = DataTypeSize(BaseType(tensor.dtype()));
+ VLOG(2) << "Count: " << count << " DataTypeSize: " << size;
+ return count * size;
}
int64 OpLevelCostEstimator::CalculateInputSize(
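A quick worked example of the size computation above (illustrative only, not part of the patch): for a DT_FLOAT tensor of shape [2, 3], CalculateTensorElementCount returns 6 and DataTypeSize(BaseType(DT_FLOAT)) is 4 bytes, so CalculateTensorSize returns 6 * 4 = 24 and the new VLOG(2) line reports "Count: 6 DataTypeSize: 4".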
diff --git a/tensorflow/core/grappler/costs/virtual_scheduler.cc b/tensorflow/core/grappler/costs/virtual_scheduler.cc
index 7f68272950..6a1b0aebfa 100644
--- a/tensorflow/core/grappler/costs/virtual_scheduler.cc
+++ b/tensorflow/core/grappler/costs/virtual_scheduler.cc
@@ -30,6 +30,7 @@ limitations under the License.
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/device_name_utils.h"
@@ -858,8 +859,9 @@ Costs VirtualScheduler::Summary() const {
const auto& memory_cost = op_cost_pair.second.memory_time.count();
const bool is_op_cost_accurate = !op_cost_pair.second.inaccurate;
if (cost) { // Skip printing out zero-cost ops.
- VLOG(1) << " + " << op << " : " << (is_op_cost_accurate ? "" : "~")
- << cost << " / " << compute_cost << " / " << memory_cost;
+ VLOG(1) << strings::Printf(" + %30s : %c %10ld / %10ld / %10ld",
+ op.c_str(), (is_op_cost_accurate ? ' ' : '~'),
+ cost, compute_cost, memory_cost);
}
}
@@ -934,9 +936,11 @@ Costs VirtualScheduler::Summary() const {
: 0.0;
if (cost || mem_usage_percent > 1.0) {
// Print out only non-zero cost ops or ops with > 1% memory usage.
- VLOG(1) << " + " << op << " : " << (is_op_cost_accurate ? "" : "~")
- << cost << " / " << compute_cost << " / " << memory_cost << " ("
- << strings::HumanReadableNumBytes(op_mem_usage) << " ["
+ VLOG(1) << strings::Printf(" + %30s : %c %10ld / %10ld / %10ld",
+ op.c_str(),
+ (is_op_cost_accurate ? ' ' : '~'), cost,
+ compute_cost, memory_cost)
+ << " (" << strings::HumanReadableNumBytes(op_mem_usage) << " ["
<< mem_usage_percent << "%] "
<< (persisent_ops.count(op) > 0 ? ": persistent op)" : ")");
}
diff --git a/tensorflow/core/grappler/optimizers/data/BUILD b/tensorflow/core/grappler/optimizers/data/BUILD
index 3cb9d4d61c..c8946c499c 100644
--- a/tensorflow/core/grappler/optimizers/data/BUILD
+++ b/tensorflow/core/grappler/optimizers/data/BUILD
@@ -48,10 +48,7 @@ cc_library(
"//tensorflow/core:lib",
"//tensorflow/core/grappler:graph_view",
"//tensorflow/core/grappler:grappler_item",
- "//tensorflow/core/grappler:grappler_item_builder",
"//tensorflow/core/grappler:utils",
- "//tensorflow/core/grappler/clusters:virtual_cluster",
- "//tensorflow/core/grappler/optimizers:meta_optimizer",
] + tf_protos_all(),
)
diff --git a/tensorflow/core/grappler/optimizers/data/graph_utils.cc b/tensorflow/core/grappler/optimizers/data/graph_utils.cc
index b5b46ccafe..ea5f450009 100644
--- a/tensorflow/core/grappler/optimizers/data/graph_utils.cc
+++ b/tensorflow/core/grappler/optimizers/data/graph_utils.cc
@@ -16,11 +16,7 @@ limitations under the License.
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/framework/device_base.h"
-#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/graph_view.h"
-#include "tensorflow/core/grappler/grappler_item.h"
-#include "tensorflow/core/grappler/grappler_item_builder.h"
-#include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#include "tensorflow/core/util/ptr_util.h"
namespace tensorflow {
diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD
index 7599cf7db2..2cb54bd973 100644
--- a/tensorflow/core/kernels/BUILD
+++ b/tensorflow/core/kernels/BUILD
@@ -1105,6 +1105,29 @@ tf_cc_test(
],
)
+tf_cuda_cc_test(
+ name = "depthwise_conv_ops_test",
+ size = "small",
+ srcs = ["depthwise_conv_ops_test.cc"],
+ tags = ["requires-gpu-sm35"],
+ deps = [
+ ":conv_ops",
+ ":image",
+ ":ops_testutil",
+ ":ops_util",
+ "//tensorflow/cc:cc_ops",
+ "//tensorflow/core:core_cpu",
+ "//tensorflow/core:framework",
+ "//tensorflow/core:framework_internal",
+ "//tensorflow/core:lib",
+ "//tensorflow/core:protos_all_cc",
+ "//tensorflow/core:tensorflow",
+ "//tensorflow/core:test",
+ "//tensorflow/core:test_main",
+ "//tensorflow/core:testlib",
+ ],
+)
+
tf_cc_test(
name = "decode_wav_op_test",
size = "small",
@@ -4826,6 +4849,8 @@ filegroup(
"cast_op_impl_int64.cc",
"cast_op_impl_int8.cc",
"cast_op_impl_uint16.cc",
+ "cast_op_impl_uint32.cc",
+ "cast_op_impl_uint64.cc",
"cast_op_impl_uint8.cc",
"concat_lib.h",
"concat_lib_cpu.cc",
@@ -5200,6 +5225,16 @@ filegroup(
visibility = ["//visibility:public"],
)
+ANDROID_TEXTUAL_HDRS = [
+ "gather_nd_op_cpu_impl.h",
+ "gemm_functors.h",
+ "mirror_pad_op_cpu_impl.h",
+ "scatter_nd_op_cpu_impl.h",
+ "slice_op_cpu_impl.h",
+ "strided_slice_op_impl.h",
+ "tile_ops_cpu_impl.h",
+]
+
# A file group which contains nearly all available operators which
# may work on Android. This is intended to be used with selective
# registration.
@@ -5261,10 +5296,20 @@ filegroup(
"batch_kernels.*",
"regex_full_match_op.cc",
"regex_replace_op.cc",
- ],
+ # Ops that are inherently incompatible with Android (e.g. tied to x86 platform).
+ "mkl_*",
+ "xsmm_*",
+ "cwise_ops_sycl_common.h",
+ ] + ANDROID_TEXTUAL_HDRS,
),
visibility = ["//visibility:public"],
)
+
+filegroup(
+ name = "android_all_ops_textual_hdrs",
+ srcs = ANDROID_TEXTUAL_HDRS,
+ visibility = ["//visibility:public"],
+)
# LINT.ThenChange(//tensorflow/contrib/makefile/tf_op_files.txt)
cc_library(
@@ -6288,6 +6333,7 @@ tf_kernel_library(
"//tensorflow/core:lib",
"//tensorflow/core/util/proto:decode",
"//tensorflow/core/util/proto:descriptors",
+ "//tensorflow/core/util/proto:proto_utils",
"//third_party/eigen3",
],
)
@@ -6300,6 +6346,7 @@ tf_kernel_library(
"//tensorflow/core:framework",
"//tensorflow/core:lib",
"//tensorflow/core/util/proto:descriptors",
+ "//tensorflow/core/util/proto:proto_utils",
"//third_party/eigen3",
],
)
diff --git a/tensorflow/core/kernels/argmax_op.cc b/tensorflow/core/kernels/argmax_op.cc
index 49cd997fed..c731b64993 100644
--- a/tensorflow/core/kernels/argmax_op.cc
+++ b/tensorflow/core/kernels/argmax_op.cc
@@ -59,7 +59,7 @@ class ArgOp : public OpKernel {
int axis = dim < 0 ? dim + input_dims : dim;
- OP_REQUIRES(context, axis >= 0 && axis < input_dims,
+ OP_REQUIRES(context, FastBoundsCheck(axis, input_dims),
errors::InvalidArgument("Expected dimension in the range [",
-input_dims, ", ", input_dims,
"), but got ", dim));
@@ -76,6 +76,10 @@ class ArgOp : public OpKernel {
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
+ if (output_shape.num_elements() == 0) {
+ return;
+ }
+
#define HANDLE_DIM(NDIM) \
case NDIM: \
ArgFunctor::Reduce##NDIM(context->eigen_device<Device>(), \
diff --git a/tensorflow/core/kernels/cast_op.cc b/tensorflow/core/kernels/cast_op.cc
index 626db9131a..b4c97df38b 100644
--- a/tensorflow/core/kernels/cast_op.cc
+++ b/tensorflow/core/kernels/cast_op.cc
@@ -41,8 +41,10 @@ typedef Eigen::SyclDevice SYCLDevice;
#define CURRY_TYPES2(FN, arg0) \
FN(arg0, bool); \
FN(arg0, uint8); \
- FN(arg0, int8); \
FN(arg0, uint16); \
+ FN(arg0, uint32); \
+ FN(arg0, uint64); \
+ FN(arg0, int8); \
FN(arg0, int16); \
FN(arg0, int32); \
FN(arg0, int64); \
@@ -53,8 +55,39 @@ typedef Eigen::SyclDevice SYCLDevice;
FN(arg0, std::complex<double>)
CastOpBase::CastOpBase(OpKernelConstruction* ctx) : OpKernel(ctx) {
- OP_REQUIRES_OK(ctx, ctx->GetAttr("SrcT", &src_dtype_));
- OP_REQUIRES_OK(ctx, ctx->GetAttr("DstT", &dst_dtype_));
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("SrcT", &external_src_dtype_));
+
+ OP_REQUIRES_OK(ctx, ctx->GetAttr("DstT", &external_dst_dtype_));
+
+  // Quantized data types use the same underlying format as their non-quantized
+  // versions, so we use the non-quantized implementation for casting.
+ if (external_dst_dtype_ == DT_QUINT8) {
+ dst_dtype_ = DT_UINT8;
+ } else if (external_dst_dtype_ == DT_QINT8) {
+ dst_dtype_ = DT_INT8;
+ } else if (external_dst_dtype_ == DT_QINT32) {
+ dst_dtype_ = DT_INT32;
+ } else if (external_dst_dtype_ == DT_QINT16) {
+ dst_dtype_ = DT_INT16;
+ } else if (external_dst_dtype_ == DT_QUINT16) {
+ dst_dtype_ = DT_UINT16;
+ } else {
+ dst_dtype_ = external_dst_dtype_;
+ }
+
+ if (external_src_dtype_ == DT_QUINT8) {
+ src_dtype_ = DT_UINT8;
+ } else if (external_src_dtype_ == DT_QINT8) {
+ src_dtype_ = DT_INT8;
+ } else if (external_src_dtype_ == DT_QINT32) {
+ src_dtype_ = DT_INT32;
+ } else if (external_src_dtype_ == DT_QINT16) {
+ src_dtype_ = DT_INT16;
+ } else if (external_src_dtype_ == DT_QUINT16) {
+ src_dtype_ = DT_UINT16;
+ } else {
+ src_dtype_ = external_src_dtype_;
+ }
}
void CastOpBase::Compute(OpKernelContext* ctx) {
@@ -62,15 +95,20 @@ void CastOpBase::Compute(OpKernelContext* ctx) {
if (work_ == nullptr) {
ctx->set_output(0, inp);
} else {
+ Tensor in;
+ in.UnsafeCopyFromInternal(inp, src_dtype_, inp.shape());
Tensor* out = nullptr;
- OP_REQUIRES_OK(ctx, ctx->allocate_output(0, inp.shape(), &out));
- work_(ctx, inp, out);
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(0, in.shape(), &out));
+ out->set_dtype(dst_dtype_);
+ work_(ctx, in, out);
+ out->set_dtype(external_dst_dtype_);
}
}
Status CastOpBase::Unimplemented() {
- return errors::Unimplemented("Cast ", DataTypeString(src_dtype_), " to ",
- DataTypeString(dst_dtype_), " is not supported");
+ return errors::Unimplemented("Cast ", DataTypeString(external_src_dtype_),
+ " to ", DataTypeString(external_dst_dtype_),
+ " is not supported");
}
CpuCastOp::CpuCastOp(OpKernelConstruction* ctx) : CastOpBase(ctx) {
@@ -78,7 +116,7 @@ CpuCastOp::CpuCastOp(OpKernelConstruction* ctx) : CastOpBase(ctx) {
}
Status CpuCastOp::Prepare() {
- if (src_dtype_ == dst_dtype_) {
+ if (external_src_dtype_ == external_dst_dtype_) {
work_ = nullptr; // Identity
return Status::OK();
}
@@ -86,10 +124,14 @@ Status CpuCastOp::Prepare() {
work_ = GetCpuCastFromBool(dst_dtype_);
} else if (src_dtype_ == DT_UINT8) {
work_ = GetCpuCastFromUint8(dst_dtype_);
- } else if (src_dtype_ == DT_INT8) {
- work_ = GetCpuCastFromInt8(dst_dtype_);
} else if (src_dtype_ == DT_UINT16) {
work_ = GetCpuCastFromUint16(dst_dtype_);
+ } else if (src_dtype_ == DT_UINT32) {
+ work_ = GetCpuCastFromUint32(dst_dtype_);
+ } else if (src_dtype_ == DT_UINT64) {
+ work_ = GetCpuCastFromUint64(dst_dtype_);
+ } else if (src_dtype_ == DT_INT8) {
+ work_ = GetCpuCastFromInt8(dst_dtype_);
} else if (src_dtype_ == DT_INT16) {
work_ = GetCpuCastFromInt16(dst_dtype_);
} else if (src_dtype_ == DT_INT32) {
@@ -127,7 +169,7 @@ class GpuCastOp : public CastOpBase {
private:
Status Prepare() {
- if (src_dtype_ == dst_dtype_) {
+ if (external_src_dtype_ == external_dst_dtype_) {
work_ = nullptr; // Identity
return Status::OK();
}
@@ -135,10 +177,14 @@ class GpuCastOp : public CastOpBase {
work_ = GetGpuCastFromBool(dst_dtype_);
} else if (src_dtype_ == DT_UINT8) {
work_ = GetGpuCastFromUint8(dst_dtype_);
- } else if (src_dtype_ == DT_INT8) {
- work_ = GetGpuCastFromInt8(dst_dtype_);
} else if (src_dtype_ == DT_UINT16) {
work_ = GetGpuCastFromUint16(dst_dtype_);
+ } else if (src_dtype_ == DT_UINT32) {
+ work_ = GetGpuCastFromUint32(dst_dtype_);
+ } else if (src_dtype_ == DT_UINT64) {
+ work_ = GetGpuCastFromUint64(dst_dtype_);
+ } else if (src_dtype_ == DT_INT8) {
+ work_ = GetGpuCastFromInt8(dst_dtype_);
} else if (src_dtype_ == DT_INT16) {
work_ = GetGpuCastFromInt16(dst_dtype_);
} else if (src_dtype_ == DT_INT32) {
@@ -178,8 +224,10 @@ REGISTER_KERNEL_BUILDER(Name("Cast").Device(DEVICE_CPU), CpuCastOp);
CURRY_TYPES2(REGISTER_CAST_GPU, bool);
CURRY_TYPES2(REGISTER_CAST_GPU, uint8);
-CURRY_TYPES2(REGISTER_CAST_GPU, int8);
CURRY_TYPES2(REGISTER_CAST_GPU, uint16);
+CURRY_TYPES2(REGISTER_CAST_GPU, uint32);
+CURRY_TYPES2(REGISTER_CAST_GPU, uint64);
+CURRY_TYPES2(REGISTER_CAST_GPU, int8);
CURRY_TYPES2(REGISTER_CAST_GPU, int16);
CURRY_TYPES2(REGISTER_CAST_GPU, int32);
CURRY_TYPES2(REGISTER_CAST_GPU, int64);
@@ -203,7 +251,7 @@ class SyclCastOp : public CastOpBase {
private:
Status Prepare() {
- if (src_dtype_ == dst_dtype_) {
+ if (external_src_dtype_ == external_dst_dtype_) {
work_ = nullptr; // Identity
return Status::OK();
}
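For readers following the CastOpBase change above: the quantized-to-plain dtype remapping is a fixed table, and the sketch below restates it as a standalone helper. ToUnderlyingType is a hypothetical name used only for illustration; the patch itself keeps the if/else chains in the constructor.

#include "tensorflow/core/framework/types.h"

// Hypothetical helper mirroring the mapping in CastOpBase's constructor:
// quantized dtypes share their storage format with the corresponding plain
// integer dtypes, so casts can reuse the non-quantized kernels.
static tensorflow::DataType ToUnderlyingType(tensorflow::DataType dtype) {
  switch (dtype) {
    case tensorflow::DT_QUINT8:  return tensorflow::DT_UINT8;
    case tensorflow::DT_QINT8:   return tensorflow::DT_INT8;
    case tensorflow::DT_QINT32:  return tensorflow::DT_INT32;
    case tensorflow::DT_QINT16:  return tensorflow::DT_INT16;
    case tensorflow::DT_QUINT16: return tensorflow::DT_UINT16;
    default:                     return dtype;  // already a plain dtype
  }
}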
diff --git a/tensorflow/core/kernels/cast_op.h b/tensorflow/core/kernels/cast_op.h
index 16d2e0e0a5..aae1e7ff19 100644
--- a/tensorflow/core/kernels/cast_op.h
+++ b/tensorflow/core/kernels/cast_op.h
@@ -36,6 +36,8 @@ class CastOpBase : public OpKernel {
protected:
DataType src_dtype_;
DataType dst_dtype_;
+ DataType external_src_dtype_;
+ DataType external_dst_dtype_;
std::function<void(OpKernelContext*, const Tensor&, Tensor*)> work_ = nullptr;
Status Unimplemented();
diff --git a/tensorflow/core/kernels/cast_op_gpu.cu.cc b/tensorflow/core/kernels/cast_op_gpu.cu.cc
index 9c9e9e7658..607e7f5efd 100644
--- a/tensorflow/core/kernels/cast_op_gpu.cu.cc
+++ b/tensorflow/core/kernels/cast_op_gpu.cu.cc
@@ -37,8 +37,10 @@ struct CastFunctor<GPUDevice, O, I> {
#define DEFINE_ALL_FROM(in_type) \
DEFINE(in_type, bool); \
DEFINE(in_type, uint8); \
- DEFINE(in_type, int8); \
DEFINE(in_type, uint16); \
+ DEFINE(in_type, uint32); \
+ DEFINE(in_type, uint64); \
+ DEFINE(in_type, int8); \
DEFINE(in_type, int16); \
DEFINE(in_type, int32); \
DEFINE(in_type, int64); \
@@ -50,8 +52,10 @@ struct CastFunctor<GPUDevice, O, I> {
DEFINE_ALL_FROM(bool);
DEFINE_ALL_FROM(uint8);
-DEFINE_ALL_FROM(int8);
DEFINE_ALL_FROM(uint16);
+DEFINE_ALL_FROM(uint32);
+DEFINE_ALL_FROM(uint64);
+DEFINE_ALL_FROM(int8);
DEFINE_ALL_FROM(int16);
DEFINE_ALL_FROM(int32);
DEFINE_ALL_FROM(int64);
diff --git a/tensorflow/core/kernels/cast_op_impl.h b/tensorflow/core/kernels/cast_op_impl.h
index 382e5440e1..fe821b25df 100644
--- a/tensorflow/core/kernels/cast_op_impl.h
+++ b/tensorflow/core/kernels/cast_op_impl.h
@@ -48,8 +48,10 @@ struct CastFunctor<Eigen::SyclDevice, O, I> {
#define CURRY_TYPES3_NO_HALF(FN, arg0, arg1) \
FN(arg0, arg1, bool); \
FN(arg0, arg1, uint8); \
- FN(arg0, arg1, int8); \
FN(arg0, arg1, uint16); \
+ FN(arg0, arg1, uint32); \
+ FN(arg0, arg1, uint64); \
+ FN(arg0, arg1, int8); \
FN(arg0, arg1, int16); \
FN(arg0, arg1, int32); \
FN(arg0, arg1, int64); \
@@ -82,10 +84,16 @@ std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
GetCpuCastFromUint8(DataType dst_dtype);
std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
-GetCpuCastFromInt8(DataType dst_dtype);
+GetCpuCastFromUint16(DataType dst_dtype);
std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
-GetCpuCastFromUint16(DataType dst_dtype);
+GetCpuCastFromUint32(DataType dst_dtype);
+
+std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
+GetCpuCastFromUint64(DataType dst_dtype);
+
+std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
+GetCpuCastFromInt8(DataType dst_dtype);
std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
GetCpuCastFromInt16(DataType dst_dtype);
@@ -123,10 +131,16 @@ std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
GetGpuCastFromUint8(DataType dst_dtype);
std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
-GetGpuCastFromInt8(DataType dst_dtype);
+GetGpuCastFromUint16(DataType dst_dtype);
std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
-GetGpuCastFromUint16(DataType dst_dtype);
+GetGpuCastFromUint32(DataType dst_dtype);
+
+std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
+GetGpuCastFromUint64(DataType dst_dtype);
+
+std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
+GetGpuCastFromInt8(DataType dst_dtype);
std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
GetGpuCastFromInt16(DataType dst_dtype);
@@ -168,6 +182,12 @@ std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
GetSyclCastFromUint16(DataType dst_dtype);
std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
+GetSyclCastFromUint32(DataType dst_dtype);
+
+std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
+GetSyclCastFromUint64(DataType dst_dtype);
+
+std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
GetSyclCastFromInt16(DataType dst_dtype);
std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
diff --git a/tensorflow/core/kernels/cast_op_impl_uint32.cc b/tensorflow/core/kernels/cast_op_impl_uint32.cc
new file mode 100644
index 0000000000..d1a854d98b
--- /dev/null
+++ b/tensorflow/core/kernels/cast_op_impl_uint32.cc
@@ -0,0 +1,46 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/kernels/cast_op_impl.h"
+
+namespace tensorflow {
+
+typedef Eigen::ThreadPoolDevice CPUDevice;
+typedef Eigen::GpuDevice GPUDevice;
+
+std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
+GetCpuCastFromUint32(DataType dst_dtype) {
+ CURRY_TYPES3(CAST_CASE, CPUDevice, uint32);
+ return nullptr;
+}
+
+#if GOOGLE_CUDA
+std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
+GetGpuCastFromUint32(DataType dst_dtype) {
+ CURRY_TYPES3_NO_BF16(CAST_CASE, GPUDevice, uint32);
+ return nullptr;
+}
+#endif // GOOGLE_CUDA
+
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
+GetSyclCastFromUint32(DataType dst_dtype) {
+ CURRY_TYPES3_NO_HALF(CAST_CASE, SYCLDevice, uint32);
+ return nullptr;
+}
+#endif // TENSORFLOW_USE_SYCL
+
+} // namespace tensorflow
diff --git a/tensorflow/core/kernels/cast_op_impl_uint64.cc b/tensorflow/core/kernels/cast_op_impl_uint64.cc
new file mode 100644
index 0000000000..604e0424fc
--- /dev/null
+++ b/tensorflow/core/kernels/cast_op_impl_uint64.cc
@@ -0,0 +1,46 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/kernels/cast_op_impl.h"
+
+namespace tensorflow {
+
+typedef Eigen::ThreadPoolDevice CPUDevice;
+typedef Eigen::GpuDevice GPUDevice;
+
+std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
+GetCpuCastFromUint64(DataType dst_dtype) {
+ CURRY_TYPES3(CAST_CASE, CPUDevice, uint64);
+ return nullptr;
+}
+
+#if GOOGLE_CUDA
+std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
+GetGpuCastFromUint64(DataType dst_dtype) {
+ CURRY_TYPES3_NO_BF16(CAST_CASE, GPUDevice, uint64);
+ return nullptr;
+}
+#endif // GOOGLE_CUDA
+
+#ifdef TENSORFLOW_USE_SYCL
+typedef Eigen::SyclDevice SYCLDevice;
+std::function<void(OpKernelContext*, const Tensor&, Tensor*)>
+GetSyclCastFromUint64(DataType dst_dtype) {
+ CURRY_TYPES3_NO_HALF(CAST_CASE, SYCLDevice, uint64);
+ return nullptr;
+}
+#endif // TENSORFLOW_USE_SYCL
+
+} // namespace tensorflow
diff --git a/tensorflow/core/kernels/cast_op_test.cc b/tensorflow/core/kernels/cast_op_test.cc
index 7da9d28a3d..9bbf7afb16 100644
--- a/tensorflow/core/kernels/cast_op_test.cc
+++ b/tensorflow/core/kernels/cast_op_test.cc
@@ -70,16 +70,25 @@ class CastOpTest : public OpsTestBase {
#define TEST_ALL_CASTS_FROM(in) \
TEST_CAST(in, uint8); \
TEST_CAST(in, uint16); \
+ TEST_CAST(in, uint32); \
+ TEST_CAST(in, uint64); \
TEST_CAST(in, int16); \
TEST_CAST(in, int32); \
TEST_CAST(in, int64); \
TEST_CAST(in, half); \
TEST_CAST(in, float); \
TEST_CAST(in, double); \
- TEST_CAST(in, bfloat16);
+ TEST_CAST(in, bfloat16); \
+ TEST_CAST(in, quint8); \
+ TEST_CAST(in, qint8); \
+ TEST_CAST(in, qint32); \
+ TEST_CAST(in, qint16); \
+ TEST_CAST(in, quint16);
TEST_ALL_CASTS_FROM(uint8)
TEST_ALL_CASTS_FROM(uint16)
+TEST_ALL_CASTS_FROM(uint32)
+TEST_ALL_CASTS_FROM(uint64)
TEST_ALL_CASTS_FROM(int16)
TEST_ALL_CASTS_FROM(int32)
TEST_ALL_CASTS_FROM(int64)
@@ -87,6 +96,11 @@ TEST_ALL_CASTS_FROM(half)
TEST_ALL_CASTS_FROM(float)
TEST_ALL_CASTS_FROM(double)
TEST_ALL_CASTS_FROM(bfloat16)
+TEST_ALL_CASTS_FROM(quint8)
+TEST_ALL_CASTS_FROM(qint8)
+TEST_ALL_CASTS_FROM(qint32)
+TEST_ALL_CASTS_FROM(qint16)
+TEST_ALL_CASTS_FROM(quint16)
#undef TEST_ALL_CASTS_FROM
#undef TEST_CAST
diff --git a/tensorflow/core/kernels/conv_grad_filter_ops.cc b/tensorflow/core/kernels/conv_grad_filter_ops.cc
index aca75176a5..63b1bcda43 100644
--- a/tensorflow/core/kernels/conv_grad_filter_ops.cc
+++ b/tensorflow/core/kernels/conv_grad_filter_ops.cc
@@ -909,6 +909,7 @@ void LaunchConv2DBackpropFilterOp<Eigen::GpuDevice, T>::operator()(
dims.in_depth, // in_depths
{{input_desc.height(), // in_rows
input_desc.width()}}, // in_cols
+ FORMAT_NCHW, // compute_data_format
dims.out_depth, // out_depths
{{dims.spatial_dims[0].filter_size, // filter_rows
dims.spatial_dims[1].filter_size, // filter_cols
diff --git a/tensorflow/core/kernels/conv_grad_input_ops.cc b/tensorflow/core/kernels/conv_grad_input_ops.cc
index 63a775afa8..d664a11e73 100644
--- a/tensorflow/core/kernels/conv_grad_input_ops.cc
+++ b/tensorflow/core/kernels/conv_grad_input_ops.cc
@@ -957,6 +957,7 @@ void LaunchConv2DBackpropInputOp<GPUDevice, T>::operator()(
dims.in_depth, // in_depths
{{input_desc.height(), // in_rows
input_desc.width()}}, // in_cols
+ FORMAT_NCHW, // compute_data_format
dims.out_depth, // out_depths
{{dims.spatial_dims[0].filter_size, // filter_rows
dims.spatial_dims[1].filter_size, // filter_cols
diff --git a/tensorflow/core/kernels/conv_grad_ops_3d.cc b/tensorflow/core/kernels/conv_grad_ops_3d.cc
index 980b1063de..15f1bf9aba 100644
--- a/tensorflow/core/kernels/conv_grad_ops_3d.cc
+++ b/tensorflow/core/kernels/conv_grad_ops_3d.cc
@@ -716,6 +716,7 @@ class Conv3DBackpropInputOp<GPUDevice, T> : public OpKernel {
batch,
in_depth,
{{input_size[0], input_size[1], input_size[2]}},
+ FORMAT_NCHW,
out_depth,
{{filter_size[0], filter_size[1], filter_size[2]}},
{{dilations[0], dilations[1], dilations[2]}},
@@ -1112,6 +1113,7 @@ class Conv3DBackpropFilterOp<GPUDevice, T> : public OpKernel {
batch,
in_depth,
{{input_size[0], input_size[1], input_size[2]}},
+ FORMAT_NCHW,
out_depth,
{{filter_size[0], filter_size[1], filter_size[2]}},
{{dilations[0], dilations[1], dilations[2]}},
diff --git a/tensorflow/core/kernels/conv_ops.cc b/tensorflow/core/kernels/conv_ops.cc
index 3b9886eece..ef692418d6 100644
--- a/tensorflow/core/kernels/conv_ops.cc
+++ b/tensorflow/core/kernels/conv_ops.cc
@@ -713,6 +713,7 @@ void LaunchConv2DOp<GPUDevice, T>::operator()(
in_depths, // in_depths
{{in_rows, // in_rows
in_cols}}, // in_cols
+ FORMAT_NCHW, // compute_data_format
out_depths, // out_depths
{{patch_rows, // filter_rows
patch_cols, // filter_cols
diff --git a/tensorflow/core/kernels/conv_ops_3d.cc b/tensorflow/core/kernels/conv_ops_3d.cc
index 9ec16be67d..a1eed4e68c 100644
--- a/tensorflow/core/kernels/conv_ops_3d.cc
+++ b/tensorflow/core/kernels/conv_ops_3d.cc
@@ -415,6 +415,7 @@ struct LaunchConvOp<GPUDevice, T> {
in_batch,
in_depth,
{{in_planes, in_rows, in_cols}},
+ FORMAT_NCHW,
out_depth,
{{filter_planes, filter_rows, filter_cols}},
{{dilations[0], dilations[1], dilations[2]}},
diff --git a/tensorflow/core/kernels/conv_ops_gpu.h b/tensorflow/core/kernels/conv_ops_gpu.h
index d2c8020bb6..afc611f277 100644
--- a/tensorflow/core/kernels/conv_ops_gpu.h
+++ b/tensorflow/core/kernels/conv_ops_gpu.h
@@ -85,13 +85,15 @@ class ConvParameters {
public:
using SpatialArray = gtl::InlinedVector<int64, 3>;
ConvParameters(int64 batch, int64 in_depths, const SpatialArray& in,
- int64 out_depths, const SpatialArray& filter,
- const SpatialArray& dilation, const SpatialArray& stride,
- const SpatialArray& padding, DataType dtype, int device_id)
+ TensorFormat data_format, int64 out_depths,
+ const SpatialArray& filter, const SpatialArray& dilation,
+ const SpatialArray& stride, const SpatialArray& padding,
+ DataType dtype, int device_id)
: batch_(batch),
in_depths_(in_depths),
out_depths_(out_depths),
in_(in),
+ data_format_(data_format),
filter_(filter),
dilation_(dilation),
stride_(stride),
@@ -101,6 +103,7 @@ class ConvParameters {
hash_code_ = batch;
hash_code_ = Hash64Combine(hash_code_, in_depths);
for (int64 val : in) hash_code_ = Hash64Combine(hash_code_, val);
+ hash_code_ = Hash64Combine(hash_code_, data_format);
hash_code_ = Hash64Combine(hash_code_, out_depths);
for (int64 val : filter) hash_code_ = Hash64Combine(hash_code_, val);
for (int64 val : dilation) hash_code_ = Hash64Combine(hash_code_, val);
@@ -123,6 +126,7 @@ class ConvParameters {
return strings::StrCat(
batch_, ", ", in_depths_, ", ",
"(", str_util::Join(in_, ", "), "), ",
+ ::tensorflow::ToString(data_format_), ", ",
out_depths_, ", ",
"(", str_util::Join(filter_, ", "), "), ",
"(", str_util::Join(dilation_, ", "), "), ",
@@ -148,12 +152,13 @@ class ConvParameters {
protected:
using ParameterDataType =
- std::tuple<int64, int64, SpatialArray, int64, SpatialArray, SpatialArray,
- SpatialArray, SpatialArray, DataType, int>;
+ std::tuple<int64, int64, SpatialArray, TensorFormat, int64, SpatialArray,
+ SpatialArray, SpatialArray, SpatialArray, DataType, int>;
ParameterDataType get_data_as_tuple() const {
- return std::make_tuple(batch_, in_depths_, in_, out_depths_, filter_,
- dilation_, stride_, padding_, dtype_, device_id_);
+ return std::make_tuple(batch_, in_depths_, in_, data_format_, out_depths_,
+ filter_, dilation_, stride_, padding_, dtype_,
+ device_id_);
}
uint64 hash_code_;
@@ -178,6 +183,7 @@ class ConvParameters {
int64 in_depths_;
int64 out_depths_;
SpatialArray in_;
+ TensorFormat data_format_;
SpatialArray filter_;
SpatialArray dilation_;
SpatialArray stride_;
diff --git a/tensorflow/core/kernels/conv_ops_test.cc b/tensorflow/core/kernels/conv_ops_test.cc
index 4f9a96ce17..c281153795 100644
--- a/tensorflow/core/kernels/conv_ops_test.cc
+++ b/tensorflow/core/kernels/conv_ops_test.cc
@@ -44,41 +44,43 @@ struct ConvParametersPeer {
TEST(ConvParameters, WinogradNonfusedAlgoSize) {
ConvParametersPeer conv_params_small = {{
- 1, // batch
- 32, // in_depths
- {{300, // in_rows
- 300}}, // in_cols
- 128, // out_depths
- {{3, // filter_rows
- 3}}, // filter_cols
- {{1, // dilation_rows
- 1}}, // dilation_cols
- {{1, // stride_rows
- 1}}, // stride_cols
- {{0, // padding_rows
- 0}}, // padding_cols
- DT_FLOAT, // tensor datatype
- 0, // device_id
+ 1, // batch
+ 32, // in_depths
+ {{300, // in_rows
+ 300}}, // in_cols
+ FORMAT_NCHW, // compute_data_format
+ 128, // out_depths
+ {{3, // filter_rows
+ 3}}, // filter_cols
+ {{1, // dilation_rows
+ 1}}, // dilation_cols
+ {{1, // stride_rows
+ 1}}, // stride_cols
+ {{0, // padding_rows
+ 0}}, // padding_cols
+ DT_FLOAT, // tensor datatype
+ 0, // device_id
}};
EXPECT_TRUE(
conv_params_small.ShouldIncludeWinogradNonfusedAlgoPreCudnn7<float>());
ConvParametersPeer conv_params_large = {{
- 1, // batch
- 128, // in_depths
- {{300, // in_rows
- 300}}, // in_cols
- 768, // out_depths
- {{3, // filter_rows
- 3}}, // filter_cols
- {{1, // dilation_rows
- 1}}, // dilation_cols
- {{1, // stride_rows
- 1}}, // stride_cols
- {{0, // padding_rows
- 0}}, // padding_cols
- DT_FLOAT, // tensor datatype
- 0, // device_id
+ 1, // batch
+ 128, // in_depths
+ {{300, // in_rows
+ 300}}, // in_cols
+ FORMAT_NCHW, // compute_data_format
+ 768, // out_depths
+ {{3, // filter_rows
+ 3}}, // filter_cols
+ {{1, // dilation_rows
+ 1}}, // dilation_cols
+ {{1, // stride_rows
+ 1}}, // stride_cols
+ {{0, // padding_rows
+ 0}}, // padding_cols
+ DT_FLOAT, // tensor datatype
+ 0, // device_id
}};
EXPECT_FALSE(
conv_params_large.ShouldIncludeWinogradNonfusedAlgoPreCudnn7<float>());
diff --git a/tensorflow/core/kernels/ctc_loss_op.cc b/tensorflow/core/kernels/ctc_loss_op.cc
index b38d838bf1..fb375ee4b3 100644
--- a/tensorflow/core/kernels/ctc_loss_op.cc
+++ b/tensorflow/core/kernels/ctc_loss_op.cc
@@ -100,8 +100,10 @@ class CTCLossOp : public OpKernel {
TensorShape labels_shape({batch_size, max_label_len});
std::vector<int64> order{0, 1};
- sparse::SparseTensor labels_sp(*labels_indices, *labels_values,
- labels_shape, order);
+ sparse::SparseTensor labels_sp;
+ OP_REQUIRES_OK(
+ ctx, sparse::SparseTensor::Create(*labels_indices, *labels_values,
+ labels_shape, order, &labels_sp));
Status labels_sp_valid = labels_sp.IndicesValid();
OP_REQUIRES(ctx, labels_sp_valid.ok(),
diff --git a/tensorflow/core/kernels/cuda_solvers.cc b/tensorflow/core/kernels/cuda_solvers.cc
index a857bd3ce4..a59baaa96f 100644
--- a/tensorflow/core/kernels/cuda_solvers.cc
+++ b/tensorflow/core/kernels/cuda_solvers.cc
@@ -151,7 +151,7 @@ CudaSolver::CudaSolver(OpKernelContext* context) : context_(context) {
reinterpret_cast<const cudaStream_t*>(context->op_device_context()
->stream()
->implementation()
- ->CudaStreamMemberHack()));
+ ->GpuStreamMemberHack()));
cuda_stream_ = *cu_stream_ptr;
HandleMap* handle_map = CHECK_NOTNULL(GetHandleMapSingleton());
auto it = handle_map->find(cuda_stream_);
diff --git a/tensorflow/core/kernels/data/iterator_ops.cc b/tensorflow/core/kernels/data/iterator_ops.cc
index 2a94a54f3d..da489db7c8 100644
--- a/tensorflow/core/kernels/data/iterator_ops.cc
+++ b/tensorflow/core/kernels/data/iterator_ops.cc
@@ -662,21 +662,89 @@ class MakeIteratorOp : public OpKernel {
}
};
+// A simple background worker that executes closures asynchronously and without
+// blocking.
+//
+// A `BackgroundWorker` is used to offload blocking work from an `AsyncOpKernel`
+// to avoid blocking an executor thread that may be required by the blocking
+// work.
+//
+// NOTE(mrry): We do not use a regular `tensorflow::thread::ThreadPool` for this
+// purpose because its current implementation (in Eigen) uses a finite-length
+// queue and will block the caller when full. This can lead to deadlock under
+// heavy load. Since the number of concurrent work items in each user of a
+// `BackgroundWorker` is at most one per op invocation, the dynamic allocation
+// overhead is tolerable.
+class BackgroundWorker {
+ public:
+ BackgroundWorker(Env* env, const string& name) {
+ thread_.reset(env->StartThread({} /* thread_options */, name,
+ [this]() { WorkerLoop(); }));
+ }
+
+ ~BackgroundWorker() {
+ {
+ mutex_lock l(mu_);
+ cancelled_ = true;
+ }
+ cond_var_.notify_one();
+ // Block until the background thread has terminated.
+ //
+ // NOTE(mrry): We explicitly free and join the thread here because
+ // `WorkerLoop()` uses other members of this object, and so we must join
+ // the thread before destroying them.
+ thread_.reset();
+ }
+
+ void Schedule(std::function<void()> work_item) {
+ {
+ mutex_lock l(mu_);
+ work_queue_.push_back(std::move(work_item));
+ }
+ cond_var_.notify_one();
+ }
+
+ private:
+ void WorkerLoop() {
+ while (true) {
+ std::function<void()> work_item = nullptr;
+ {
+ mutex_lock l(mu_);
+ while (!cancelled_ && work_queue_.empty()) {
+ cond_var_.wait(l);
+ }
+ if (cancelled_) {
+ return;
+ }
+ DCHECK(!work_queue_.empty());
+ work_item = std::move(work_queue_.front());
+ work_queue_.pop_front();
+ }
+ DCHECK(work_item != nullptr);
+ work_item();
+ }
+ }
+
+ std::unique_ptr<Thread> thread_;
+ mutex mu_;
+ condition_variable cond_var_;
+ bool cancelled_ GUARDED_BY(mu_) = false;
+ std::deque<std::function<void()>> work_queue_ GUARDED_BY(mu_);
+};
+
class ToSingleElementOp : public AsyncOpKernel {
public:
explicit ToSingleElementOp(OpKernelConstruction* ctx)
: AsyncOpKernel(ctx),
- thread_pool_(new thread::ThreadPool(
- ctx->env(), ThreadOptions(),
- strings::StrCat("to_single_element_op_thread_",
- SanitizeThreadSuffix(name())),
- 1 /* num_threads */, false /* low_latency_hint */)) {}
+ background_worker_(ctx->env(),
+ strings::StrCat("to_single_element_op_thread_",
+ SanitizeThreadSuffix(name()))) {}
void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {
// The call to `iterator->GetNext()` may block and depend on an
// inter-op thread pool thread, so we issue the call from the
// owned thread pool.
- thread_pool_->Schedule([ctx, done]() {
+ background_worker_.Schedule([ctx, done]() {
DatasetBase* dataset;
OP_REQUIRES_OK_ASYNC(
ctx, GetDatasetFromVariantTensor(ctx->input(0), &dataset), done);
@@ -729,18 +797,17 @@ class ToSingleElementOp : public AsyncOpKernel {
}
private:
- std::unique_ptr<thread::ThreadPool> thread_pool_;
+ BackgroundWorker background_worker_;
};
class OneShotIteratorOp : public AsyncOpKernel {
public:
explicit OneShotIteratorOp(OpKernelConstruction* ctx)
: AsyncOpKernel(ctx),
- thread_pool_(new thread::ThreadPool(
- ctx->env(), ThreadOptions(),
+ background_worker_(
+ ctx->env(),
strings::StrCat("one_shot_iterator_initialization_thread_",
- SanitizeThreadSuffix(name())),
- 1 /* num_threads */, false /* low_latency_hint */)),
+ SanitizeThreadSuffix(name()))),
graph_def_version_(ctx->graph_def_version())
{
@@ -782,7 +849,7 @@ class OneShotIteratorOp : public AsyncOpKernel {
if (!initialization_started_) {
// TODO(mrry): Convert the initialization code to use
// callbacks instead of wasting a thread.
- thread_pool_->Schedule([this, ctx, done]() { Init(ctx, done); });
+ background_worker_.Schedule([this, ctx, done]() { Init(ctx, done); });
initialization_started_ = true;
} else {
done_callbacks_.emplace_back(ctx, std::move(done));
@@ -915,7 +982,7 @@ class OneShotIteratorOp : public AsyncOpKernel {
DataTypeVector output_dtypes_;
std::vector<PartialTensorShape> output_shapes_;
- std::unique_ptr<thread::ThreadPool> thread_pool_;
+ BackgroundWorker background_worker_;
mutex mu_;
ContainerInfo cinfo_ GUARDED_BY(mu_);
@@ -932,11 +999,9 @@ class IteratorGetNextOp : public AsyncOpKernel {
public:
explicit IteratorGetNextOp(OpKernelConstruction* ctx)
: AsyncOpKernel(ctx),
- thread_pool_(new thread::ThreadPool(
- ctx->env(), ThreadOptions(),
- strings::StrCat("iterator_get_next_thread_",
- SanitizeThreadSuffix(name())),
- 1 /* num_threads */, false /* low_latency_hint */)) {}
+ background_worker_(ctx->env(),
+ strings::StrCat("iterator_get_next_thread_",
+ SanitizeThreadSuffix(name()))) {}
void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override {
IteratorResource* iterator;
@@ -945,7 +1010,7 @@ class IteratorGetNextOp : public AsyncOpKernel {
// The call to `iterator->GetNext()` may block and depend on an
// inter-op thread pool thread, so we issue the call from the
// owned thread pool.
- thread_pool_->Schedule(std::bind(
+ background_worker_.Schedule(std::bind(
[ctx, iterator](DoneCallback done) {
std::vector<Tensor> components;
bool end_of_sequence = false;
@@ -982,7 +1047,7 @@ class IteratorGetNextOp : public AsyncOpKernel {
}
private:
- std::unique_ptr<thread::ThreadPool> thread_pool_;
+ BackgroundWorker background_worker_;
};
class IteratorGetNextSyncOp : public OpKernel {
diff --git a/tensorflow/core/kernels/data/optimize_dataset_op.cc b/tensorflow/core/kernels/data/optimize_dataset_op.cc
index 81be69105e..276f5f89c8 100644
--- a/tensorflow/core/kernels/data/optimize_dataset_op.cc
+++ b/tensorflow/core/kernels/data/optimize_dataset_op.cc
@@ -53,23 +53,30 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
OP_REQUIRES_OK(
ctx, ParseVectorArgument<string>(ctx, "optimizations", &optimizations));
Dataset* dataset =
- new Dataset(ctx, optimizations, output_types_, output_shapes_);
- OP_REQUIRES_OK(ctx, dataset->Optimize(ctx, input));
+ new Dataset(ctx, input, optimizations, output_types_, output_shapes_);
+ OP_REQUIRES_OK(ctx, dataset->Optimize(ctx));
*output = dataset;
}
private:
class Dataset : public GraphDatasetBase {
public:
- Dataset(OpKernelContext* ctx, const std::vector<string>& optimizations,
+ Dataset(OpKernelContext* ctx, const DatasetBase* input,
+ const std::vector<string>& optimizations,
const DataTypeVector& output_types,
const std::vector<PartialTensorShape>& output_shapes)
: GraphDatasetBase(ctx),
+ input_(input),
optimizations_(optimizations),
output_types_(output_types),
- output_shapes_(output_shapes) {}
+ output_shapes_(output_shapes) {
+ input_->Ref();
+ }
- ~Dataset() override { input_->Unref(); }
+ ~Dataset() override {
+ input_->Unref();
+ optimized_input_->Unref();
+ }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
@@ -77,15 +84,17 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
new Iterator({this, strings::StrCat(prefix, "::Optimize")}));
}
- Status Optimize(OpKernelContext* ctx, const DatasetBase* input) {
+ Status Optimize(OpKernelContext* ctx) {
GraphDefBuilder b;
DatasetGraphDefBuilder db(&b);
Node* input_node = nullptr;
- TF_RETURN_IF_ERROR(db.AddParentDataset(ctx, input, &input_node));
+ TF_RETURN_IF_ERROR(db.AddParentDataset(ctx, input_, &input_node));
string output_node = input_node->name();
GraphDef graph_def;
TF_RETURN_IF_ERROR(b.ToGraphDef(&graph_def));
+ VLOG(3) << "Before optimization: " << graph_def.DebugString();
TF_RETURN_IF_ERROR(ApplyOptimizations(ctx, &graph_def, &output_node));
+ VLOG(3) << "After optimization: " << graph_def.DebugString();
flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(),
graph_def.library()));
Graph graph(OpRegistry::Global());
@@ -94,8 +103,9 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
GraphRunner graph_runner(ctx->function_library()->device());
TF_RETURN_IF_ERROR(graph_runner.Run(&graph, ctx->function_library(), {},
{output_node}, &outputs));
- TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(outputs[0], &input_));
- input_->Ref();
+ TF_RETURN_IF_ERROR(
+ GetDatasetFromVariantTensor(outputs[0], &optimized_input_));
+ optimized_input_->Ref();
return Status::OK();
}
@@ -127,7 +137,8 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
: DatasetIterator<Dataset>(params) {}
Status Initialize(IteratorContext* ctx) override {
- return dataset()->input_->MakeIterator(ctx, prefix(), &input_impl_);
+ return dataset()->optimized_input_->MakeIterator(ctx, prefix(),
+ &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
@@ -199,6 +210,12 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
tensorflow::grappler::VirtualCluster cluster(device_map);
// Run optimizer.
+ if (VLOG_IS_ON(2)) {
+ LOG(INFO) << "Performing the following optimizations:";
+ for (const string& optimization : optimizations_) {
+ LOG(INFO) << " " << optimization;
+ }
+ }
TF_RETURN_IF_ERROR(tensorflow::grappler::RunMetaOptimizer(
*grappler_item, rewriter_config, ctx->device(), &cluster, graph_def));
@@ -213,8 +230,9 @@ class OptimizeDatasetOp : public UnaryDatasetOpKernel {
return Status::OK();
}
- DatasetBase* input_;
+ DatasetBase* optimized_input_;
std::shared_ptr<FunctionLibraryDefinition> flib_def_;
+ const DatasetBase* input_;
const std::vector<string> optimizations_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
diff --git a/tensorflow/core/kernels/data/slide_dataset_op.cc b/tensorflow/core/kernels/data/slide_dataset_op.cc
index 07cc91f9d5..5765c61f30 100644
--- a/tensorflow/core/kernels/data/slide_dataset_op.cc
+++ b/tensorflow/core/kernels/data/slide_dataset_op.cc
@@ -12,6 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+
+#include <deque>
+#include <vector>
+
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/dataset.h"
@@ -33,36 +37,40 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) override {
int64 window_size = 0;
- int64 stride = 0;
OP_REQUIRES_OK(
ctx, ParseScalarArgument<int64>(ctx, "window_size", &window_size));
- OP_REQUIRES_OK(ctx, ParseScalarArgument<int64>(ctx, "stride", &stride));
OP_REQUIRES(
ctx, window_size > 0,
errors::InvalidArgument("Window size must be greater than zero."));
- OP_REQUIRES(ctx, stride > 0,
- errors::InvalidArgument("Stride must be greater than zero."));
- if (stride == window_size) {
- LOG(WARNING) << "stride: " << stride
+ int64 window_shift = 0;
+ OP_REQUIRES_OK(
+ ctx, ParseScalarArgument<int64>(ctx, "window_shift", &window_shift));
+ OP_REQUIRES(
+ ctx, window_shift > 0,
+ errors::InvalidArgument("Window shift must be greater than zero."));
+ int64 window_stride = 0;
+ OP_REQUIRES_OK(
+ ctx, ParseScalarArgument<int64>(ctx, "window_stride", &window_stride));
+ OP_REQUIRES(
+ ctx, window_stride > 0,
+ errors::InvalidArgument("window_stride must be greater than zero."));
+ if (window_size == window_shift && window_stride == 1) {
+ LOG(WARNING) << "window_shift: " << window_shift
<< " is equal to window_size: " << window_size
- << ", to use `batch` instead.";
- } else if (stride > window_size) {
- LOG(WARNING) << "stride: " << stride
- << " is greater than window_size: " << window_size
- << ", you will lose some data.";
+ << " and window_stride is 1, use `batch` instead.";
}
-
- *output = new Dataset(ctx, window_size, stride, input);
+ *output = new Dataset(ctx, window_size, window_shift, window_stride, input);
}
private:
class Dataset : public GraphDatasetBase {
public:
- Dataset(OpKernelContext* ctx, int64 window_size, int64 stride,
- const DatasetBase* input)
+ Dataset(OpKernelContext* ctx, int64 window_size, int64 window_shift,
+ int64 window_stride, const DatasetBase* input)
: GraphDatasetBase(ctx),
window_size_(window_size),
- stride_(stride),
+ window_shift_(window_shift),
+ window_stride_(window_stride),
input_(input) {
input_->Ref();
@@ -91,8 +99,8 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
}
string DebugString() const override {
- return strings::StrCat("SlideDatasetOp(", window_size_, ", ", stride_,
- ")::Dataset");
+ return strings::StrCat("SlideDatasetOp(", window_size_, ", ",
+ window_shift_, ", ", window_stride_, ")::Dataset");
}
protected:
@@ -101,16 +109,18 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddParentDataset(ctx, input_, &input_graph_node));
Node* window_size = nullptr;
- Node* stride = nullptr;
+ Node* window_shift = nullptr;
+ Node* window_stride = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(window_size_, &window_size));
- TF_RETURN_IF_ERROR(b->AddScalar(stride_, &stride));
- TF_RETURN_IF_ERROR(
- b->AddDataset(this, {input_graph_node, window_size, stride}, output));
+ TF_RETURN_IF_ERROR(b->AddScalar(window_shift_, &window_shift));
+ TF_RETURN_IF_ERROR(b->AddScalar(window_stride_, &window_stride));
+ TF_RETURN_IF_ERROR(b->AddDataset(
+ this, {input_graph_node, window_size, window_shift, window_stride},
+ output));
return Status::OK();
}
private:
-
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
@@ -124,7 +134,8 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
const int64 window_size = dataset()->window_size_;
- const int64 stride = dataset()->stride_;
+ const int64 window_shift = dataset()->window_shift_;
+ const int64 window_stride = dataset()->window_stride_;
std::vector<std::vector<Tensor>> batch_elements;
{
mutex_lock l(mu_);
@@ -133,55 +144,51 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
return Status::OK();
}
batch_elements.reserve(window_size);
- // Use cache if stride < window_size.
- if (stride < window_size) {
- const bool first_call = cache_.empty();
- if (first_call) {
- cache_.reserve(window_size);
- } else {
- // Reuse cache in the previous iteration.
- cache_.swap(batch_elements);
- }
- }
- // Fill up with new elements.
+
+ // Fill up buffer.
+ size_t target_size = TargetBufferSize(window_size, window_stride);
*end_of_sequence = false;
- for (size_t i = batch_elements.size(); i < window_size && !*end_of_sequence;
- ++i) {
- std::vector<Tensor> batch_element_tuple;
- TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &batch_element_tuple,
- end_of_sequence));
+ for (size_t i = buffer_.size(); i < target_size && !*end_of_sequence;
+ ++i) {
+ std::vector<Tensor> element;
+ TF_RETURN_IF_ERROR(
+ input_impl_->GetNext(ctx, &element, end_of_sequence));
if (!*end_of_sequence) {
- batch_elements.push_back(std::move(batch_element_tuple));
+ buffer_.push_back(std::move(element));
} else {
input_impl_.reset();
}
}
- // Drop the final smaller blocks.
- if (batch_elements.size() < window_size) {
+
+ // Drop the final smaller batch.
+ if (buffer_.size() < target_size) {
DCHECK(*end_of_sequence);
return Status::OK();
}
- if (stride < window_size) {
- // Cache the data used for the next iteration.
- for (size_t i = stride; i < window_size; ++i) {
- cache_.emplace_back(batch_elements[i]);
- }
- } else if (stride > window_size) {
- // Drop the data before the next iteration.
- std::vector<Tensor> batch_element_tuple;
- for (size_t i = window_size; i < stride && !*end_of_sequence; ++i) {
- TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx, &batch_element_tuple,
- end_of_sequence));
- if (*end_of_sequence) {
+ for (size_t i = 0; i < window_size; ++i) {
+ batch_elements.emplace_back(buffer_[window_stride * i]);
+ }
+
+ // Drop the data before the next iteration.
+ if (window_shift >= buffer_.size()) {
+ for (size_t i = buffer_.size(); i < window_shift; ++i) {
+ bool end_of_input;
+ std::vector<Tensor> element;
+ TF_RETURN_IF_ERROR(
+ input_impl_->GetNext(ctx, &element, &end_of_input));
+ if (end_of_input) {
input_impl_.reset();
+ break;
}
}
+ buffer_.clear();
+ } else {
+ buffer_.erase(buffer_.begin(), buffer_.begin() + window_shift);
}
}
// Construct output tensors.
- // Those codes below are copied from batch_dataset_op.cc.
const size_t num_tuple_components = batch_elements[0].size();
const int64 num_batch_elements = batch_elements.size();
for (size_t component_index = 0; component_index < num_tuple_components;
@@ -223,15 +230,15 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
} else {
TF_RETURN_IF_ERROR(SaveParent(writer, input_impl_));
}
- // Save cache.
- TF_RETURN_IF_ERROR(
- writer->WriteScalar(strings::StrCat("cache_size"), cache_.size()));
- for (int64 i = 0; i < cache_.size(); i++) {
+ // Save buffer.
+ TF_RETURN_IF_ERROR(writer->WriteScalar(strings::StrCat("buffer_size"),
+ buffer_.size()));
+ for (int64 i = 0; i < buffer_.size(); i++) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
- strings::StrCat("cache[", i, "]_size"), cache_[i].size()));
- for (int64 j = 0; j < cache_[i].size(); j++) {
+ strings::StrCat("buffer[", i, "]_size"), buffer_[i].size()));
+ for (int64 j = 0; j < buffer_[i].size(); j++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
- strings::StrCat("cache[", i, "][", j, "]"), cache_[i][j]));
+ strings::StrCat("buffer[", i, "][", j, "]"), buffer_[i][j]));
}
}
return Status::OK();
@@ -245,32 +252,37 @@ class SlideDatasetOp : public UnaryDatasetOpKernel {
} else {
input_impl_.reset();
}
- // Restore cache.
- int64 cache_size;
+ // Restore buffer.
+ int64 buffer_size;
TF_RETURN_IF_ERROR(
- reader->ReadScalar(strings::StrCat("cache_size"), &cache_size));
- cache_.resize(cache_size);
- for (int64 i = 0; i < cache_size; i++) {
+ reader->ReadScalar(strings::StrCat("buffer_size"), &buffer_size));
+ buffer_.resize(buffer_size);
+ for (int64 i = 0; i < buffer_size; i++) {
int64 vector_size;
TF_RETURN_IF_ERROR(reader->ReadScalar(
- strings::StrCat("cache[", i, "]_size"), &vector_size));
- cache_[i].resize(vector_size);
+ strings::StrCat("buffer[", i, "]_size"), &vector_size));
+ buffer_[i].resize(vector_size);
for (int64 j = 0; j < vector_size; j++) {
TF_RETURN_IF_ERROR(reader->ReadTensor(
- strings::StrCat("cache[", i, "][", j, "]"), &cache_[i][j]));
+ strings::StrCat("buffer[", i, "][", j, "]"), &buffer_[i][j]));
}
}
return Status::OK();
}
private:
+ size_t TargetBufferSize(int64 window_size, int64 window_stride) {
+ return (window_size - 1) * window_stride + 1;
+ }
+
mutex mu_;
- std::vector<std::vector<Tensor>> cache_ GUARDED_BY(mu_);
+ std::deque<std::vector<Tensor>> buffer_ GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ GUARDED_BY(mu_);
};
const int64 window_size_;
- const int64 stride_;
+ const int64 window_shift_;
+ const int64 window_stride_;
const DatasetBase* const input_;
std::vector<PartialTensorShape> output_shapes_;
};
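A small, self-contained sketch of the windowing arithmetic introduced above (the concrete values are hypothetical and the program is illustrative, not part of the patch): with window_size = 3, window_shift = 2 and window_stride = 2, TargetBufferSize gives (3 - 1) * 2 + 1 = 5 buffered elements, each window takes buffer elements 0, 2 and 4, and window_shift elements are then dropped from the front.

#include <cstdio>
#include <deque>

int main() {
  const int window_size = 3, window_shift = 2, window_stride = 2;
  // Matches TargetBufferSize() in the iterator above.
  const int target = (window_size - 1) * window_stride + 1;
  std::deque<int> buffer;  // stand-in for the buffered input elements
  int next = 0;
  for (int step = 0; step < 3; ++step) {
    while (static_cast<int>(buffer.size()) < target) buffer.push_back(next++);
    std::printf("window:");
    for (int i = 0; i < window_size; ++i)
      std::printf(" %d", buffer[window_stride * i]);
    std::printf("\n");  // prints 0 2 4, then 2 4 6, then 4 6 8
    buffer.erase(buffer.begin(), buffer.begin() + window_shift);
  }
  return 0;
}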
diff --git a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
index 2604822cc9..b5dff48d2d 100644
--- a/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
+++ b/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc
@@ -252,10 +252,12 @@ class SparseTensorSliceDatasetOp : public DatasetOpKernel {
previous_batch_index = next_batch_index;
}
gtl::InlinedVector<int64, 8> std_order(dense_shape->NumElements(), 0);
- sparse::SparseTensor sparse_tensor(
- *indices, *values, TensorShape(dense_shape->vec<int64>()), std_order);
-
- *output = new Dataset<T>(ctx, sparse_tensor);
+ sparse::SparseTensor tensor;
+ OP_REQUIRES_OK(
+ ctx, sparse::SparseTensor::Create(
+ *indices, *values, TensorShape(dense_shape->vec<int64>()),
+ std_order, &tensor));
+ *output = new Dataset<T>(ctx, std::move(tensor));
}
private:
diff --git a/tensorflow/core/kernels/data/stats_dataset_ops.cc b/tensorflow/core/kernels/data/stats_dataset_ops.cc
index a537e7e68f..58ec3d4495 100644
--- a/tensorflow/core/kernels/data/stats_dataset_ops.cc
+++ b/tensorflow/core/kernels/data/stats_dataset_ops.cc
@@ -310,7 +310,7 @@ class FeatureStatsDatasetOp : public UnaryDatasetOpKernel {
for (const Tensor& t : *out_tensors) {
auto record_t = t.flat<string>();
Example example;
- // TODO(shivaniagrawal): redundant parsing here, potential solutions
+ // TODO(b/111553342): redundant parsing here, potential solutions
// to improve performance is to a) have a potential
// ParseExampleDataset and collect stats from there and b) make
// changes to parse_example() where it returns stats as well.
@@ -333,7 +333,6 @@ class FeatureStatsDatasetOp : public UnaryDatasetOpKernel {
return s;
}
- // TODO(shivaniagrawal): Add features/feature-values to streamz metrics.
int AddStatsFeatureValues(const Feature& feature) {
int feature_values_list_size = 0;
switch (feature.kind_case()) {
@@ -391,7 +390,7 @@ class FeatureStatsDatasetOp : public UnaryDatasetOpKernel {
for (const auto& feature_list :
example.feature_lists().feature_list()) {
- stats_aggregator->IncrementCounter("feature_lists_count", "reainer",
+ stats_aggregator->IncrementCounter("feature_lists_count", "trainer",
1);
for (const auto& feature : feature_list.second.feature()) {
feature_values_list_size_sum += AddStatsFeatureValues(feature);
diff --git a/tensorflow/core/kernels/decode_proto_op.cc b/tensorflow/core/kernels/decode_proto_op.cc
index 6d3dcc1c59..b54e1ea8ac 100644
--- a/tensorflow/core/kernels/decode_proto_op.cc
+++ b/tensorflow/core/kernels/decode_proto_op.cc
@@ -13,21 +13,19 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-// DecodeProto is a TensorFlow Op which extracts arbitrary fields
-// from protos serialized as strings.
+// DecodeProto is a TensorFlow op which extracts arbitrary fields from protos
+// serialized as strings.
//
// See docs in ../ops/decode_proto_op.cc.
//
-// This implementation reads the serialized format using a handful of
-// calls from the WireFormatLite API used by generated proto code.
-// WireFormatLite is marked as an "internal" proto API but is widely
-// used in practice and highly unlikely to change.
-// This will be much faster than the previous implementation based on
-// constructing a temporary dynamic message in memory and using the
-// proto reflection api to read it.
-// It can be used with any proto whose descriptors are available at
-// runtime but should be competitive in speed with approaches that
-// compile in the proto definitions.
+// This implementation reads the serialized format using a handful of calls from
+// the WireFormatLite API used by generated proto code. WireFormatLite is marked
+// as an "internal" proto API but is widely used in practice and highly unlikely
+// to change. This will be much faster than the previous implementation based on
+// constructing a temporary dynamic message in memory and using the proto
+// reflection api to read it. It can be used with any proto whose descriptors
+// are available at runtime but should be competitive in speed with approaches
+// that compile in the proto definitions.
#include <memory>
#include <string>
@@ -36,11 +34,13 @@ limitations under the License.
#include "third_party/eigen3/Eigen/Core"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
+#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/util/proto/decode.h"
#include "tensorflow/core/util/proto/descriptors.h"
+#include "tensorflow/core/util/proto/proto_utils.h"
#include "tensorflow/core/util/ptr_util.h"
namespace tensorflow {
@@ -58,53 +58,6 @@ using ::tensorflow::protobuf::io::CodedInputStream;
const bool kFailOnDecodeError = true;
-// Returns true if the proto field type can be converted to the
-// tensorflow::DataType.
-bool CheckOutputType(FieldDescriptor::Type field_type, DataType output_type) {
- switch (field_type) {
- case WireFormatLite::TYPE_DOUBLE:
- return output_type == tensorflow::DT_DOUBLE;
- case WireFormatLite::TYPE_FLOAT:
- return output_type == tensorflow::DT_FLOAT ||
- output_type == tensorflow::DT_DOUBLE;
- case WireFormatLite::TYPE_INT64:
- return output_type == tensorflow::DT_INT64;
- case WireFormatLite::TYPE_UINT64:
- return output_type == tensorflow::DT_INT64;
- case WireFormatLite::TYPE_INT32:
- return output_type == tensorflow::DT_INT32;
- case WireFormatLite::TYPE_FIXED64:
- return output_type == tensorflow::DT_INT64;
- case WireFormatLite::TYPE_FIXED32:
- return output_type == tensorflow::DT_INT32 ||
- output_type == tensorflow::DT_INT64;
- case WireFormatLite::TYPE_BOOL:
- return output_type == tensorflow::DT_BOOL;
- case WireFormatLite::TYPE_STRING:
- return output_type == tensorflow::DT_STRING;
- case WireFormatLite::TYPE_GROUP:
- return output_type == tensorflow::DT_STRING;
- case WireFormatLite::TYPE_MESSAGE:
- return output_type == tensorflow::DT_STRING;
- case WireFormatLite::TYPE_BYTES:
- return output_type == tensorflow::DT_STRING;
- case WireFormatLite::TYPE_UINT32:
- return output_type == tensorflow::DT_INT32 ||
- output_type == tensorflow::DT_INT64;
- case WireFormatLite::TYPE_ENUM:
- return output_type == tensorflow::DT_INT32;
- case WireFormatLite::TYPE_SFIXED32:
- return output_type == tensorflow::DT_INT32;
- case WireFormatLite::TYPE_SFIXED64:
- return output_type == tensorflow::DT_INT64;
- case WireFormatLite::TYPE_SINT32:
- return output_type == tensorflow::DT_INT32;
- case WireFormatLite::TYPE_SINT64:
- return output_type == tensorflow::DT_INT64;
- // default: intentionally omitted in order to enable static checking.
- }
-}
-
// Used to store the default value of a protocol message field, casted to the
// type of the output tensor.
//
@@ -113,13 +66,15 @@ struct DefaultValue {
DataType dtype = DataType::DT_INVALID;
union Value {
bool v_bool; // DT_BOOL
- uint8 v_uint8; // DT_UINT8
+ double v_double; // DT_DOUBLE
+ float v_float; // DT_FLOAT
int8 v_int8; // DT_INT8
int32 v_int32; // DT_INT32
int64 v_int64; // DT_INT64
- float v_float; // DT_FLOAT
- double v_double; // DT_DOUBLE
const char* v_string; // DT_STRING
+ uint8 v_uint8; // DT_UINT8
+ uint32 v_uint32; // DT_UINT32
+ uint64 v_uint64; // DT_UINT64
};
Value value;
};
@@ -138,23 +93,29 @@ Status InitDefaultValue(DataType dtype, const T value, DefaultValue* result) {
case DT_BOOL:
result->value.v_bool = static_cast<bool>(value);
break;
- case DT_INT32:
- result->value.v_int32 = static_cast<int32>(value);
+ case DT_DOUBLE:
+ result->value.v_double = static_cast<double>(value);
+ break;
+ case DT_FLOAT:
+ result->value.v_float = static_cast<float>(value);
break;
case DT_INT8:
result->value.v_int8 = static_cast<int8>(value);
break;
- case DT_UINT8:
- result->value.v_uint8 = static_cast<uint8>(value);
+ case DT_INT32:
+ result->value.v_int32 = static_cast<int32>(value);
break;
case DT_INT64:
result->value.v_int64 = static_cast<int64>(value);
break;
- case DT_FLOAT:
- result->value.v_float = static_cast<float>(value);
+ case DT_UINT8:
+ result->value.v_uint8 = static_cast<uint8>(value);
break;
- case DT_DOUBLE:
- result->value.v_double = static_cast<double>(value);
+ case DT_UINT32:
+ result->value.v_uint32 = static_cast<uint32>(value);
+ break;
+ case DT_UINT64:
+ result->value.v_uint64 = static_cast<uint64>(value);
break;
default:
// We should never get here, given the type checking that occurs earlier.
@@ -241,13 +202,11 @@ struct FieldInfo {
number = field_desc->number();
// The wire format library defines the same constants used in
- // descriptor.proto. This static_cast is safe because they
- // are guaranteed to stay in sync.
- // We need the field type from the FieldDescriptor here
- // because the wire format doesn't tell us anything about
- // what happens inside a packed repeated field: there is
- // enough information in the wire format to skip the
- // whole field but not enough to know how to parse what's
+ // descriptor.proto. This static_cast is safe because they are guaranteed to
+ // stay in sync. We need the field type from the FieldDescriptor here
+ // because the wire format doesn't tell us anything about what happens
+ // inside a packed repeated field: there is enough information in the wire
+ // format to skip the whole field but not enough to know how to parse what's
// inside. For that we go to the schema.
type = static_cast<WireFormatLite::FieldType>(field_desc->type());
is_repeated = field_desc->is_repeated();
@@ -257,16 +216,15 @@ struct FieldInfo {
FieldInfo(const FieldInfo&) = delete;
FieldInfo& operator=(const FieldInfo&) = delete;
- // Internally we sort field descriptors by wire number for
- // fast lookup. In general this is different from the order
- // given by the user. Output_index gives the index into
- // the field_names and output_types attributes and into
+ // Internally we sort field descriptors by wire number for fast lookup. In
+ // general this is different from the order given by the user. Output_index
+ // gives the index into the field_names and output_types attributes and into
// the output tensor list.
int output_index = -1;
- // This is a cache of the relevant fields from `FieldDescriptorProto`.
- // This was added after noticing that FieldDescriptor->type() was
- // using 6% of the cpu profile.
+ // This is a cache of the relevant fields from `FieldDescriptorProto`. This
+ // was added after noticing that FieldDescriptor->type() was using 6% of the
+ // cpu profile.
WireFormatLite::FieldType type;
int number;
bool is_repeated;
@@ -275,16 +233,16 @@ struct FieldInfo {
// A CountCollector counts sizes of repeated and optional fields in a proto.
//
-// Each field is tracked by a single CountCollector instance. The
-// instance manages a single count, which is stored as a pointer (it
-// is intended to be a reference to the `sizes` output which is being
-// filled in). The pointer is passed in at initialization.
+// Each field is tracked by a single CountCollector instance. The instance
+// manages a single count, which is stored as a pointer (it is intended to be a
+// reference to the `sizes` output which is being filled in). The pointer is
+// passed in at initialization.
//
-// Counting is done as a separate pass in order to allocate output tensors
-// all at once. This allows the TensorFlow runtime to optimize allocation
-// for the consumer, while removing the need for copying inside this op.
-// After this pass, the DenseCollector class (below) gathers the data:
-// It is more complex and provides better motivation for the API here.
+// Counting is done as a separate pass in order to allocate output tensors all
+// at once. This allows the TensorFlow runtime to optimize allocation for the
+// consumer, while removing the need for copying inside this op. After this
+// pass, the DenseCollector class (below) gathers the data: it is more complex
+// and provides better motivation for the API here.
class CountCollector {
public:
CountCollector() = delete;
@@ -298,8 +256,8 @@ class CountCollector {
if (*count_ptr_ == 0 || field.is_repeated) {
(*count_ptr_)++;
}
- // We expect a wire type based on the schema field_type, to allow
- // a little more checking.
+ // We expect a wire type based on the schema field_type, to allow a little
+ // more checking.
if (!SkipValue(input, field)) {
return errors::DataLoss("ReadValue: Failed skipping field when counting");
}
@@ -329,8 +287,8 @@ class CountCollector {
return errors::DataLoss("ReadPackedValues: Skipping packed field failed");
}
- // Dispatch to the appropriately typed field reader based on the
- // schema type.
+ // Dispatch to the appropriately typed field reader based on the schema
+ // type.
Status st;
switch (field.type) {
case WireFormatLite::TYPE_DOUBLE:
@@ -409,18 +367,17 @@ class CountCollector {
return input->Skip(length);
}
- // Counts the number of packed varints in an array.
- // The end of a varint is signaled by a value < 0x80,
- // so counting them requires parsing the bytestream.
- // It is the caller's responsibility to ensure that len > 0.
+ // Counts the number of packed varints in an array. The end of a varint is
+ // signaled by a value < 0x80, so counting them requires parsing the
+ // bytestream. It is the caller's responsibility to ensure that len > 0.
Status CountPackedVarint(const uint8* buf, size_t len) {
const uint8* bound = buf + len;
int count;
- // The last byte in a valid encoded varint is guaranteed to have
- // the high bit unset. We rely on this property to prevent
- // ReadVarint64FromArray from going out of bounds, so validate
- // the end of the buf before scanning anything.
+ // The last byte in a valid encoded varint is guaranteed to have the high
+ // bit unset. We rely on this property to prevent ReadVarint64FromArray from
+ // going out of bounds, so validate the end of the buf before scanning
+ // anything.
if (bound[-1] & 0x80) {
return errors::DataLoss("Corrupt packed varint");
}
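A standalone sketch of the counting idea (illustrative only, not the op's code): each varint ends with a byte whose high bit is clear, so counting those terminator bytes counts the varints, provided the buffer ends on a varint boundary as validated above.

#include <cstddef>
#include <cstdint>

// Counts varints in a packed buffer known to end on a varint boundary.
int CountVarintsInBuffer(const uint8_t* buf, size_t len) {
  int count = 0;
  for (size_t i = 0; i < len; ++i) {
    if ((buf[i] & 0x80) == 0) ++count;  // high bit clear => last byte of a varint
  }
  return count;
}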
@@ -439,8 +396,8 @@ class CountCollector {
return Status::OK();
}
- // Counts the number of fixed-size values in a packed field.
- // This can be done without actually parsing anything.
+ // Counts the number of fixed-size values in a packed field. This can be done
+ // without actually parsing anything.
template <typename T>
Status CountPackedFixed(const uint8* unused_buf, size_t len) {
int count = len / sizeof(T);
@@ -452,10 +409,9 @@ class CountCollector {
return Status::OK();
}
- // Skips a single value in the input stream.
- // Dispatches to the appropriately typed field skipper based on the
- // schema type tag.
- // This is not as permissive as just handling the wire type.
+ // Skips a single value in the input stream. Dispatches to the appropriately
+ // typed field skipper based on the schema type tag. This is not as permissive
+ // as just handling the wire type.
static bool SkipValue(CodedInputStream* input, const FieldInfo& field) {
uint32 tmp32;
protobuf_uint64 tmp64;
@@ -507,13 +463,13 @@ class CountCollector {
// A DenseCollector accumulates values from a proto into a tensor.
//
-// There is an instance of DenseCollector for each field of each
-// proto. The DenseCollector deserializes the value from the wire
-// directly into the preallocated output Tensor.
+// There is an instance of DenseCollector for each field of each proto. The
+// DenseCollector deserializes the value from the wire directly into the
+// preallocated output Tensor.
//
-// This class is named DenseCollector because in the future there should
-// be a SparseCollector that accumulates field data into sparse tensors if
-// the user requests it.
+// This class is named DenseCollector because in the future there should be a
+// SparseCollector that accumulates field data into sparse tensors if the user
+// requests it.
class DenseCollector {
public:
DenseCollector() = delete;
@@ -578,40 +534,43 @@ class DenseCollector {
}
}
- // Fills in any missing values in the output array with defaults.
- // Dispatches to the appropriately typed field default based on the
- // runtime type tag.
+ // Fills in any missing values in the output array with defaults. Dispatches
+ // to the appropriately typed field default based on the runtime type tag.
Status FillWithDefaults() {
switch (default_value_.dtype) {
+ case DataType::DT_BOOL:
+ return FillDefault<bool>(default_value_.value.v_bool);
case DataType::DT_FLOAT:
return FillDefault<float>(default_value_.value.v_float);
case DataType::DT_DOUBLE:
return FillDefault<double>(default_value_.value.v_double);
- case DataType::DT_INT32:
- return FillDefault<int32>(default_value_.value.v_int32);
- case DataType::DT_UINT8:
- return FillDefault<uint8>(default_value_.value.v_uint8);
case DataType::DT_INT8:
return FillDefault<int8>(default_value_.value.v_int8);
- case DataType::DT_STRING:
- return FillDefault<string>(default_value_.value.v_string);
+ case DataType::DT_INT32:
+ return FillDefault<int32>(default_value_.value.v_int32);
case DataType::DT_INT64:
return FillDefault<int64>(default_value_.value.v_int64);
- case DataType::DT_BOOL:
- return FillDefault<bool>(default_value_.value.v_bool);
+ case DataType::DT_STRING:
+ return FillDefault<string>(default_value_.value.v_string);
+ case DataType::DT_UINT8:
+ return FillDefault<uint8>(default_value_.value.v_uint8);
+ case DataType::DT_UINT32:
+ return FillDefault<uint32>(default_value_.value.v_uint32);
+ case DataType::DT_UINT64:
+ return FillDefault<uint64>(default_value_.value.v_uint64);
default:
// There are many tensorflow dtypes not handled here, but they
// should not come up unless type casting is added to the Op.
// Chaining with tf.cast() should do the right thing until then.
- return errors::DataLoss(
- "Failed filling defaults in unknown tf::DataType");
+ return errors::DataLoss("Failed filling defaults for ",
+ DataTypeString(default_value_.dtype));
}
}
private:
- // Fills empty values in the dense representation with a
- // default value. This uses next_repeat_index_ which counts the number
- // of parsed values for the field.
+ // Fills empty values in the dense representation with a default value. This
+ // uses next_repeat_index_ which counts the number of parsed values for the
+ // field.
template <class T>
Status FillDefault(const T& default_value) {
for (int i = next_repeat_index_; i < max_repeat_count_; i++) {
@@ -622,11 +581,10 @@ class DenseCollector {
int32 next_repeat_index_ = 0;
- // This is a pointer to data_[message_index_].
- // There is no bounds checking at this level: we computed the max
- // repeat size for each field in CountCollector and use the same
- // code to traverse it here, so we are guaranteed not to be called
- // for more items than we have allocated space.
+ // This is a pointer to data_[message_index_]. There is no bounds checking at
+ // this level: we computed the max repeat size for each field in
+ // CountCollector and use the same code to traverse it here, so we are
+ // guaranteed not to be called for more items than we have allocated space.
void* const datap_ = nullptr;
const DefaultValue default_value_;
@@ -665,7 +623,6 @@ class DecodeProtoOp : public OpKernel {
"have the same length"));
// Gather the field descriptors and check that requested output types match.
-
int field_index = 0;
std::vector<const FieldDescriptor*> field_descs;
for (const string& name : field_names) {
@@ -673,18 +630,16 @@ class DecodeProtoOp : public OpKernel {
OP_REQUIRES(context, fd != nullptr,
errors::InvalidArgument("Unknown field: ", name,
" in message type ", message_type));
- OP_REQUIRES(context,
- CheckOutputType(fd->type(), output_types[field_index]),
- // Many TensorFlow types don't have corresponding proto types
- // and the user will get an error if they are requested. It
- // would be nice to allow conversions here, but tf.cast
- // already exists so we don't duplicate the functionality.
- // Known unhandled types:
- // DT_INT16 DT_COMPLEX64 DT_QINT8 DT_QUINT8 DT_QINT32
- // DT_BFLOAT16 DT_QINT16 DT_QUINT16 DT_UINT16
- errors::InvalidArgument("Unexpected output type for ",
- fd->full_name(), ": ", fd->cpp_type(),
- " to ", output_types[field_index]));
+ OP_REQUIRES(
+ context,
+ proto_utils::IsCompatibleType(fd->type(), output_types[field_index]),
+ // Many TensorFlow types don't have corresponding proto types and the
+ // user will get an error if they are requested. It would be nice to
+ // allow conversions here, but tf.cast already exists so we don't
+ // duplicate the functionality.
+ errors::InvalidArgument("Unexpected output type for ",
+ fd->full_name(), ": ", fd->cpp_type(), " to ",
+ output_types[field_index]));
field_index++;
field_descs.push_back(fd);
@@ -726,10 +681,9 @@ class DecodeProtoOp : public OpKernel {
errors::InvalidArgument("format must be one of binary or text"));
is_binary_ = format == "binary";
- // Enable the initial protobuf sanitizer, which is much
- // more expensive than the decoder.
- // TODO(nix): Remove this once the fast decoder
- // has passed security review.
+ // Enable the initial protobuf sanitizer, which is much more expensive than
+ // the decoder.
+ // TODO(nix): Remove this once the fast decoder has passed security review.
OP_REQUIRES_OK(context, context->GetAttr("sanitize", &sanitize_));
}
@@ -742,9 +696,9 @@ class DecodeProtoOp : public OpKernel {
int field_count = fields_.size();
- // Save the argument shape for later, then flatten the input
- // Tensor since we are working componentwise. We will restore
- // the same shape in the returned Tensor.
+ // Save the argument shape for later, then flatten the input Tensor since we
+ // are working componentwise. We will restore the same shape in the returned
+ // Tensor.
const TensorShape& shape_prefix = buf_tensor.shape();
TensorShape sizes_shape = shape_prefix;
@@ -752,8 +706,8 @@ class DecodeProtoOp : public OpKernel {
Tensor* sizes_tensor = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, sizes_shape, &sizes_tensor));
- // This is used to allocate binary bufs if used. It serves only
- // to define memory ownership.
+ // This is used to allocate binary bufs if used. It serves only to define
+ // memory ownership.
std::vector<string> tmp_binary_bufs(message_count);
// These are the actual buffers to use, which may be in tmp_binary_bufs
@@ -768,8 +722,8 @@ class DecodeProtoOp : public OpKernel {
bufs.push_back(buf);
}
} else {
- // We will have to allocate a copy, either to convert from text to
- // binary or to sanitize a binary proto.
+ // We will have to allocate a copy, either to convert from text to binary
+ // or to sanitize a binary proto.
for (int mi = 0; mi < message_count; ++mi) {
ReserializeMessage(ctx, buf_tensor.flat<string>()(mi),
&tmp_binary_bufs[mi]);
@@ -780,16 +734,14 @@ class DecodeProtoOp : public OpKernel {
}
}
- // Walk through all the strings in the input tensor, counting
- // the number of fields in each.
- // We can't allocate our actual output Tensor until we know the
- // maximum repeat count, so we do a first pass through the serialized
- // proto just counting fields.
- // We always allocate at least one value so that optional fields
- // are populated with default values - this avoids a TF
- // conditional when handling the output data.
- // The caller can distinguish between real data and defaults
- // using the repeat count matrix that is returned by decode_proto.
+ // Walk through all the strings in the input tensor, counting the number of
+ // fields in each. We can't allocate our actual output Tensor until we know
+ // the maximum repeat count, so we do a first pass through the serialized
+ // proto just counting fields. We always allocate at least one value so that
+ // optional fields are populated with default values - this avoids a TF
+ // conditional when handling the output data. The caller can distinguish
+ // between real data and defaults using the repeat count matrix that is
+ // returned by decode_proto.
std::vector<int32> max_sizes(field_count, 1);
for (int mi = 0; mi < message_count; ++mi) {
CountFields(ctx, mi, *bufs[mi], sizes_tensor, &max_sizes);
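A self-contained sketch of this count-then-allocate-then-fill shape, using plain strings in place of protos (names and types here are illustrative, not from this op):

#include <algorithm>
#include <string>
#include <vector>

// Pass 1 finds the largest per-message size (at least 1, so there is always
// room for a default), a single allocation sizes every row, pass 2 fills them.
std::vector<std::vector<char>> TwoPassFill(const std::vector<std::string>& msgs) {
  size_t max_count = 1;
  for (const std::string& m : msgs) max_count = std::max(max_count, m.size());
  std::vector<std::vector<char>> out(msgs.size(),
                                     std::vector<char>(max_count, '\0'));
  for (size_t i = 0; i < msgs.size(); ++i) {
    std::copy(msgs[i].begin(), msgs[i].end(), out[i].begin());
  }
  return out;
}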
@@ -814,14 +766,12 @@ class DecodeProtoOp : public OpKernel {
// REGISTER_OP(...)
// .Attr("output_types: list(type) >= 0")
// .Output("values: output_types")
- OP_REQUIRES_OK(ctx,
- // ctx->allocate_output(output_indices_[fi] + 1,
- ctx->allocate_output(fields_[fi]->output_index + 1,
- out_shape, &outputs[fi]));
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(fields_[fi]->output_index + 1,
+ out_shape, &outputs[fi]));
}
- // Make the second pass through the serialized proto, decoding
- // into preallocated tensors.
+ // Make the second pass through the serialized proto, decoding into
+ // preallocated tensors.
AccumulateFields(ctx, bufs, outputs);
}
@@ -976,6 +926,7 @@ class DecodeProtoOp : public OpKernel {
// Look up the FieldDescriptor for a particular field number.
bool LookupField(int field_number, int* field_index) {
// Look up the FieldDescriptor using linear search.
+ //
// TODO(nix): this could be sped up with binary search, but we are
// already way off the fastpath at this point. If you see a hotspot
// here, somebody is sending you very inefficient protos.
@@ -1010,6 +961,7 @@ class DecodeProtoOp : public OpKernel {
// This takes advantage of the sorted field numbers in most serialized
// protos: it tries the next expected field first rather than doing
// a lookup by field number.
+ //
// TODO(nix): haberman@ suggests a hybrid approach with a lookup table
// for small field numbers and a hash table for larger ones. This would
// be a simpler approach that should offer comparable speed in most
@@ -1029,9 +981,9 @@ class DecodeProtoOp : public OpKernel {
last_good_field_index = field_index;
}
} else {
- // If we see a field that is past the next field we want,
- // it was empty. Look for the one after that.
- // Repeat until we run out of fields that we care about.
+ // If we see a field that is past the next field we want, it was
+ // empty. Look for the one after that. Repeat until we run out of
+ // fields that we care about.
while (field_number >= next_good_field_number) {
if (field_number == next_good_field_number) {
last_good_field_number = field_number;
@@ -1044,10 +996,9 @@ class DecodeProtoOp : public OpKernel {
next_good_field_number =
fields_[last_good_field_index + 1]->number;
} else {
- // Saw something past the last field we care about.
- // Continue parsing the message just in case there
- // are disordered fields later, but any remaining
- // ordered fields will have no effect.
+ // Saw something past the last field we care about. Continue
+ // parsing the message just in case there are disordered fields
+ // later, but any remaining ordered fields will have no effect.
next_good_field_number = INT_MAX;
}
}
@@ -1077,20 +1028,20 @@ class DecodeProtoOp : public OpKernel {
WireFormatLite::WireType wire_type,
CodedInputStream* input, CollectorClass* collector) {
// The wire format library defines the same constants used in
- // descriptor.proto. This static_cast is safe because they
- // are guaranteed to stay in sync.
- // We need the field type from the FieldDescriptor here
- // because the wire format doesn't tell us anything about
- // what happens inside a packed repeated field: there is
- // enough information in the wire format to skip the
- // whole field but not enough to know how to parse what's
- // inside. For that we go to the schema.
+ // descriptor.proto. This static_cast is safe because they are guaranteed to
+ // stay in sync.
+ //
+ // We need the field type from the FieldDescriptor here because the wire
+ // format doesn't tell us anything about what happens inside a packed
+ // repeated field: there is enough information in the wire format to skip
+ // the whole field but not enough to know how to parse what's inside. For
+ // that we go to the schema.
WireFormatLite::WireType schema_wire_type =
WireFormatLite::WireTypeForFieldType(field.type);
- // Handle packed repeated fields. SkipField would skip the
- // whole length-delimited blob without letting us count the
- // values, so we have to scan them ourselves.
+ // Handle packed repeated fields. SkipField would skip the whole
+ // length-delimited blob without letting us count the values, so we have to
+ // scan them ourselves.
if (wire_type == WireFormatLite::WIRETYPE_LENGTH_DELIMITED &&
schema_wire_type != WireFormatLite::WIRETYPE_LENGTH_DELIMITED) {
// Handle packed repeated primitives.
@@ -1098,11 +1049,7 @@ class DecodeProtoOp : public OpKernel {
if (!input->ReadVarintSizeAsInt(&length)) {
return errors::DataLoss("CollectField: Failed reading packed size");
}
- Status st = collector->ReadPackedValues(input, field, length);
- if (!st.ok()) {
- return st;
- }
- return Status::OK();
+ return collector->ReadPackedValues(input, field, length);
}
// Read ordinary values, including strings, bytes, and messages.
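For a concrete (illustrative) picture of why the schema is needed here: a packed repeated int32 field with field number 4 and values {3, 270} is serialized as the tag byte 0x22 ((4 << 3) | 2, wire type LENGTH_DELIMITED), the length byte 0x03, and the payload bytes 0x03 0x8E 0x02. The wire format alone only says "skip 3 bytes"; the schema's TYPE_INT32 is what tells the decoder to parse those bytes as two varints.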
@@ -1118,9 +1065,9 @@ class DecodeProtoOp : public OpKernel {
}
string message_type_;
- // Note that fields are sorted by increasing field number,
- // which is not in general the order given by the user-specified
- // field_names and output_types Op attributes.
+ // Note that fields are sorted by increasing field number, which is not in
+ // general the order given by the user-specified field_names and output_types
+ // Op attributes.
std::vector<std::unique_ptr<const FieldInfo>> fields_;
// Owned_desc_pool_ is null when using descriptor_source=local.
@@ -1131,12 +1078,12 @@ class DecodeProtoOp : public OpKernel {
// True if decoding binary format, false if decoding text format.
bool is_binary_;
- // True if the protos should be sanitized before parsing.
- // Enables the initial protobuf sanitizer, which is much
- // more expensive than the decoder. The flag defaults to true
- // but can be set to false for trusted sources.
- // TODO(nix): flip the default to false when the fast decoder
- // has passed security review.
+ // True if the protos should be sanitized before parsing. Enables the initial
+ // protobuf sanitizer, which is much more expensive than the decoder. The flag
+ // defaults to true but can be set to false for trusted sources.
+ //
+ // TODO(nix): Flip the default to false when the fast decoder has passed
+ // security review.
bool sanitize_;
TF_DISALLOW_COPY_AND_ASSIGN(DecodeProtoOp);
diff --git a/tensorflow/core/kernels/depthwise_conv_op_gpu.cu.cc b/tensorflow/core/kernels/depthwise_conv_op_gpu.cu.cc
index 5390222b3a..2a25459194 100644
--- a/tensorflow/core/kernels/depthwise_conv_op_gpu.cu.cc
+++ b/tensorflow/core/kernels/depthwise_conv_op_gpu.cu.cc
@@ -165,15 +165,18 @@ __global__ void __launch_bounds__(1024, 2)
// one each in the lower and upper half of a tile.
// Backprop input direction is the same as forward direction with the filter
// rotated by 180°.
+// T is the tensors' data type. S is the math type the kernel uses. This is the
+// same as T for all cases but pseudo half (which has T=Eigen::half, S=float).
template <typename T, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
- bool kKnownEvenHeight>
+ bool kKnownEvenHeight, typename S>
__global__ __launch_bounds__(1024, 2) void DepthwiseConv2dGPUKernelNHWCSmall(
const DepthwiseArgs args, const T* input, const T* filter, T* output) {
assert(CanLaunchDepthwiseConv2dGPUSmall(args));
// Holds block plus halo and filter data for blockDim.x depths.
- extern __shared__ __align__(sizeof(T)) unsigned char shared_memory[];
- T* const shared_data = reinterpret_cast<T*>(shared_memory);
+ extern __shared__ __align__(8) unsigned char shared_memory[];
+ static_assert(sizeof(S) <= 8, "Insufficient alignment detected");
+ S* const shared_data = reinterpret_cast<S*>(shared_memory);
const int num_batches = args.batch;
const int in_height = args.in_rows;
@@ -219,7 +222,7 @@ __global__ __launch_bounds__(1024, 2) void DepthwiseConv2dGPUKernelNHWCSmall(
// Initialize tile, in particular the padding.
for (int i = thread_idx; i < tile_size; i += block_size) {
- shared_data[i] = T(0);
+ shared_data[i] = S();
}
__syncthreads();
@@ -254,14 +257,15 @@ __global__ __launch_bounds__(1024, 2) void DepthwiseConv2dGPUKernelNHWCSmall(
if (channel_in_range) {
const T* const in_ptr = inout_offset + input;
- T* const tile_ptr = tile_idx + shared_data;
- tile_ptr[0] = ldg(in_ptr);
+ S* const tile_ptr = tile_idx + shared_data;
+ tile_ptr[0] = static_cast<S>(ldg(in_ptr));
if (!skip_second) {
- tile_ptr[tile_offset] = ldg(tensor_offset + in_ptr);
+ tile_ptr[tile_offset] = static_cast<S>(ldg(tensor_offset + in_ptr));
}
if (filter_write_offset != 0) {
- shared_data[filter_write_offset] = ldg(filter_offset + filter);
+ shared_data[filter_write_offset] =
+ static_cast<S>(ldg(filter_offset + filter));
}
}
@@ -269,17 +273,17 @@ __global__ __launch_bounds__(1024, 2) void DepthwiseConv2dGPUKernelNHWCSmall(
__syncthreads();
if (channel_in_range) {
- T sum1 = static_cast<T>(0);
- T sum2 = static_cast<T>(0);
+ S sum1 = S();
+ S sum2 = S();
int shared_offset = data_idx;
- const T* filter_ptr = filter_read_offset + shared_data;
+ const S* filter_ptr = filter_read_offset + shared_data;
UNROLL for (int r = 0; r < filter_height; ++r) {
UNROLL for (int c = 0; c < filter_width; ++c) {
if (kDirection == DIRECTION_BACKWARD) {
filter_ptr -= kBlockDepth;
}
- const T filter_value = *filter_ptr;
- const T* const tile_ptr = shared_offset + shared_data;
+ const S filter_value = *filter_ptr;
+ const S* const tile_ptr = shared_offset + shared_data;
sum1 += filter_value * tile_ptr[0];
sum2 += filter_value * tile_ptr[tile_offset];
shared_offset += kBlockDepth;
@@ -290,9 +294,9 @@ __global__ __launch_bounds__(1024, 2) void DepthwiseConv2dGPUKernelNHWCSmall(
shared_offset += in_increment;
}
T* const out_ptr = inout_offset + output;
- out_ptr[0] = sum1;
+ out_ptr[0] = static_cast<T>(sum1);
if (!skip_second) {
- out_ptr[tensor_offset] = sum2;
+ out_ptr[tensor_offset] = static_cast<T>(sum2);
}
}
@@ -445,15 +449,18 @@ __global__ void __launch_bounds__(1024, 2)
// one each in the lower and upper half of a tile.
// Backprop input direction is the same as forward direction with the filter
// rotated by 180°.
+// T is the tensors' data type. S is the math type the kernel uses. This is the
+// same as T for all cases but pseudo half (which has T=Eigen::half, S=float).
template <typename T, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
- bool kKnownEvenHeight>
+ bool kKnownEvenHeight, typename S>
__global__ __launch_bounds__(1024, 2) void DepthwiseConv2dGPUKernelNCHWSmall(
const DepthwiseArgs args, const T* input, const T* filter, T* output) {
assert(CanLaunchDepthwiseConv2dGPUSmall(args));
// Holds block plus halo and filter data for blockDim.z depths.
- extern __shared__ __align__(sizeof(T)) unsigned char shared_memory[];
- T* const shared_data = reinterpret_cast<T*>(shared_memory);
+ extern __shared__ __align__(8) unsigned char shared_memory[];
+ static_assert(sizeof(S) <= 8, "Insufficient alignment detected");
+ S* const shared_data = reinterpret_cast<S*>(shared_memory);
const int num_batches = args.batch;
const int in_height = args.in_rows;
@@ -498,7 +505,7 @@ __global__ __launch_bounds__(1024, 2) void DepthwiseConv2dGPUKernelNCHWSmall(
// Initialize tile, in particular the padding.
for (int i = thread_idx; i < tile_size; i += block_size) {
- shared_data[i] = T(0);
+ shared_data[i] = S();
}
__syncthreads();
@@ -534,34 +541,35 @@ __global__ __launch_bounds__(1024, 2) void DepthwiseConv2dGPUKernelNCHWSmall(
if (channel_in_range) {
const T* const in_ptr = inout_offset + input;
- T* const tile_ptr = tile_idx + shared_data;
- tile_ptr[0] = ldg(in_ptr);
+ S* const tile_ptr = tile_idx + shared_data;
+ tile_ptr[0] = static_cast<S>(ldg(in_ptr));
if (!skip_second) {
- tile_ptr[tile_offset] = ldg(block_pixels + in_ptr);
+ tile_ptr[tile_offset] = static_cast<S>(ldg(block_pixels + in_ptr));
}
}
if (filter_write_offset != 0) {
const int filter_offset =
filter_idx + (channel + filter_channel) % in_depth;
- shared_data[filter_write_offset] = ldg(filter_offset + filter);
+ shared_data[filter_write_offset] =
+ static_cast<S>(ldg(filter_offset + filter));
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
if (channel_in_range) {
- T sum1 = static_cast<T>(0);
- T sum2 = static_cast<T>(0);
+ S sum1 = S();
+ S sum2 = S();
int shared_offset = data_idx;
- const T* filter_ptr = filter_read_offset + shared_data;
+ const S* filter_ptr = filter_read_offset + shared_data;
UNROLL for (int r = 0; r < filter_height; ++r) {
UNROLL for (int c = 0; c < filter_width; ++c) {
if (kDirection == DIRECTION_BACKWARD) {
filter_ptr -= kBlockDepth;
}
- const T filter_value = *filter_ptr;
- const T* const tile_ptr = shared_offset + shared_data;
+ const S filter_value = *filter_ptr;
+ const S* const tile_ptr = shared_offset + shared_data;
sum1 += filter_value * tile_ptr[0];
sum2 += filter_value * tile_ptr[tile_offset];
++shared_offset;
@@ -572,9 +580,9 @@ __global__ __launch_bounds__(1024, 2) void DepthwiseConv2dGPUKernelNCHWSmall(
shared_offset += in_increment;
}
T* const out_ptr = inout_offset + output;
- out_ptr[0] = sum1;
+ out_ptr[0] = static_cast<T>(sum1);
if (!skip_second) {
- out_ptr[block_pixels] = sum2;
+ out_ptr[block_pixels] = static_cast<T>(sum2);
}
}
@@ -585,11 +593,11 @@ __global__ __launch_bounds__(1024, 2) void DepthwiseConv2dGPUKernelNCHWSmall(
template <typename T, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
- bool kKnownEvenHeight>
-void LaunchDepthwiseConv2dGPUSmall(const GpuDevice& device,
- const DepthwiseArgs& args, const T* input,
- const T* filter, T* output,
- TensorFormat data_format) {
+ bool kKnownEvenHeight, typename S>
+Status LaunchDepthwiseConv2dGPUSmall(OpKernelContext* ctx,
+ const DepthwiseArgs& args, const T* input,
+ const T* filter, T* output,
+ TensorFormat data_format) {
const int block_height = (args.in_rows + 1) / 2;
dim3 block_dim;
int block_count;
@@ -602,7 +610,7 @@ void LaunchDepthwiseConv2dGPUSmall(const GpuDevice& device,
kernel =
DepthwiseConv2dGPUKernelNHWCSmall<T, kDirection, kKnownFilterWidth,
kKnownFilterHeight, kBlockDepth,
- kKnownEvenHeight>;
+ kKnownEvenHeight, S>;
break;
case FORMAT_NCHW:
block_dim = dim3(args.in_cols, block_height, kBlockDepth);
@@ -611,73 +619,126 @@ void LaunchDepthwiseConv2dGPUSmall(const GpuDevice& device,
kernel =
DepthwiseConv2dGPUKernelNCHWSmall<T, kDirection, kKnownFilterWidth,
kKnownFilterHeight, kBlockDepth,
- kKnownEvenHeight>;
+ kKnownEvenHeight, S>;
break;
default:
- LOG(ERROR) << "FORMAT_" << ToString(data_format) << " is not supported";
- return;
+ return errors::InvalidArgument("FORMAT_", ToString(data_format),
+ " is not supported");
}
const int tile_width = args.in_cols + args.filter_cols - 1;
const int tile_height = block_height * 2 + args.filter_rows - 1;
const int tile_pixels = tile_height * tile_width;
const int filter_pixels = args.filter_rows * args.filter_cols;
const int shared_memory_size =
- kBlockDepth * (tile_pixels + filter_pixels) * sizeof(T);
+ kBlockDepth * (tile_pixels + filter_pixels) * sizeof(S);
const int num_outputs = args.out_rows * args.out_cols * block_count;
+ auto device = ctx->eigen_gpu_device();
CudaLaunchConfig config = GetCudaLaunchConfigFixedBlockSize(
num_outputs, device, kernel, shared_memory_size,
block_dim.x * block_dim.y * block_dim.z);
kernel<<<config.block_count, block_dim, shared_memory_size,
device.stream()>>>(args, input, filter, output);
+ return Status::OK();
+}
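As a worked example of the size computation above (numbers chosen purely for illustration): for a 32x32 input, a 3x3 filter, kBlockDepth = 2, and float math (sizeof(S) = 4), block_height = (32 + 1) / 2 = 16, the tile is (32 + 3 - 1) x (16 * 2 + 3 - 1) = 34 x 34 = 1156 pixels, filter_pixels = 9, so shared_memory_size = 2 * (1156 + 9) * 4 = 9320 bytes, comfortably under typical per-block shared memory limits.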
+
+namespace detail {
+template <typename T>
+struct PseudoHalfType {
+ using Type = T;
+};
+template <>
+struct PseudoHalfType<Eigen::half> {
+ using Type = float;
+};
+} // namespace detail
+
+namespace {
+// Maps to float if T is Eigen::half, and to T otherwise.
+template <typename T>
+using PseudoHalfType = typename detail::PseudoHalfType<T>::Type;
+
+// Returns whether the context's GPU supports efficient fp16 math.
+bool HasFastHalfMath(OpKernelContext* ctx) {
+ int major, minor;
+ ctx->op_device_context()
+ ->stream()
+ ->parent()
+ ->GetDeviceDescription()
+ .cuda_compute_capability(&major, &minor);
+ auto cuda_arch = major * 100 + minor * 10;
+ // GPUs before sm_53 don't support fp16 math, and sm_61's fp16 math is slow.
+ return cuda_arch >= 530 && cuda_arch != 610;
+}
+} // namespace
+
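A minimal self-contained restatement of this trait (with a stand-in type for Eigen::half), to make the mapping concrete:

#include <type_traits>

struct Half {};  // stand-in for Eigen::half in this sketch

template <typename T> struct PseudoHalf { using Type = T; };
template <> struct PseudoHalf<Half> { using Type = float; };

// Half tensors accumulate in float ("pseudo half"); other types map to themselves.
static_assert(std::is_same<PseudoHalf<Half>::Type, float>::value, "half -> float");
static_assert(std::is_same<PseudoHalf<double>::Type, double>::value, "double -> double");

HasFastHalfMath then decides per GPU whether the float fallback is actually needed: for example sm_70 gives cuda_arch 700, which is >= 530 and != 610, so native half math is kept, while sm_61 (610) falls back to float accumulation.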
+template <typename T, DepthwiseConv2dDirection kDirection,
+ int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
+ bool kKnownEvenHeight>
+Status LaunchDepthwiseConv2dGPUSmall(OpKernelContext* ctx,
+ const DepthwiseArgs& args, const T* input,
+ const T* filter, T* output,
+ TensorFormat data_format) {
+#if !defined __CUDA_ARCH__ || __CUDA_ARCH__ >= 530
+ if (HasFastHalfMath(ctx)) {
+ return LaunchDepthwiseConv2dGPUSmall<T, kDirection, kKnownFilterWidth,
+ kKnownFilterHeight, kBlockDepth,
+ kKnownEvenHeight, T>(
+ ctx, args, input, filter, output, data_format);
+ }
+#endif
+ return LaunchDepthwiseConv2dGPUSmall<T, kDirection, kKnownFilterWidth,
+ kKnownFilterHeight, kBlockDepth,
+ kKnownEvenHeight, PseudoHalfType<T>>(
+ ctx, args, input, filter, output, data_format);
}
template <typename T, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth>
-void LaunchDepthwiseConv2dGPUSmall(const GpuDevice& device,
- const DepthwiseArgs& args, const T* input,
- const T* filter, T* output,
- TensorFormat data_format) {
+Status LaunchDepthwiseConv2dGPUSmall(OpKernelContext* ctx,
+ const DepthwiseArgs& args, const T* input,
+ const T* filter, T* output,
+ TensorFormat data_format) {
if (args.in_rows & 1) {
- LaunchDepthwiseConv2dGPUSmall<T, kDirection, kKnownFilterWidth,
- kKnownFilterHeight, kBlockDepth, false>(
- device, args, input, filter, output, data_format);
+ return LaunchDepthwiseConv2dGPUSmall<T, kDirection, kKnownFilterWidth,
+ kKnownFilterHeight, kBlockDepth,
+ false>(ctx, args, input, filter,
+ output, data_format);
} else {
- LaunchDepthwiseConv2dGPUSmall<T, kDirection, kKnownFilterWidth,
- kKnownFilterHeight, kBlockDepth, true>(
- device, args, input, filter, output, data_format);
+ return LaunchDepthwiseConv2dGPUSmall<T, kDirection, kKnownFilterWidth,
+ kKnownFilterHeight, kBlockDepth, true>(
+ ctx, args, input, filter, output, data_format);
}
}
template <typename T, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight>
-void LaunchDepthwiseConv2dGPUSmall(const GpuDevice& device,
- const DepthwiseArgs& args, const T* input,
- const T* filter, T* output,
- TensorFormat data_format) {
+Status LaunchDepthwiseConv2dGPUSmall(OpKernelContext* ctx,
+ const DepthwiseArgs& args, const T* input,
+ const T* filter, T* output,
+ TensorFormat data_format) {
// Maximize (power of two) kBlockDepth while keeping a block within 1024
// threads (2 pixels per thread).
const int block_pixels = (args.in_rows + 1) / 2 * args.in_cols;
if (block_pixels > 256) {
- LaunchDepthwiseConv2dGPUSmall<T, kDirection, kKnownFilterWidth,
- kKnownFilterHeight, 2>(
- device, args, input, filter, output, data_format);
+ return LaunchDepthwiseConv2dGPUSmall<T, kDirection, kKnownFilterWidth,
+ kKnownFilterHeight, 2>(
+ ctx, args, input, filter, output, data_format);
} else if (block_pixels > 128) {
- LaunchDepthwiseConv2dGPUSmall<T, kDirection, kKnownFilterWidth,
- kKnownFilterHeight, 4>(
- device, args, input, filter, output, data_format);
+ return LaunchDepthwiseConv2dGPUSmall<T, kDirection, kKnownFilterWidth,
+ kKnownFilterHeight, 4>(
+ ctx, args, input, filter, output, data_format);
} else {
- LaunchDepthwiseConv2dGPUSmall<T, kDirection, kKnownFilterWidth,
- kKnownFilterHeight, 8>(
- device, args, input, filter, output, data_format);
+ return LaunchDepthwiseConv2dGPUSmall<T, kDirection, kKnownFilterWidth,
+ kKnownFilterHeight, 8>(
+ ctx, args, input, filter, output, data_format);
}
}
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight,
int kKnownDepthMultiplier>
-void LaunchDepthwiseConv2dGPU(const GpuDevice& device,
- const DepthwiseArgs& args, const T* input,
- const T* filter, T* output,
- TensorFormat data_format) {
+Status LaunchDepthwiseConv2dGPU(OpKernelContext* ctx, const DepthwiseArgs& args,
+ const T* input, const T* filter, T* output,
+ TensorFormat data_format) {
void (*kernel)(const DepthwiseArgs, const T*, const T*, T*, int);
switch (data_format) {
case FORMAT_NHWC:
@@ -691,11 +752,12 @@ void LaunchDepthwiseConv2dGPU(const GpuDevice& device,
kKnownDepthMultiplier>;
break;
default:
- LOG(ERROR) << "FORMAT_" << ToString(data_format) << " is not supported";
- return;
+ return errors::InvalidArgument("FORMAT_", ToString(data_format),
+ " is not supported");
}
const int num_outputs =
args.batch * args.out_rows * args.out_cols * args.out_depth;
+ auto device = ctx->eigen_gpu_device();
CudaLaunchConfig config =
GetCudaLaunchConfig(num_outputs, device, kernel, 0, 0);
// The compile-time constant version runs faster with a single block.
@@ -706,26 +768,27 @@ void LaunchDepthwiseConv2dGPU(const GpuDevice& device,
kernel<<<std::min(max_block_count, config.block_count),
config.thread_per_block, 0, device.stream()>>>(args, input, filter,
output, num_outputs);
+ return Status::OK();
}
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight>
-void LaunchDepthwiseConv2dGPU(const GpuDevice& device,
- const DepthwiseArgs& args, const T* input,
- const T* filter, T* output,
- TensorFormat data_format) {
+Status LaunchDepthwiseConv2dGPU(OpKernelContext* ctx, const DepthwiseArgs& args,
+ const T* input, const T* filter, T* output,
+ TensorFormat data_format) {
if (args.depth_multiplier == 1) {
if (CanLaunchDepthwiseConv2dGPUSmall(args)) {
- LaunchDepthwiseConv2dGPUSmall<T, DIRECTION_FORWARD, kKnownFilterWidth,
- kKnownFilterHeight>(
- device, args, input, filter, output, data_format);
- return;
+ return LaunchDepthwiseConv2dGPUSmall<
+ T, DIRECTION_FORWARD, kKnownFilterWidth, kKnownFilterHeight>(
+ ctx, args, input, filter, output, data_format);
}
- LaunchDepthwiseConv2dGPU<T, kKnownFilterWidth, kKnownFilterHeight, 1>(
- device, args, input, filter, output, data_format);
+ return LaunchDepthwiseConv2dGPU<T, kKnownFilterWidth, kKnownFilterHeight,
+ 1>(ctx, args, input, filter, output,
+ data_format);
} else {
- LaunchDepthwiseConv2dGPU<T, kKnownFilterWidth, kKnownFilterHeight, -1>(
- device, args, input, filter, output, data_format);
+ return LaunchDepthwiseConv2dGPU<T, kKnownFilterWidth, kKnownFilterHeight,
+ -1>(ctx, args, input, filter, output,
+ data_format);
}
}
@@ -736,18 +799,13 @@ void LaunchDepthwiseConvOp<GpuDevice, T>::operator()(OpKernelContext* ctx,
const T* input,
const T* filter, T* output,
TensorFormat data_format) {
- const GpuDevice& device = ctx->eigen_device<GpuDevice>();
if (args.filter_rows == 3 && args.filter_cols == 3) {
- LaunchDepthwiseConv2dGPU<T, 3, 3>(device, args, input, filter, output,
- data_format);
+ OP_REQUIRES_OK(ctx, LaunchDepthwiseConv2dGPU<T, 3, 3>(
+ ctx, args, input, filter, output, data_format));
} else {
- LaunchDepthwiseConv2dGPU<T, -1, -1>(device, args, input, filter, output,
- data_format);
+ OP_REQUIRES_OK(ctx, LaunchDepthwiseConv2dGPU<T, -1, -1>(
+ ctx, args, input, filter, output, data_format));
}
- auto stream = ctx->op_device_context()->stream();
- OP_REQUIRES(ctx, stream->ok(),
- errors::Internal(
- "Launch of gpu kernel for DepthwiseConv2dGPULaunch failed"));
}
template struct LaunchDepthwiseConvOp<GpuDevice, Eigen::half>;
@@ -904,11 +962,11 @@ __global__ void __launch_bounds__(640, 2)
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight,
int kKnownDepthMultiplier>
-void LaunchDepthwiseConv2dBackpropInputGPU(const GpuDevice& device,
- const DepthwiseArgs& args,
- const T* out_backprop,
- const T* filter, T* in_backprop,
- TensorFormat data_format) {
+Status LaunchDepthwiseConv2dBackpropInputGPU(OpKernelContext* ctx,
+ const DepthwiseArgs& args,
+ const T* out_backprop,
+ const T* filter, T* in_backprop,
+ TensorFormat data_format) {
void (*kernel)(const DepthwiseArgs, const T*, const T*, T*, int);
switch (data_format) {
case FORMAT_NHWC:
@@ -920,38 +978,39 @@ void LaunchDepthwiseConv2dBackpropInputGPU(const GpuDevice& device,
T, kKnownFilterWidth, kKnownFilterHeight, kKnownDepthMultiplier>;
break;
default:
- LOG(ERROR) << "FORMAT_" << ToString(data_format) << " is not supported";
- return;
+ return errors::InvalidArgument("FORMAT_", ToString(data_format),
+ " is not supported");
}
const int num_in_backprop =
args.batch * args.in_rows * args.in_cols * args.in_depth;
+ auto device = ctx->eigen_gpu_device();
CudaLaunchConfig config =
GetCudaLaunchConfig(num_in_backprop, device, kernel, 0, 0);
kernel<<<config.block_count, config.thread_per_block, 0, device.stream()>>>(
args, out_backprop, filter, in_backprop, num_in_backprop);
+ return Status::OK();
}
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight>
-void LaunchDepthwiseConv2dBackpropInputGPU(const GpuDevice& device,
- const DepthwiseArgs& args,
- const T* out_backprop,
- const T* filter, T* in_backprop,
- TensorFormat data_format) {
+Status LaunchDepthwiseConv2dBackpropInputGPU(OpKernelContext* ctx,
+ const DepthwiseArgs& args,
+ const T* out_backprop,
+ const T* filter, T* in_backprop,
+ TensorFormat data_format) {
if (args.depth_multiplier == 1) {
if (CanLaunchDepthwiseConv2dGPUSmall(args)) {
- LaunchDepthwiseConv2dGPUSmall<T, DIRECTION_BACKWARD, kKnownFilterWidth,
- kKnownFilterHeight>(
- device, args, out_backprop, filter, in_backprop, data_format);
- return;
+ return LaunchDepthwiseConv2dGPUSmall<
+ T, DIRECTION_BACKWARD, kKnownFilterWidth, kKnownFilterHeight>(
+ ctx, args, out_backprop, filter, in_backprop, data_format);
}
- LaunchDepthwiseConv2dBackpropInputGPU<T, kKnownFilterWidth,
- kKnownFilterHeight, 1>(
- device, args, out_backprop, filter, in_backprop, data_format);
+ return LaunchDepthwiseConv2dBackpropInputGPU<T, kKnownFilterWidth,
+ kKnownFilterHeight, 1>(
+ ctx, args, out_backprop, filter, in_backprop, data_format);
} else {
- LaunchDepthwiseConv2dBackpropInputGPU<T, kKnownFilterWidth,
- kKnownFilterHeight, -1>(
- device, args, out_backprop, filter, in_backprop, data_format);
+ return LaunchDepthwiseConv2dBackpropInputGPU<T, kKnownFilterWidth,
+ kKnownFilterHeight, -1>(
+ ctx, args, out_backprop, filter, in_backprop, data_format);
}
}
@@ -960,19 +1019,15 @@ template <typename T>
void LaunchDepthwiseConvBackpropInputOp<GpuDevice, T>::operator()(
OpKernelContext* ctx, const DepthwiseArgs& args, const T* out_backprop,
const T* filter, T* in_backprop, TensorFormat data_format) {
- const GpuDevice& device = ctx->eigen_device<GpuDevice>();
if (args.filter_rows == 3 && args.filter_cols == 3) {
- LaunchDepthwiseConv2dBackpropInputGPU<T, 3, 3>(
- device, args, out_backprop, filter, in_backprop, data_format);
+ OP_REQUIRES_OK(
+ ctx, LaunchDepthwiseConv2dBackpropInputGPU<T, 3, 3>(
+ ctx, args, out_backprop, filter, in_backprop, data_format));
} else {
- LaunchDepthwiseConv2dBackpropInputGPU<T, -1, -1>(
- device, args, out_backprop, filter, in_backprop, data_format);
+ OP_REQUIRES_OK(
+ ctx, LaunchDepthwiseConv2dBackpropInputGPU<T, -1, -1>(
+ ctx, args, out_backprop, filter, in_backprop, data_format));
}
- auto stream = ctx->op_device_context()->stream();
- OP_REQUIRES(ctx, stream->ok(),
- errors::Internal("Launch of gpu kernel for "
- "DepthwiseConv2dBackpropInp"
- "utGPULaunch failed"));
}
template struct LaunchDepthwiseConvBackpropInputOp<GpuDevice, Eigen::half>;
@@ -1111,15 +1166,18 @@ __device__ __forceinline__ T WarpSumReduce(T val) {
// up in global memory using atomics.
// Requirements: threads per block must be multiple of 32 and <= launch_bounds,
// kAccumPixels * 64 >= args.in_rows * args.in_cols * kBlockDepth.
+// T is the tensors' data type. S is the math type the kernel uses. This is the
+// same as T for all cases but pseudo half (which has T=Eigen::half, S=float).
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight,
- int kBlockDepth, int kAccumPixels>
+ int kBlockDepth, int kAccumPixels, typename S>
__global__
__launch_bounds__(1024, 2) void DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall(
const DepthwiseArgs args, const T* output, const T* input, T* filter) {
assert(CanLaunchDepthwiseConv2dBackpropFilterGPUSmall(args, blockDim.z));
// Holds block plus halo and filter data for blockDim.x depths.
- extern __shared__ __align__(sizeof(T)) unsigned char shared_memory[];
- T* const shared_data = reinterpret_cast<T*>(shared_memory);
+ extern __shared__ __align__(8) unsigned char shared_memory[];
+ static_assert(sizeof(S) <= 8, "Insufficient alignment detected");
+ S* const shared_data = reinterpret_cast<S*>(shared_memory);
const int num_batches = args.batch;
const int in_height = args.in_rows;
@@ -1169,7 +1227,7 @@ __launch_bounds__(1024, 2) void DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall(
// Initialize tile, in particular the padding and accumulator.
for (int i = thread_idx; i < tile_size + accum_size; i += block_size) {
- shared_data[i] = T(0);
+ shared_data[i] = S();
}
__syncthreads();
@@ -1203,10 +1261,10 @@ __launch_bounds__(1024, 2) void DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall(
if (channel_in_range) {
const T* const in_ptr = inout_offset + input;
- T* const tile_ptr = tile_idx + shared_data;
- tile_ptr[0] = ldg(in_ptr);
+ S* const tile_ptr = tile_idx + shared_data;
+ tile_ptr[0] = static_cast<S>(ldg(in_ptr));
if (!skip_second) {
- tile_ptr[tile_offset] = ldg(tensor_offset + in_ptr);
+ tile_ptr[tile_offset] = static_cast<S>(ldg(tensor_offset + in_ptr));
}
}
@@ -1216,14 +1274,15 @@ __launch_bounds__(1024, 2) void DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall(
if (channel_in_range) {
const T* const out_ptr = inout_offset + output;
- const T out1 = ldg(out_ptr);
- const T out2 = skip_second ? T(0) : ldg(tensor_offset + out_ptr);
+ const S out1 = static_cast<S>(ldg(out_ptr));
+ const S out2 =
+ skip_second ? S() : static_cast<S>(ldg(tensor_offset + out_ptr));
int shared_offset = data_idx;
- T* accum_ptr = accum_offset + shared_data;
+ S* accum_ptr = accum_offset + shared_data;
UNROLL for (int r = 0; r < filter_height; ++r) {
UNROLL for (int c = 0; c < filter_width; ++c) {
- const T* const tile_ptr = shared_offset + shared_data;
- T val = out1 * tile_ptr[0] + out2 * tile_ptr[tile_offset];
+ const S* const tile_ptr = shared_offset + shared_data;
+ S val = out1 * tile_ptr[0] + out2 * tile_ptr[tile_offset];
// Warp-accumulate pixels of the same depth and write to accumulator.
for (int delta = 16; delta >= kBlockDepth; delta /= 2) {
val += CudaShuffleXorSync(active_threads, val, delta);
@@ -1241,18 +1300,18 @@ __launch_bounds__(1024, 2) void DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall(
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
- const T* const accum_data = tile_size + shared_data;
+ const S* const accum_data = tile_size + shared_data;
for (int i = thread_idx; i < accum_size; i += block_size) {
const int filter_idx = i / kAccumPixels;
const int filter_pix = filter_idx / kBlockDepth;
const int filter_channel = filter_idx % kBlockDepth + start_channel;
const int filter_offset = filter_pix * in_depth + filter_channel;
if (filter_channel < in_depth) {
- T val = accum_data[i];
+ S val = accum_data[i];
// Warp-accumulate the pixels of the same depth from the accumulator.
val = WarpSumReduce<kAccumPixels>(val);
if (!(thread_idx & kAccumPixels - 1)) {
- CudaAtomicAdd(filter_offset + filter, val);
+ CudaAtomicAdd(filter_offset + filter, static_cast<T>(val));
}
}
}
@@ -1382,14 +1441,15 @@ __global__ void __launch_bounds__(640, 2)
// Requirements: threads per block must be multiple of 32 and <= launch_bounds,
// kAccumPixels * 64 >= args.in_rows * args.in_cols * kBlockDepth.
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight,
- int kBlockDepth, int kAccumPixels>
+ int kBlockDepth, int kAccumPixels, typename S>
__global__
__launch_bounds__(1024, 2) void DepthwiseConv2dBackpropFilterGPUKernelNCHWSmall(
const DepthwiseArgs args, const T* output, const T* input, T* filter) {
assert(CanLaunchDepthwiseConv2dBackpropFilterGPUSmall(args, blockDim.x));
// Holds block plus halo and filter data for blockDim.z depths.
- extern __shared__ __align__(sizeof(T)) unsigned char shared_memory[];
- T* const shared_data = reinterpret_cast<T*>(shared_memory);
+ extern __shared__ __align__(8) unsigned char shared_memory[];
+ static_assert(sizeof(S) <= 8, "Insufficient alignment detected");
+ S* const shared_data = reinterpret_cast<S*>(shared_memory);
const int num_batches = args.batch;
const int in_height = args.in_rows;
@@ -1438,7 +1498,7 @@ __launch_bounds__(1024, 2) void DepthwiseConv2dBackpropFilterGPUKernelNCHWSmall(
// Initialize tile, in particular the padding and accumulator.
for (int i = thread_idx; i < tile_size + accum_size; i += block_size) {
- shared_data[i] = T(0);
+ shared_data[i] = S();
}
__syncthreads();
@@ -1468,10 +1528,10 @@ __launch_bounds__(1024, 2) void DepthwiseConv2dBackpropFilterGPUKernelNCHWSmall(
if (channel_in_range) {
const T* const in_ptr = inout_offset + input;
- T* const tile_ptr = tile_idx + shared_data;
- tile_ptr[0] = ldg(in_ptr);
+ S* const tile_ptr = tile_idx + shared_data;
+ tile_ptr[0] = static_cast<S>(ldg(in_ptr));
if (!skip_second) {
- tile_ptr[tile_offset] = ldg(block_pixels + in_ptr);
+ tile_ptr[tile_offset] = static_cast<S>(ldg(block_pixels + in_ptr));
}
}
@@ -1481,14 +1541,15 @@ __launch_bounds__(1024, 2) void DepthwiseConv2dBackpropFilterGPUKernelNCHWSmall(
if (channel_in_range) {
const T* const out_ptr = inout_offset + output;
- const T out1 = ldg(out_ptr);
- const T out2 = skip_second ? T(0) : ldg(block_pixels + out_ptr);
+ const S out1 = static_cast<S>(ldg(out_ptr));
+ const S out2 =
+ skip_second ? S() : static_cast<S>(ldg(block_pixels + out_ptr));
int shared_offset = data_idx;
- T* accum_ptr = accum_offset + shared_data;
+ S* accum_ptr = accum_offset + shared_data;
UNROLL for (int r = 0; r < filter_height; ++r) {
UNROLL for (int c = 0; c < filter_width; ++c) {
- const T* const tile_ptr = shared_offset + shared_data;
- T val = out1 * tile_ptr[0] + out2 * tile_ptr[tile_offset];
+ const S* const tile_ptr = shared_offset + shared_data;
+ S val = out1 * tile_ptr[0] + out2 * tile_ptr[tile_offset];
// Warp-accumulate pixels of the same depth and write to accumulator.
for (int delta = 16 / kBlockDepth; delta > 0; delta /= 2) {
val += CudaShuffleXorSync(active_threads, val, delta);
@@ -1506,7 +1567,7 @@ __launch_bounds__(1024, 2) void DepthwiseConv2dBackpropFilterGPUKernelNCHWSmall(
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
- const T* const accum_data = tile_size + shared_data;
+ const S* const accum_data = tile_size + shared_data;
for (int i = thread_idx; i < accum_size; i += block_size) {
const int filter_idx = i / kAccumPixels;
const int filter_pix = filter_idx / kBlockDepth;
@@ -1514,11 +1575,11 @@ __launch_bounds__(1024, 2) void DepthwiseConv2dBackpropFilterGPUKernelNCHWSmall(
(channel + filter_idx % kBlockDepth) % in_depth;
const int filter_offset = filter_pix * in_depth + filter_channel;
if (filter_channel < in_depth) {
- T val = accum_data[i];
+ S val = accum_data[i];
// Warp-accumulate pixels of the same depth from the accumulator.
val = WarpSumReduce<kAccumPixels>(val);
if (!(thread_idx & kAccumPixels - 1)) {
- CudaAtomicAdd(filter_offset + filter, val);
+ CudaAtomicAdd(filter_offset + filter, static_cast<T>(val));
}
}
}
@@ -1526,19 +1587,20 @@ __launch_bounds__(1024, 2) void DepthwiseConv2dBackpropFilterGPUKernelNCHWSmall(
}
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight,
- int kBlockDepth, int kAccumPixels>
-bool TryLaunchDepthwiseConv2dBackpropFilterGPUSmall(
- const GpuDevice& device, const DepthwiseArgs& args, const int block_height,
+ int kBlockDepth, int kAccumPixels, typename S>
+Status TryLaunchDepthwiseConv2dBackpropFilterGPUSmall(
+ OpKernelContext* ctx, const DepthwiseArgs& args, const int block_height,
const T* out_backprop, const T* input, T* filter_backprop,
TensorFormat data_format) {
+ auto device = ctx->eigen_gpu_device();
const int tile_width = args.in_cols + args.filter_cols - 1;
const int tile_height = block_height * 2 + args.filter_rows - 1;
const int tile_pixels = tile_height * tile_width;
const int filter_pixels = args.filter_rows * args.filter_cols;
const int shared_memory_size =
- kBlockDepth * (tile_pixels + filter_pixels * kAccumPixels) * sizeof(T);
+ kBlockDepth * (tile_pixels + filter_pixels * kAccumPixels) * sizeof(S);
if (shared_memory_size > device.sharedMemPerBlock()) {
- return false;
+ return errors::FailedPrecondition("Not enough shared memory");
}
dim3 block_dim;
@@ -1550,18 +1612,20 @@ bool TryLaunchDepthwiseConv2dBackpropFilterGPUSmall(
block_count =
args.batch * DivUp(args.out_depth, kBlockDepth) * kBlockDepth;
kernel = DepthwiseConv2dBackpropFilterGPUKernelNHWCSmall<
- T, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth, kAccumPixels>;
+ T, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth, kAccumPixels,
+ S>;
break;
case FORMAT_NCHW:
block_dim = dim3(args.in_cols, block_height, kBlockDepth);
block_count =
DivUp(args.batch * args.out_depth, kBlockDepth) * kBlockDepth;
kernel = DepthwiseConv2dBackpropFilterGPUKernelNCHWSmall<
- T, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth, kAccumPixels>;
+ T, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth, kAccumPixels,
+ S>;
break;
default:
- LOG(ERROR) << "FORMAT_" << ToString(data_format) << " is not supported";
- return false;
+ return errors::InvalidArgument("FORMAT_", ToString(data_format),
+ " is not supported");
}
const int num_out_backprop = args.out_rows * args.out_cols * block_count;
CudaLaunchConfig config = GetCudaLaunchConfigFixedBlockSize(
@@ -1569,13 +1633,33 @@ bool TryLaunchDepthwiseConv2dBackpropFilterGPUSmall(
block_dim.x * block_dim.y * block_dim.z);
kernel<<<config.block_count, block_dim, shared_memory_size,
device.stream()>>>(args, out_backprop, input, filter_backprop);
- return true;
+ return Status::OK();
+}
+
+template <typename T, int kKnownFilterWidth, int kKnownFilterHeight,
+ int kBlockDepth, int kAccumPixels>
+Status TryLaunchDepthwiseConv2dBackpropFilterGPUSmall(
+ OpKernelContext* ctx, const DepthwiseArgs& args, const int block_height,
+ const T* out_backprop, const T* input, T* filter_backprop,
+ TensorFormat data_format) {
+#if !defined __CUDA_ARCH__ || __CUDA_ARCH__ >= 530
+ if (HasFastHalfMath(ctx)) {
+ return TryLaunchDepthwiseConv2dBackpropFilterGPUSmall<
+ T, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth, kAccumPixels, T>(
+ ctx, args, block_height, out_backprop, input, filter_backprop,
+ data_format);
+ }
+#endif
+ return TryLaunchDepthwiseConv2dBackpropFilterGPUSmall<
+ T, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth, kAccumPixels,
+ PseudoHalfType<T>>(ctx, args, block_height, out_backprop, input,
+ filter_backprop, data_format);
}
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight,
int kBlockDepth>
-bool TryLaunchDepthwiseConv2dBackpropFilterGPUSmall(
- const GpuDevice& device, const DepthwiseArgs& args, const int block_height,
+Status TryLaunchDepthwiseConv2dBackpropFilterGPUSmall(
+ OpKernelContext* ctx, const DepthwiseArgs& args, const int block_height,
const T* out_backprop, const T* input, T* filter_backprop,
TensorFormat data_format) {
// Minimize (power of two) kAccumPixels, while satisfying
@@ -1584,24 +1668,24 @@ bool TryLaunchDepthwiseConv2dBackpropFilterGPUSmall(
if (block_pixels > 512) {
return TryLaunchDepthwiseConv2dBackpropFilterGPUSmall<
T, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth, 32>(
- device, args, block_height, out_backprop, input, filter_backprop,
+ ctx, args, block_height, out_backprop, input, filter_backprop,
data_format);
} else if (block_pixels > 256) {
return TryLaunchDepthwiseConv2dBackpropFilterGPUSmall<
T, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth, 16>(
- device, args, block_height, out_backprop, input, filter_backprop,
+ ctx, args, block_height, out_backprop, input, filter_backprop,
data_format);
} else {
return TryLaunchDepthwiseConv2dBackpropFilterGPUSmall<
T, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth, 8>(
- device, args, block_height, out_backprop, input, filter_backprop,
+ ctx, args, block_height, out_backprop, input, filter_backprop,
data_format);
}
}
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight>
-bool TryLaunchDepthwiseConv2dBackpropFilterGPUSmall(
- const GpuDevice& device, const DepthwiseArgs& args, const T* out_backprop,
+Status TryLaunchDepthwiseConv2dBackpropFilterGPUSmall(
+ OpKernelContext* ctx, const DepthwiseArgs& args, const T* out_backprop,
const T* input, T* filter_backprop, TensorFormat data_format) {
// Maximize (power of two) kBlockDepth while keeping a block within 1024
// threads (2 pixels per thread).
@@ -1621,37 +1705,35 @@ bool TryLaunchDepthwiseConv2dBackpropFilterGPUSmall(
}
if (!CanLaunchDepthwiseConv2dBackpropFilterGPUSmall(args, block_height)) {
- return false;
+ return errors::FailedPrecondition("Cannot launch this configuration");
}
switch (block_depth) {
case 8:
return TryLaunchDepthwiseConv2dBackpropFilterGPUSmall<
T, kKnownFilterWidth, kKnownFilterHeight, 8>(
- device, args, block_height, out_backprop, input, filter_backprop,
+ ctx, args, block_height, out_backprop, input, filter_backprop,
data_format);
case 4:
return TryLaunchDepthwiseConv2dBackpropFilterGPUSmall<
T, kKnownFilterWidth, kKnownFilterHeight, 4>(
- device, args, block_height, out_backprop, input, filter_backprop,
+ ctx, args, block_height, out_backprop, input, filter_backprop,
data_format);
case 2:
return TryLaunchDepthwiseConv2dBackpropFilterGPUSmall<
T, kKnownFilterWidth, kKnownFilterHeight, 2>(
- device, args, block_height, out_backprop, input, filter_backprop,
+ ctx, args, block_height, out_backprop, input, filter_backprop,
data_format);
default:
- return false;
+ return errors::InvalidArgument("Unexpected block depth");
}
}
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight,
int kKnownDepthMultiplier>
-void LaunchDepthwiseConv2dBackpropFilterGPU(const GpuDevice& device,
- const DepthwiseArgs& args,
- const T* out_backprop,
- const T* input, T* filter_backprop,
- TensorFormat data_format) {
+Status LaunchDepthwiseConv2dBackpropFilterGPU(
+ OpKernelContext* ctx, const DepthwiseArgs& args, const T* out_backprop,
+ const T* input, T* filter_backprop, TensorFormat data_format) {
void (*kernel)(const DepthwiseArgs, const T*, const T*, T*, int);
switch (data_format) {
case FORMAT_NHWC:
@@ -1663,37 +1745,38 @@ void LaunchDepthwiseConv2dBackpropFilterGPU(const GpuDevice& device,
T, kKnownFilterWidth, kKnownFilterHeight, kKnownDepthMultiplier>;
break;
default:
- LOG(ERROR) << "FORMAT_" << ToString(data_format) << " is not supported";
- return;
+ return errors::InvalidArgument("FORMAT_", ToString(data_format),
+ " is not supported");
}
const int num_out_backprop =
args.batch * args.out_rows * args.out_cols * args.out_depth;
+ auto device = ctx->eigen_gpu_device();
CudaLaunchConfig config =
GetCudaLaunchConfig(num_out_backprop, device, kernel, 0, 0);
kernel<<<config.block_count, config.thread_per_block, 0, device.stream()>>>(
args, out_backprop, input, filter_backprop, num_out_backprop);
+ return Status::OK();
}
template <typename T, int kKnownFilterWidth, int kKnownFilterHeight>
-void LaunchDepthwiseConv2dBackpropFilterGPU(const GpuDevice& device,
- const DepthwiseArgs& args,
- const T* out_backprop,
- const T* input, T* filter_backprop,
- TensorFormat data_format) {
+Status LaunchDepthwiseConv2dBackpropFilterGPU(
+ OpKernelContext* ctx, const DepthwiseArgs& args, const T* out_backprop,
+ const T* input, T* filter_backprop, TensorFormat data_format) {
if (args.depth_multiplier == 1) {
if (TryLaunchDepthwiseConv2dBackpropFilterGPUSmall<T, kKnownFilterWidth,
kKnownFilterHeight>(
- device, args, out_backprop, input, filter_backprop, data_format)) {
- return;
+ ctx, args, out_backprop, input, filter_backprop, data_format)
+ .ok()) {
+ return Status::OK();
}
- LaunchDepthwiseConv2dBackpropFilterGPU<T, kKnownFilterWidth,
- kKnownFilterHeight, 1>(
- device, args, out_backprop, input, filter_backprop, data_format);
+ return LaunchDepthwiseConv2dBackpropFilterGPU<T, kKnownFilterWidth,
+ kKnownFilterHeight, 1>(
+ ctx, args, out_backprop, input, filter_backprop, data_format);
} else {
- LaunchDepthwiseConv2dBackpropFilterGPU<T, kKnownFilterWidth,
- kKnownFilterHeight, -1>(
- device, args, out_backprop, input, filter_backprop, data_format);
+ return LaunchDepthwiseConv2dBackpropFilterGPU<T, kKnownFilterWidth,
+ kKnownFilterHeight, -1>(
+ ctx, args, out_backprop, input, filter_backprop, data_format);
}
}
@@ -1702,7 +1785,6 @@ template <typename T>
void LaunchDepthwiseConvBackpropFilterOp<GpuDevice, T>::operator()(
OpKernelContext* ctx, const DepthwiseArgs& args, const T* out_backprop,
const T* input, T* filter_backprop, TensorFormat data_format) {
- const GpuDevice& device = ctx->eigen_device<GpuDevice>();
auto stream = ctx->op_device_context()->stream();
// Initialize the results to 0.
@@ -1712,16 +1794,14 @@ void LaunchDepthwiseConvBackpropFilterOp<GpuDevice, T>::operator()(
stream->ThenMemset32(&filter_bp_ptr, 0, num_filter_backprop * sizeof(T));
if (args.filter_rows == 3 && args.filter_cols == 3) {
- LaunchDepthwiseConv2dBackpropFilterGPU<T, 3, 3>(
- device, args, out_backprop, input, filter_backprop, data_format);
+ OP_REQUIRES_OK(
+ ctx, LaunchDepthwiseConv2dBackpropFilterGPU<T, 3, 3>(
+ ctx, args, out_backprop, input, filter_backprop, data_format));
} else {
- LaunchDepthwiseConv2dBackpropFilterGPU<T, -1, -1>(
- device, args, out_backprop, input, filter_backprop, data_format);
+ OP_REQUIRES_OK(
+ ctx, LaunchDepthwiseConv2dBackpropFilterGPU<T, -1, -1>(
+ ctx, args, out_backprop, input, filter_backprop, data_format));
}
- OP_REQUIRES(ctx, stream->ok(),
- errors::Internal("Launch of gpu kernel for "
- "DepthwiseConv2dBackpropFil"
- "terGPULaunch failed"));
}
template struct LaunchDepthwiseConvBackpropFilterOp<GpuDevice, Eigen::half>;
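The hunks above make two coupled changes to the backprop-filter launchers: they now return Status (checked with OP_REQUIRES_OK at the op boundary) instead of a bare bool, and the kernels take an extra template parameter S for the accumulation type, chosen as T only when HasFastHalfMath(ctx) indicates fast fp16 arithmetic and otherwise as PseudoHalfType<T>. A minimal sketch of what such an accumulation-type alias presumably looks like follows; Half is a hypothetical stand-in for Eigen::half, and this is an assumption about PseudoHalfType's role rather than the actual TensorFlow definition.

#include <type_traits>

struct Half {};  // hypothetical stand-in for Eigen::half

// Accumulate half-precision data in float unless the device has fast fp16
// math; float and double accumulate in their own type. This is the role the
// S parameter plays in the kernels above.
template <typename T>
using AccumulationType =
    typename std::conditional<std::is_same<T, Half>::value, float, T>::type;

static_assert(std::is_same<AccumulationType<Half>, float>::value,
              "half data accumulates in float");
static_assert(std::is_same<AccumulationType<double>, double>::value,
              "double accumulates in double");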
diff --git a/tensorflow/core/kernels/depthwise_conv_ops_test.cc b/tensorflow/core/kernels/depthwise_conv_ops_test.cc
new file mode 100644
index 0000000000..87bb68a43b
--- /dev/null
+++ b/tensorflow/core/kernels/depthwise_conv_ops_test.cc
@@ -0,0 +1,114 @@
+/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/cc/ops/const_op.h"
+#include "tensorflow/cc/ops/image_ops.h"
+#include "tensorflow/cc/ops/nn_ops.h"
+#include "tensorflow/cc/ops/standard_ops.h"
+#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
+#include "tensorflow/core/framework/fake_input.h"
+#include "tensorflow/core/framework/node_def_builder.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/kernels/conv_ops_gpu.h"
+#include "tensorflow/core/kernels/ops_testutil.h"
+#include "tensorflow/core/kernels/ops_util.h"
+#include "tensorflow/core/platform/test.h"
+#include "tensorflow/core/platform/test_benchmark.h"
+#include "tensorflow/core/public/session.h"
+
+namespace tensorflow {
+namespace {
+class DepthwiseConvOpTest : public OpsTestBase {
+ protected:
+ enum class Device { CPU, GPU };
+
+ template <typename T>
+ void Run(Device device) {
+ if (device == Device::GPU) {
+ SetDevice(DEVICE_GPU,
+ std::unique_ptr<tensorflow::Device>(DeviceFactory::NewDevice(
+ "GPU", {}, "/job:a/replica:0/task:0")));
+ }
+ DataType dtype = DataTypeToEnum<T>::value;
+ TF_EXPECT_OK(NodeDefBuilder("depthwise_conv2d", "DepthwiseConv2dNative")
+ .Input(FakeInput(dtype))
+ .Input(FakeInput(dtype))
+ .Attr("T", dtype)
+ .Attr("strides", {1, 1, 1, 1})
+ .Attr("padding", "SAME")
+ .Finalize(node_def()));
+ TF_EXPECT_OK(InitOp());
+ const int depth = 2;
+ const int image_width = 2;
+ const int image_height = 3;
+ const int batch_count = 1;
+ // The image matrix is ('first/second' channel):
+ // | 1/2 | 3/4 |
+ // | 5/6 | 7/8 |
+ // | 9/10 | 11/12 |
+ Tensor image(dtype, {batch_count, image_height, image_width, depth});
+ test::FillValues<T>(&image, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
+
+ // The filter matrix is:
+ // | 1/2 | 7/8 | 13/14 |
+ // | 3/4 | 9/10 | 15/16 |
+ // | 5/6 | 11/12 | 17/18 |
+ const int filter_size = 3;
+ const int filter_count = 1;
+ Tensor filter(dtype, {filter_size, filter_size, depth, filter_count});
+ test::FillValues<T>(&filter, {1, 2, 7, 8, 13, 14, 3, 4, 9, 10, 15, 16, 5, 6,
+ 11, 12, 17, 18});
+
+ AddInputFromArray<T>(image.shape(), image.flat<T>());
+ AddInputFromArray<T>(filter.shape(), filter.flat<T>());
+ TF_ASSERT_OK(RunOpKernel());
+
+ // We're sliding two 3x3 filters across the 3x2 image, with accesses outside
+ // the input set to zero because we're using the 'SAME' padding mode.
+ // This means we should end up with this matrix:
+ // | 228/300 | 132/180 |
+ // | 482/596 | 266/344 |
+ // | 372/452 | 180/236 |
+ Tensor expected(dtype, image.shape());
+ test::FillValues<T>(&expected, {228, 300, 132, 180, 482, 596, 266, 344, 372,
+ 452, 180, 236});
+ const Tensor& output = *GetOutput(0);
+ // TODO(csigg): This should happen as part of GetOutput.
+ TF_EXPECT_OK(device_->Sync());
+ test::ExpectTensorNear<T>(expected, output, 1e-5);
+ }
+};
+
+TEST_F(DepthwiseConvOpTest, DepthwiseConvFloatCpu) { Run<float>(Device::CPU); }
+TEST_F(DepthwiseConvOpTest, DepthwiseConvDoubleCpu) {
+ Run<double>(Device::CPU);
+}
+TEST_F(DepthwiseConvOpTest, DepthwiseConvHalfCpu) {
+ Run<Eigen::half>(Device::CPU);
+}
+
+#ifdef GOOGLE_CUDA
+TEST_F(DepthwiseConvOpTest, DepthwiseConvFloatGpu) { Run<float>(Device::GPU); }
+TEST_F(DepthwiseConvOpTest, DepthwiseConvDoubleGpu) {
+ Run<double>(Device::GPU);
+}
+TEST_F(DepthwiseConvOpTest, DepthwiseConvHalfGpu) {
+ Run<Eigen::half>(Device::GPU);
+}
+#endif
+
+} // namespace
+} // namespace tensorflow
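As a sanity check on the expected values in the new test, the following standalone snippet (not part of the change) applies the 3x3 depthwise filter to the 3x2x2 image with 'SAME' zero padding and stride 1; it reproduces the twelve values passed to test::FillValues for the expected tensor.

#include <cstdio>

int main() {
  // image[h][w][c] and filter[fh][fw][c] mirror the FillValues calls above.
  const float image[3][2][2] = {
      {{1, 2}, {3, 4}}, {{5, 6}, {7, 8}}, {{9, 10}, {11, 12}}};
  const float filter[3][3][2] = {{{1, 2}, {7, 8}, {13, 14}},
                                 {{3, 4}, {9, 10}, {15, 16}},
                                 {{5, 6}, {11, 12}, {17, 18}}};
  for (int h = 0; h < 3; ++h) {
    for (int w = 0; w < 2; ++w) {
      for (int c = 0; c < 2; ++c) {
        float acc = 0;
        for (int fh = 0; fh < 3; ++fh) {
          for (int fw = 0; fw < 3; ++fw) {
            const int ih = h + fh - 1;  // 'SAME' padding centers the filter;
            const int iw = w + fw - 1;  // out-of-range taps contribute zero.
            if (ih < 0 || ih >= 3 || iw < 0 || iw >= 2) continue;
            acc += image[ih][iw][c] * filter[fh][fw][c];
          }
        }
        std::printf("%g ", acc);
      }
    }
  }
  // Prints: 228 300 132 180 482 596 266 344 372 452 180 236
  std::printf("\n");
  return 0;
}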
diff --git a/tensorflow/core/kernels/deserialize_sparse_string_op.cc b/tensorflow/core/kernels/deserialize_sparse_string_op.cc
index 6fb07c11e9..2c13f24ad6 100644
--- a/tensorflow/core/kernels/deserialize_sparse_string_op.cc
+++ b/tensorflow/core/kernels/deserialize_sparse_string_op.cc
@@ -165,7 +165,10 @@ class DeserializeSparseOp : public OpKernel {
std::vector<SparseTensor> tensors;
tensors.reserve(num_sparse_tensors);
for (int i = 0; i < num_sparse_tensors; ++i) {
- tensors.emplace_back(indices[i], values[i], shape, std_order);
+ SparseTensor tensor;
+ OP_REQUIRES_OK(context, SparseTensor::Create(indices[i], values[i], shape,
+ std_order, &tensor));
+ tensors.push_back(std::move(tensor));
}
gtl::optional<SparseTensor> maybe_output;
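This hunk, like the analogous ones in the sparse kernels further down, replaces direct SparseTensor construction with the SparseTensor::Create factory so that malformed indices, values, or shapes surface as a Status through OP_REQUIRES_OK instead of failing inside a constructor. A minimal sketch of the factory-with-output-parameter pattern, using toy types rather than the real TensorFlow classes:

#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct Status {
  bool ok;
  std::string msg;
  static Status OK() { return {true, ""}; }
  static Status InvalidArgument(std::string m) { return {false, std::move(m)}; }
};

class SparseVector {
 public:
  SparseVector() = default;

  // Validation happens here and is reported via Status; the constructor stays
  // trivial, so callers declare the object first and fill it in on success.
  static Status Create(std::vector<long> indices, std::vector<float> values,
                       SparseVector* out) {
    if (indices.size() != values.size()) {
      return Status::InvalidArgument("indices and values sizes differ");
    }
    out->indices_ = std::move(indices);
    out->values_ = std::move(values);
    return Status::OK();
  }

 private:
  std::vector<long> indices_;
  std::vector<float> values_;
};

int main() {
  SparseVector sv;
  Status s = SparseVector::Create({0, 2, 5}, {1.0f, 2.0f}, &sv);
  std::cout << (s.ok ? "ok" : s.msg) << "\n";  // prints the validation error
  return 0;
}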
diff --git a/tensorflow/core/kernels/edit_distance_op.cc b/tensorflow/core/kernels/edit_distance_op.cc
index 20d857c721..4aecdc9e41 100644
--- a/tensorflow/core/kernels/edit_distance_op.cc
+++ b/tensorflow/core/kernels/edit_distance_op.cc
@@ -133,10 +133,15 @@ class EditDistanceOp : public OpKernel {
std::vector<int64> sorted_order(truth_st_shape.dims());
std::iota(sorted_order.begin(), sorted_order.end(), 0);
- sparse::SparseTensor hypothesis(*hypothesis_indices, *hypothesis_values,
- hypothesis_st_shape, sorted_order);
- sparse::SparseTensor truth(*truth_indices, *truth_values, truth_st_shape,
- sorted_order);
+ sparse::SparseTensor hypothesis;
+ OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
+ *hypothesis_indices, *hypothesis_values,
+ hypothesis_st_shape, sorted_order, &hypothesis));
+
+ sparse::SparseTensor truth;
+ OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(
+ *truth_indices, *truth_values, truth_st_shape,
+ sorted_order, &truth));
// Group dims 0, 1, ..., RANK - 1. The very last dim is assumed
// to store the variable length sequences.
diff --git a/tensorflow/core/kernels/encode_proto_op.cc b/tensorflow/core/kernels/encode_proto_op.cc
index 3b02ae52a2..4a0c1943e5 100644
--- a/tensorflow/core/kernels/encode_proto_op.cc
+++ b/tensorflow/core/kernels/encode_proto_op.cc
@@ -31,6 +31,7 @@ limitations under the License.
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/util/proto/descriptors.h"
+#include "tensorflow/core/util/proto/proto_utils.h"
namespace tensorflow {
namespace {
@@ -42,9 +43,9 @@ using ::tensorflow::protobuf::internal::WireFormatLite;
using ::tensorflow::protobuf::io::CodedOutputStream;
using ::tensorflow::protobuf::io::StringOutputStream;
-// Computes the total serialized size for a packed repeated field.
-// For fixed-size types this can just multiply, but for variable-sized
-// types it has to iterate through the values in the tensor.
+// Computes the total serialized size for a packed repeated field. For
+// fixed-size types this can just multiply, but for variable-sized types it has
+// to iterate through the values in the tensor.
template <WireFormatLite::FieldType FieldType, typename TensorT>
size_t TotalPackedSize(const Tensor& input, int message_index, int size);
@@ -83,11 +84,11 @@ size_t TotalPackedSize<WireFormatLite::TYPE_INT64, int64>(const Tensor& input,
}
template <>
-size_t TotalPackedSize<WireFormatLite::TYPE_UINT64, int64>(const Tensor& input,
- int message_index,
- int size) {
+size_t TotalPackedSize<WireFormatLite::TYPE_UINT64, uint64>(const Tensor& input,
+ int message_index,
+ int size) {
size_t data_size = 0;
- auto input_t = input.flat_inner_dims<int64>();
+ auto input_t = input.flat_inner_dims<uint64>();
for (int64 i = 0; i < size; i++) {
data_size += WireFormatLite::UInt64Size(
input_t(static_cast<int64>(message_index), i));
@@ -96,6 +97,19 @@ size_t TotalPackedSize<WireFormatLite::TYPE_UINT64, int64>(const Tensor& input,
}
template <>
+size_t TotalPackedSize<WireFormatLite::TYPE_INT32, int64>(const Tensor& input,
+ int message_index,
+ int size) {
+ size_t data_size = 0;
+ auto input_t = input.flat_inner_dims<int64>();
+ for (int64 i = 0; i < size; i++) {
+ data_size += WireFormatLite::Int32Size(
+ input_t(static_cast<int64>(message_index), i));
+ }
+ return data_size;
+}
+
+template <>
size_t TotalPackedSize<WireFormatLite::TYPE_INT32, int32>(const Tensor& input,
int message_index,
int size) {
@@ -109,23 +123,20 @@ size_t TotalPackedSize<WireFormatLite::TYPE_INT32, int32>(const Tensor& input,
}
template <>
-size_t TotalPackedSize<WireFormatLite::TYPE_FIXED64, int64>(const Tensor& input,
- int message_index,
- int size) {
+size_t TotalPackedSize<WireFormatLite::TYPE_FIXED64, uint64>(
+ const Tensor& input, int message_index, int size) {
return size * WireFormatLite::kFixed64Size;
}
template <>
-size_t TotalPackedSize<WireFormatLite::TYPE_FIXED32, int64>(const Tensor& input,
- int message_index,
- int size) {
+size_t TotalPackedSize<WireFormatLite::TYPE_FIXED32, uint64>(
+ const Tensor& input, int message_index, int size) {
return size * WireFormatLite::kFixed32Size;
}
template <>
-size_t TotalPackedSize<WireFormatLite::TYPE_FIXED32, int32>(const Tensor& input,
- int message_index,
- int size) {
+size_t TotalPackedSize<WireFormatLite::TYPE_FIXED32, uint32>(
+ const Tensor& input, int message_index, int size) {
return size * WireFormatLite::kFixed32Size;
}
@@ -137,11 +148,11 @@ size_t TotalPackedSize<WireFormatLite::TYPE_BOOL, bool>(const Tensor& input,
}
template <>
-size_t TotalPackedSize<WireFormatLite::TYPE_UINT32, int64>(const Tensor& input,
- int message_index,
- int size) {
+size_t TotalPackedSize<WireFormatLite::TYPE_UINT32, uint64>(const Tensor& input,
+ int message_index,
+ int size) {
size_t data_size = 0;
- auto input_t = input.flat_inner_dims<int64>();
+ auto input_t = input.flat_inner_dims<uint64>();
for (int64 i = 0; i < size; i++) {
data_size += WireFormatLite::UInt32Size(
input_t(static_cast<int64>(message_index), i));
@@ -150,11 +161,11 @@ size_t TotalPackedSize<WireFormatLite::TYPE_UINT32, int64>(const Tensor& input,
}
template <>
-size_t TotalPackedSize<WireFormatLite::TYPE_UINT32, int32>(const Tensor& input,
- int message_index,
- int size) {
+size_t TotalPackedSize<WireFormatLite::TYPE_UINT32, uint32>(const Tensor& input,
+ int message_index,
+ int size) {
size_t data_size = 0;
- auto input_t = input.flat_inner_dims<int32>();
+ auto input_t = input.flat_inner_dims<uint32>();
for (int64 i = 0; i < size; i++) {
data_size += WireFormatLite::UInt32Size(
input_t(static_cast<int64>(message_index), i));
@@ -182,6 +193,12 @@ size_t TotalPackedSize<WireFormatLite::TYPE_SFIXED32, int32>(
}
template <>
+size_t TotalPackedSize<WireFormatLite::TYPE_SFIXED32, int64>(
+ const Tensor& input, int message_index, int size) {
+ return size * WireFormatLite::kSFixed32Size;
+}
+
+template <>
size_t TotalPackedSize<WireFormatLite::TYPE_SFIXED64, int64>(
const Tensor& input, int message_index, int size) {
return size * WireFormatLite::kSFixed64Size;
@@ -201,6 +218,19 @@ size_t TotalPackedSize<WireFormatLite::TYPE_SINT32, int32>(const Tensor& input,
}
template <>
+size_t TotalPackedSize<WireFormatLite::TYPE_SINT32, int64>(const Tensor& input,
+ int message_index,
+ int size) {
+ size_t data_size = 0;
+ auto input_t = input.flat_inner_dims<int64>();
+ for (int64 i = 0; i < size; i++) {
+ data_size += WireFormatLite::SInt32Size(
+ input_t(static_cast<int64>(message_index), i));
+ }
+ return data_size;
+}
+
+template <>
size_t TotalPackedSize<WireFormatLite::TYPE_SINT64, int64>(const Tensor& input,
int message_index,
int size) {
@@ -213,14 +243,13 @@ size_t TotalPackedSize<WireFormatLite::TYPE_SINT64, int64>(const Tensor& input,
return data_size;
}
-// Writes a possibly repeated primitive field.
-// TensorFlow does not have unsigned types, so we decode them to signed and
-// encode them back to unsigned.
+// Writes a possibly repeated primitive field. TensorFlow does not have unsigned
+// types, so we decode them to signed and encode them back to unsigned.
template <typename TensorT, typename ProtoT,
WireFormatLite::FieldType FieldType,
void Writer(ProtoT, CodedOutputStream*)>
-void WriteField(const FieldDescriptor& field_desc, const Tensor& input,
- int message_index, int size, CodedOutputStream* output) {
+Status WriteField(const FieldDescriptor& field_desc, const Tensor& input,
+ int message_index, int size, CodedOutputStream* output) {
auto wire_type = WireFormatLite::WireTypeForFieldType(
WireFormatLite::FieldType(field_desc.type()));
@@ -250,12 +279,14 @@ void WriteField(const FieldDescriptor& field_desc, const Tensor& input,
Writer(value, output);
}
}
+ return Status::OK();
}
// Writes a possibly repeated string, bytes, or message field.
template <typename T, void Writer(int, const T&, CodedOutputStream*)>
-void WriteVarLenField(const FieldDescriptor& field_desc, const Tensor& input,
- int message_index, int size, CodedOutputStream* output) {
+Status WriteVarLenField(const FieldDescriptor& field_desc, const Tensor& input,
+ int message_index, int size,
+ CodedOutputStream* output) {
auto input_t = input.flat_inner_dims<T>();
for (int64 i = 0; i < size; i++) {
const T& value = input_t(static_cast<int64>(message_index), i);
@@ -264,14 +295,14 @@ void WriteVarLenField(const FieldDescriptor& field_desc, const Tensor& input,
// small speedup.
Writer(field_desc.number(), value, output);
}
+ return Status::OK();
}
-// Writes a group field.
-// Groups are treated like submessages, but tag-delimited
-// instead of length-delimited. WireFormatLite handles this
-// differently so we code it ourselves.
-void WriteGroup(const FieldDescriptor& field_desc, const Tensor& input,
- int message_index, int size, CodedOutputStream* output) {
+// Writes a group field. Groups are treated like submessages, but tag-delimited
+// instead of length-delimited. WireFormatLite handles this differently so we
+// code it ourselves.
+Status WriteGroup(const FieldDescriptor& field_desc, const Tensor& input,
+ int message_index, int size, CodedOutputStream* output) {
auto input_t = input.flat_inner_dims<string>();
for (int64 i = 0; i < size; i++) {
const string& value = input_t(static_cast<int64>(message_index), i);
@@ -282,16 +313,16 @@ void WriteGroup(const FieldDescriptor& field_desc, const Tensor& input,
WireFormatLite::WriteTag(field_desc.number(),
WireFormatLite::WIRETYPE_END_GROUP, output);
}
+ return Status::OK();
}
-// Writes a (possibly repeated) field into an output stream.
-// It is the caller's responsibility to ensure that the type of
-// the input tensor is compatible with the type of the proto
-// field descriptor, and that (message_index, size-1) is within
-// bounds.
-void WriteField(const FieldDescriptor& field_desc, const Tensor& input,
- int message_index, int size, CodedOutputStream* output) {
- DataType tf_type = input.dtype();
+// Writes a (possibly repeated) field into an output stream. It is the caller's
+// responsibility to ensure that the type of the input tensor is compatible with
+// the type of the proto field descriptor, and that (message_index, size-1) is
+// within bounds.
+Status WriteField(const FieldDescriptor& field_desc, const Tensor& input,
+ int message_index, int size, CodedOutputStream* output) {
+ DataType dtype = input.dtype();
switch (field_desc.type()) {
case WireFormatLite::TYPE_DOUBLE:
@@ -299,7 +330,7 @@ void WriteField(const FieldDescriptor& field_desc, const Tensor& input,
WireFormatLite::WriteDoubleNoTag>(
field_desc, input, message_index, size, output);
case WireFormatLite::TYPE_FLOAT:
- switch (tf_type) {
+ switch (dtype) {
case DataType::DT_FLOAT:
return WriteField<float, float, WireFormatLite::TYPE_FLOAT,
WireFormatLite::WriteFloatNoTag>(
@@ -309,36 +340,48 @@ void WriteField(const FieldDescriptor& field_desc, const Tensor& input,
WireFormatLite::WriteFloatNoTag>(
field_desc, input, message_index, size, output);
default:
- return;
+ return errors::DataLoss("Failed writing TYPE_FLOAT for ",
+ DataTypeString(dtype));
}
case WireFormatLite::TYPE_INT64:
return WriteField<int64, protobuf_int64, WireFormatLite::TYPE_INT64,
WireFormatLite::WriteInt64NoTag>(
field_desc, input, message_index, size, output);
case WireFormatLite::TYPE_UINT64:
- return WriteField<int64, protobuf_uint64, WireFormatLite::TYPE_UINT64,
+ return WriteField<uint64, protobuf_uint64, WireFormatLite::TYPE_UINT64,
WireFormatLite::WriteUInt64NoTag>(
field_desc, input, message_index, size, output);
case WireFormatLite::TYPE_INT32:
- return WriteField<int32, int32, WireFormatLite::TYPE_INT32,
- WireFormatLite::WriteInt32NoTag>(
- field_desc, input, message_index, size, output);
+ switch (dtype) {
+ case DataType::DT_INT64:
+ return WriteField<int64, int32, WireFormatLite::TYPE_INT32,
+ WireFormatLite::WriteInt32NoTag>(
+ field_desc, input, message_index, size, output);
+ case DataType::DT_INT32:
+ return WriteField<int32, int32, WireFormatLite::TYPE_INT32,
+ WireFormatLite::WriteInt32NoTag>(
+ field_desc, input, message_index, size, output);
+ default:
+ return errors::DataLoss("Failed writing TYPE_INT32 for ",
+ DataTypeString(dtype));
+ }
case WireFormatLite::TYPE_FIXED64:
- return WriteField<int64, protobuf_uint64, WireFormatLite::TYPE_FIXED64,
+ return WriteField<uint64, protobuf_uint64, WireFormatLite::TYPE_FIXED64,
WireFormatLite::WriteFixed64NoTag>(
field_desc, input, message_index, size, output);
case WireFormatLite::TYPE_FIXED32:
- switch (tf_type) {
- case DataType::DT_INT64:
- return WriteField<int64, uint32, WireFormatLite::TYPE_FIXED32,
+ switch (dtype) {
+ case DataType::DT_UINT64:
+ return WriteField<uint64, uint32, WireFormatLite::TYPE_FIXED32,
WireFormatLite::WriteFixed32NoTag>(
field_desc, input, message_index, size, output);
- case DataType::DT_INT32:
- return WriteField<int32, uint32, WireFormatLite::TYPE_FIXED32,
+ case DataType::DT_UINT32:
+ return WriteField<uint32, uint32, WireFormatLite::TYPE_FIXED32,
WireFormatLite::WriteFixed32NoTag>(
field_desc, input, message_index, size, output);
default:
- return;
+ return errors::DataLoss("Failed writing TYPE_FIXED32 for ",
+ DataTypeString(dtype));
}
case WireFormatLite::TYPE_BOOL:
return WriteField<bool, bool, WireFormatLite::TYPE_BOOL,
@@ -356,34 +399,55 @@ void WriteField(const FieldDescriptor& field_desc, const Tensor& input,
return WriteVarLenField<string, WireFormatLite::WriteBytes>(
field_desc, input, message_index, size, output);
case WireFormatLite::TYPE_UINT32:
- switch (tf_type) {
- case DataType::DT_INT64:
- return WriteField<int64, uint32, WireFormatLite::TYPE_UINT32,
+ switch (dtype) {
+ case DataType::DT_UINT64:
+ return WriteField<uint64, uint32, WireFormatLite::TYPE_UINT32,
WireFormatLite::WriteUInt32NoTag>(
field_desc, input, message_index, size, output);
- case DataType::DT_INT32:
- return WriteField<int32, uint32, WireFormatLite::TYPE_UINT32,
+ case DataType::DT_UINT32:
+ return WriteField<uint32, uint32, WireFormatLite::TYPE_UINT32,
WireFormatLite::WriteUInt32NoTag>(
field_desc, input, message_index, size, output);
default:
- return;
+ return errors::DataLoss("Failed writing TYPE_UINT32 for ",
+ DataTypeString(dtype));
}
case WireFormatLite::TYPE_ENUM:
return WriteField<int32, int32, WireFormatLite::TYPE_ENUM,
WireFormatLite::WriteEnumNoTag>(
field_desc, input, message_index, size, output);
case WireFormatLite::TYPE_SFIXED32:
- return WriteField<int32, int32, WireFormatLite::TYPE_SFIXED32,
- WireFormatLite::WriteSFixed32NoTag>(
- field_desc, input, message_index, size, output);
+ switch (dtype) {
+ case DataType::DT_INT64:
+ return WriteField<int64, int32, WireFormatLite::TYPE_SFIXED32,
+ WireFormatLite::WriteSFixed32NoTag>(
+ field_desc, input, message_index, size, output);
+ case DataType::DT_INT32:
+ return WriteField<int32, int32, WireFormatLite::TYPE_SFIXED32,
+ WireFormatLite::WriteSFixed32NoTag>(
+ field_desc, input, message_index, size, output);
+ default:
+ return errors::DataLoss("Failed writing TYPE_SFIXED32 for ",
+ DataTypeString(dtype));
+ }
case WireFormatLite::TYPE_SFIXED64:
return WriteField<int64, protobuf_int64, WireFormatLite::TYPE_SFIXED64,
WireFormatLite::WriteSFixed64NoTag>(
field_desc, input, message_index, size, output);
case WireFormatLite::TYPE_SINT32:
- return WriteField<int32, int32, WireFormatLite::TYPE_SINT32,
- WireFormatLite::WriteSInt32NoTag>(
- field_desc, input, message_index, size, output);
+ switch (dtype) {
+ case DataType::DT_INT64:
+ return WriteField<int64, int32, WireFormatLite::TYPE_SINT32,
+ WireFormatLite::WriteSInt32NoTag>(
+ field_desc, input, message_index, size, output);
+ case DataType::DT_INT32:
+ return WriteField<int32, int32, WireFormatLite::TYPE_SINT32,
+ WireFormatLite::WriteSInt32NoTag>(
+ field_desc, input, message_index, size, output);
+ default:
+ return errors::DataLoss("Failed writing TYPE_SINT32 for ",
+ DataTypeString(dtype));
+ }
case WireFormatLite::TYPE_SINT64:
return WriteField<int64, protobuf_int64, WireFormatLite::TYPE_SINT64,
WireFormatLite::WriteSInt64NoTag>(
@@ -392,42 +456,6 @@ void WriteField(const FieldDescriptor& field_desc, const Tensor& input,
}
}
-// Checks that a Protobuf field is compatible with a TensorFlow datatype.
-// This is separated from WriteField to lift it out of the inner loop.
-bool IsCompatibleType(const FieldDescriptor& field_desc, DataType tf_type) {
- switch (field_desc.type()) {
- case WireFormatLite::TYPE_DOUBLE:
- return tf_type == DataType::DT_DOUBLE;
- case WireFormatLite::TYPE_FLOAT:
- return tf_type == DataType::DT_FLOAT || tf_type == DataType::DT_DOUBLE;
- case WireFormatLite::TYPE_INT64:
- case WireFormatLite::TYPE_SFIXED64:
- case WireFormatLite::TYPE_SINT64:
- return tf_type == DataType::DT_INT64;
- case WireFormatLite::TYPE_UINT64:
- return tf_type == DataType::DT_INT64;
- case WireFormatLite::TYPE_INT32:
- case WireFormatLite::TYPE_ENUM:
- case WireFormatLite::TYPE_SFIXED32:
- case WireFormatLite::TYPE_SINT32:
- return tf_type == DataType::DT_INT32;
- case WireFormatLite::TYPE_FIXED64:
- return tf_type == DataType::DT_INT64;
- case WireFormatLite::TYPE_FIXED32:
- case WireFormatLite::TYPE_UINT32:
- return tf_type == DataType::DT_INT64 || tf_type == DataType::DT_INT32;
- case WireFormatLite::TYPE_BOOL:
- return tf_type == DataType::DT_BOOL;
- case WireFormatLite::TYPE_STRING:
- case WireFormatLite::TYPE_GROUP:
- case WireFormatLite::TYPE_MESSAGE:
- case WireFormatLite::TYPE_BYTES:
- return tf_type == DataType::DT_STRING;
- // default: intentionally omitted in order to enable static checking.
- }
- return false;
-}
-
class EncodeProtoOp : public OpKernel {
public:
explicit EncodeProtoOp(OpKernelConstruction* context) : OpKernel(context) {
@@ -475,14 +503,14 @@ class EncodeProtoOp : public OpKernel {
});
}
- void Compute(OpKernelContext* cx) override {
+ void Compute(OpKernelContext* ctx) override {
const Tensor* sizes_tensor;
- OP_REQUIRES_OK(cx, cx->input("sizes", &sizes_tensor));
+ OP_REQUIRES_OK(ctx, ctx->input("sizes", &sizes_tensor));
OpInputList values;
- OP_REQUIRES_OK(cx, cx->input_list("values", &values));
+ OP_REQUIRES_OK(ctx, ctx->input_list("values", &values));
- OP_REQUIRES(cx, field_descs_.size() == values.size(),
+ OP_REQUIRES(ctx, field_descs_.size() == values.size(),
errors::InvalidArgument(
"Length of inputs list must match field_names"));
@@ -493,12 +521,14 @@ class EncodeProtoOp : public OpKernel {
const Tensor& v = values[i];
// The type of each value tensor must match the corresponding field.
- OP_REQUIRES(cx, IsCompatibleType(*field_descs_[i], v.dtype()),
- errors::InvalidArgument(
- "Incompatible type for field " + field_names_[i] +
- ". Saw dtype: ",
- DataTypeString(v.dtype()),
- " but field type is: ", field_descs_[i]->type_name()));
+ OP_REQUIRES(
+ ctx,
+ proto_utils::IsCompatibleType(field_descs_[i]->type(), v.dtype()),
+ errors::InvalidArgument(
+ "Incompatible type for field " + field_names_[i] +
+ ". Saw dtype: ",
+ DataTypeString(v.dtype()),
+ " but field type is: ", field_descs_[i]->type_name()));
// All value tensors must have the same shape prefix (i.e. batch size).
TensorShape shape_prefix = v.shape();
@@ -507,14 +537,14 @@ class EncodeProtoOp : public OpKernel {
// Do some initialization on the first input value. The rest will
// have to match this one.
if (i == 0) {
- OP_REQUIRES(cx, v.dims() >= 1,
+ OP_REQUIRES(ctx, v.dims() >= 1,
errors::InvalidArgument(
"Expected value to be at least a vector, saw shape: ",
v.shape().DebugString()));
common_prefix = shape_prefix;
message_count = common_prefix.num_elements();
} else {
- OP_REQUIRES(cx, shape_prefix == common_prefix,
+ OP_REQUIRES(ctx, shape_prefix == common_prefix,
errors::InvalidArgument(
"Values must match up to the last dimension"));
}
@@ -523,7 +553,7 @@ class EncodeProtoOp : public OpKernel {
TensorShape expected_sizes_shape = common_prefix;
expected_sizes_shape.AddDim(field_descs_.size());
- OP_REQUIRES(cx, sizes_tensor->shape() == expected_sizes_shape,
+ OP_REQUIRES(ctx, sizes_tensor->shape() == expected_sizes_shape,
errors::InvalidArgument(
"sizes should be batch_size + [len(field_names)]. Saw: ",
sizes_tensor->shape().DebugString(),
@@ -536,12 +566,11 @@ class EncodeProtoOp : public OpKernel {
int max_size = v.dim_size(v.dims() - 1);
// The last dimension of a value tensor must be greater than the
- // corresponding
- // size in the sizes tensor.
+ // corresponding size in the sizes tensor.
for (int message_index = 0; message_index < message_count;
message_index++) {
OP_REQUIRES(
- cx, sizes(message_index, i) <= max_size,
+ ctx, sizes(message_index, i) <= max_size,
errors::InvalidArgument(
"Size to write must not be larger than value tensor; but saw: ",
sizes(message_index, i), " > ", max_size, " at message ",
@@ -551,13 +580,13 @@ class EncodeProtoOp : public OpKernel {
// This pointer is owned by the context.
Tensor* output_tensor;
- OP_REQUIRES_OK(cx, cx->allocate_output(0, common_prefix, &output_tensor));
+ OP_REQUIRES_OK(ctx, ctx->allocate_output(0, common_prefix, &output_tensor));
auto bufs = output_tensor->flat<string>();
for (int message_index = 0; message_index < message_count;
message_index++) {
// TODO(nix): possibly optimize allocation here by calling
- // bufs(message_index).reserve(DEFAULT_BUF_SIZE);
+ // `bufs(message_index).reserve(DEFAULT_BUF_SIZE)`.
StringOutputStream output_string(&bufs(message_index));
CodedOutputStream out(&output_string);
// Write fields in ascending field_number order.
@@ -566,7 +595,8 @@ class EncodeProtoOp : public OpKernel {
const Tensor& v = values[i];
int size = sizes(message_index, i);
if (!size) continue;
- WriteField(field_desc, v, message_index, size, &out);
+ OP_REQUIRES_OK(ctx,
+ WriteField(field_desc, v, message_index, size, &out));
}
}
}
@@ -578,8 +608,8 @@ class EncodeProtoOp : public OpKernel {
// Owned_desc_pool_ is null when using descriptor_source=local.
std::unique_ptr<DescriptorPool> owned_desc_pool_;
- // Contains indices into field_names_, sorted by field number since
- // that's the order of writing.
+ // Contains indices into field_names_, sorted by field number since that's the
+ // order of writing.
std::vector<int> sorted_field_index_;
TF_DISALLOW_COPY_AND_ASSIGN(EncodeProtoOp);
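The TotalPackedSize specializations added above follow the rule stated in the comment near the top of the file: fixed-width wire types (FIXED32, FIXED64, SFIXED32, ...) contribute a constant number of bytes per element, so their packed size is a simple multiplication, while varint-encoded types (INT32, UINT32, UINT64, SINT32, ...) have to be sized value by value. A small illustration of why, using a generic varint-length helper rather than the WireFormatLite API:

#include <cstdint>
#include <cstdio>

// Bytes needed to varint-encode an unsigned value: 7 payload bits per byte,
// so small values are short and large values are long. This is why packed
// varint fields cannot be sized by multiplying a per-element constant.
int VarintSize(uint64_t v) {
  int size = 1;
  while (v >= 0x80) {
    v >>= 7;
    ++size;
  }
  return size;
}

int main() {
  std::printf("%d %d %d\n", VarintSize(1), VarintSize(300),
              VarintSize(uint64_t{1} << 40));  // prints: 1 2 6
  return 0;
}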
diff --git a/tensorflow/core/kernels/functional_ops.cc b/tensorflow/core/kernels/functional_ops.cc
index 519c475332..cb285bf732 100644
--- a/tensorflow/core/kernels/functional_ops.cc
+++ b/tensorflow/core/kernels/functional_ops.cc
@@ -536,6 +536,7 @@ class FakeParamOp : public OpKernel {
};
REGISTER_KERNEL_BUILDER(Name("FakeParam").Device(DEVICE_CPU), FakeParamOp);
+REGISTER_KERNEL_BUILDER(Name("FakeParam").Device(DEVICE_GPU), FakeParamOp);
} // namespace
} // namespace tensorflow
diff --git a/tensorflow/core/kernels/gather_nd_op.cc b/tensorflow/core/kernels/gather_nd_op.cc
index 4e53291b7f..e50b7fe3bf 100644
--- a/tensorflow/core/kernels/gather_nd_op.cc
+++ b/tensorflow/core/kernels/gather_nd_op.cc
@@ -188,12 +188,13 @@ Status DoGatherNd(OpKernelContext* c, const Tensor& params,
// bad_i will only return >= 0 on CPUs right now.
if (bad_i >= 0) {
+ auto shape = indices.shape();
+ shape.RemoveLastDims(1);
return errors::InvalidArgument(
- "flat indices[", bad_i, ", :] = [",
+ "indices", SliceDebugString(shape, bad_i), " = [",
str_util::Join(
gtl::ArraySlice<Index>(&indices_mat(bad_i, 0), indices_nd), ", "),
- "] does not index into param (shape: ", params.shape().DebugString(),
- ").");
+ "] does not index into param shape ", params.shape().DebugString());
}
}
return Status::OK();
diff --git a/tensorflow/core/kernels/identity_op.cc b/tensorflow/core/kernels/identity_op.cc
index dffb4d7171..6f79729883 100644
--- a/tensorflow/core/kernels/identity_op.cc
+++ b/tensorflow/core/kernels/identity_op.cc
@@ -145,6 +145,7 @@ REGISTER_GPU_KERNEL(Variant);
REGISTER_GPU_HOST_KERNEL(int32);
REGISTER_GPU_HOST_KERNEL(bool);
REGISTER_GPU_HOST_KERNEL(string);
+REGISTER_GPU_HOST_KERNEL(ResourceHandle);
#undef REGISTER_GPU_HOST_KERNEL
diff --git a/tensorflow/core/kernels/lookup_table_op.cc b/tensorflow/core/kernels/lookup_table_op.cc
index 57b7798ba0..07e754a6ef 100644
--- a/tensorflow/core/kernels/lookup_table_op.cc
+++ b/tensorflow/core/kernels/lookup_table_op.cc
@@ -822,6 +822,7 @@ REGISTER_KERNEL(int64, float);
REGISTER_KERNEL(string, string);
REGISTER_KERNEL(string, bool);
REGISTER_KERNEL(int32, int32);
+REGISTER_KERNEL(int32, string);
#undef REGISTER_KERNEL
diff --git a/tensorflow/core/kernels/matmul_op.cc b/tensorflow/core/kernels/matmul_op.cc
index b596dbc782..80376c61aa 100644
--- a/tensorflow/core/kernels/matmul_op.cc
+++ b/tensorflow/core/kernels/matmul_op.cc
@@ -453,10 +453,14 @@ class MatMulOp : public OpKernel {
const Tensor& b = ctx->input(1);
// Check that the dimensions of the two matrices are valid.
- OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a.shape()),
- errors::InvalidArgument("In[0] is not a matrix"));
- OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(b.shape()),
- errors::InvalidArgument("In[1] is not a matrix"));
+ OP_REQUIRES(
+ ctx, TensorShapeUtils::IsMatrix(a.shape()),
+ errors::InvalidArgument("In[0] is not a matrix. Instead it has shape ",
+ a.shape().DebugString()));
+ OP_REQUIRES(
+ ctx, TensorShapeUtils::IsMatrix(b.shape()),
+ errors::InvalidArgument("In[1] is not a matrix. Instead it has shape ",
+ b.shape().DebugString()));
Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair;
dim_pair[0].first = transpose_a_ ? 0 : 1;
dim_pair[0].second = transpose_b_ ? 1 : 0;
diff --git a/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc b/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc
index 4e80f5acce..a370037d97 100644
--- a/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc
+++ b/tensorflow/core/kernels/mkl_conv_grad_filter_ops.cc
@@ -872,12 +872,10 @@ class MklConv2DCustomBackpropFilterOp
}
// check if src and diff_dst need reorder
- std::vector<primitive> net;
T *src_data = nullptr;
if (fwd_src_md.data.format != conv2d_bwd_filter->GetSrcMemoryFormat()) {
src.SetUsrMem(fwd_src_md, &src_tensor);
- src.CheckReorderToOpMem(
- bwd_filter_pd->src_primitive_desc(), &net);
+ src.CheckReorderToOpMem(bwd_filter_pd->src_primitive_desc());
src_data = static_cast<T*>(src.GetOpMem().get_data_handle());
} else {
src_data = static_cast<T*>(const_cast<T*>(
@@ -888,15 +886,13 @@ class MklConv2DCustomBackpropFilterOp
if (diff_dst_md.data.format !=
conv2d_bwd_filter->GetDiffDstMemoryFormat()) {
diff_dst.SetUsrMem(diff_dst_md, &diff_dst_tensor);
- diff_dst.CheckReorderToOpMem(
- bwd_filter_pd->diff_dst_primitive_desc(), &net);
+ diff_dst.CheckReorderToOpMem(bwd_filter_pd->diff_dst_primitive_desc());
diff_dst_data = static_cast<T*>(
diff_dst.GetOpMem().get_data_handle());
} else {
diff_dst_data = static_cast<T*>(const_cast<T*>(
diff_dst_tensor.flat<T>().data()));
}
- stream(stream::kind::eager).submit(net).wait();
// For backward filter, convert diff_filter back to Tensorflow layout
// Here we prepare to reorder op memory back to user memory
@@ -929,9 +925,7 @@ class MklConv2DCustomBackpropFilterOp
// Reorder diff_filter back to Tensorflow layout if necessary
if (diff_filter_reorder_required) {
- std::vector<primitive> net;
- diff_filter.InsertReorderToUserMem(&net);
- stream(stream::kind::eager).submit(net).wait();
+ diff_filter.InsertReorderToUserMem();
}
} catch (mkldnn::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
diff --git a/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc b/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc
index 0af4568b47..b0f7faaa1a 100644
--- a/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc
+++ b/tensorflow/core/kernels/mkl_conv_grad_input_ops.cc
@@ -722,14 +722,11 @@ class MklConv2DCustomBackpropInputOp
diff_src_tensor->flat<T>().data()));
// check if filter and diff_dst need reorder
- std::vector<primitive> net;
T* filter_data = nullptr;
if (fwd_filter_md.data.format !=
conv2d_bwd_input->GetFilterMemoryFormat()) {
filter.SetUsrMem(fwd_filter_md, &filter_tensor);
- filter.CheckReorderToOpMem(
- bwd_input_pd->weights_primitive_desc(),
- &net);
+ filter.CheckReorderToOpMem(bwd_input_pd->weights_primitive_desc());
filter_data = static_cast<T*>(filter.GetOpMem().get_data_handle());
} else {
filter_data = static_cast<T*>(const_cast<T*>(
@@ -740,15 +737,13 @@ class MklConv2DCustomBackpropInputOp
if (diff_dst_md.data.format !=
conv2d_bwd_input->GetDiffDstMemoryFormat()) {
diff_dst.SetUsrMem(diff_dst_md, &diff_dst_tensor);
- diff_dst.CheckReorderToOpMem(
- bwd_input_pd->diff_dst_primitive_desc(), &net);
+ diff_dst.CheckReorderToOpMem(bwd_input_pd->diff_dst_primitive_desc());
diff_dst_data = static_cast<T*>(
diff_dst.GetOpMem().get_data_handle());
} else {
diff_dst_data = static_cast<T*>(const_cast<T*>(
diff_dst_tensor.flat<T>().data()));
}
- stream(stream::kind::eager).submit(net).wait();
// execute convolution input bwd
conv2d_bwd_input->Execute(diff_src_data, filter_data, diff_dst_data);
diff --git a/tensorflow/core/kernels/partitioned_function_ops.cc b/tensorflow/core/kernels/partitioned_function_ops.cc
index b5c6ba1da3..a7a9609c21 100644
--- a/tensorflow/core/kernels/partitioned_function_ops.cc
+++ b/tensorflow/core/kernels/partitioned_function_ops.cc
@@ -330,6 +330,7 @@ class PartitionedCallOp : public AsyncOpKernel {
// using device-specific threadpools when available.
opts.runner = ctx->runner();
opts.source_device = local_device_name_;
+ opts.allow_dead_tensors = true;
// TODO(akshayka): Accommodate the multiple-worker scenario by adding the
// constructed rendezvous to a rendezvous manager.
Rendezvous* rendez = new IntraProcessRendezvous(lib->device_mgr());
diff --git a/tensorflow/core/kernels/reshape_util.cc b/tensorflow/core/kernels/reshape_util.cc
index ac301f3342..50fdc17916 100644
--- a/tensorflow/core/kernels/reshape_util.cc
+++ b/tensorflow/core/kernels/reshape_util.cc
@@ -28,7 +28,6 @@ limitations under the License.
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
-#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
diff --git a/tensorflow/core/kernels/resource_variable_ops.cc b/tensorflow/core/kernels/resource_variable_ops.cc
index af921e4815..c5292e1ae1 100644
--- a/tensorflow/core/kernels/resource_variable_ops.cc
+++ b/tensorflow/core/kernels/resource_variable_ops.cc
@@ -174,25 +174,20 @@ REGISTER_KERNEL_BUILDER(Name("VariableShape")
#endif // GOOGLE_CUDA
-class DestroyResourceOp : public OpKernel {
- public:
- explicit DestroyResourceOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
- OP_REQUIRES_OK(ctx,
- ctx->GetAttr("ignore_lookup_error", &ignore_lookup_error_));
- }
+DestroyResourceOp::DestroyResourceOp(OpKernelConstruction* ctx)
+ : OpKernel(ctx) {
+ OP_REQUIRES_OK(ctx,
+ ctx->GetAttr("ignore_lookup_error", &ignore_lookup_error_));
+}
- void Compute(OpKernelContext* ctx) override {
- const ResourceHandle& p = HandleFromInput(ctx, 0);
- Status status = DeleteResource(ctx, p);
- if (ignore_lookup_error_ && errors::IsNotFound(status)) {
- return;
- }
- OP_REQUIRES_OK(ctx, status);
+void DestroyResourceOp::Compute(OpKernelContext* ctx) {
+ const ResourceHandle& p = HandleFromInput(ctx, 0);
+ Status status = DeleteResource(ctx, p);
+ if (ignore_lookup_error_ && errors::IsNotFound(status)) {
+ return;
}
-
- private:
- bool ignore_lookup_error_;
-};
+ OP_REQUIRES_OK(ctx, status);
+}
REGISTER_KERNEL_BUILDER(Name("DestroyResourceOp").Device(DEVICE_CPU),
DestroyResourceOp);
diff --git a/tensorflow/core/kernels/resource_variable_ops.h b/tensorflow/core/kernels/resource_variable_ops.h
index 8cae5d21f0..9b60106f13 100644
--- a/tensorflow/core/kernels/resource_variable_ops.h
+++ b/tensorflow/core/kernels/resource_variable_ops.h
@@ -28,6 +28,15 @@ class ReadVariableOp : public OpKernel {
DataType dtype_;
};
+class DestroyResourceOp : public OpKernel {
+ public:
+ explicit DestroyResourceOp(OpKernelConstruction* ctx);
+ void Compute(OpKernelContext* ctx) override;
+
+ private:
+ bool ignore_lookup_error_;
+};
+
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_RESOURCE_VARIABLE_OPS_H_
diff --git a/tensorflow/core/kernels/scatter_nd_op.cc b/tensorflow/core/kernels/scatter_nd_op.cc
index e1fc2ea128..e0194605ce 100644
--- a/tensorflow/core/kernels/scatter_nd_op.cc
+++ b/tensorflow/core/kernels/scatter_nd_op.cc
@@ -277,6 +277,9 @@ TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_ADD_SUB_CPU);
TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_UPDATE_CPU);
TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_CPU);
TF_CALL_string(REGISTER_SCATTER_ND_CPU);
+TF_CALL_bool(REGISTER_SCATTER_ND_ADD_SUB_CPU);
+TF_CALL_bool(REGISTER_SCATTER_ND_UPDATE_CPU);
+TF_CALL_bool(REGISTER_SCATTER_ND_CPU);
// Registers GPU kernels.
#if GOOGLE_CUDA
@@ -309,6 +312,7 @@ TF_CALL_complex128(REGISTER_SCATTER_ND_ALL_GPU);
TF_CALL_int32(REGISTER_SCATTER_ND_ADD_SUB_SYCL);
TF_CALL_int32(REGISTER_SCATTER_ND_UPDATE_SYCL);
+TF_CALL_bool(REGISTER_SCATTER_ND_UPDATE_SYCL);
TF_CALL_GPU_NUMBER_TYPES_NO_HALF(REGISTER_SCATTER_ND_ADD_SUB_SYCL);
TF_CALL_GPU_NUMBER_TYPES_NO_HALF(REGISTER_SCATTER_ND_UPDATE_SYCL);
#undef REGISTER_SCATTER_ND_ADD_SUB_SYCL
@@ -537,11 +541,13 @@ Status DoScatterNd(OpKernelContext* c, const Tensor& indices,
}
}
if (bad_i >= 0) {
+ auto slice_shape = indices.shape();
+ slice_shape.RemoveLastDims(1);
return errors::InvalidArgument(
- "Invalid indices: ", SliceDebugString(indices.shape(), bad_i), " = [",
+ "indices", SliceDebugString(slice_shape, bad_i), " = [",
str_util::Join(
gtl::ArraySlice<Index>(&indices_flat(bad_i, 0), slice_dim), ", "),
- "] does not index into ", shape.DebugString());
+ "] does not index into shape ", shape.DebugString());
}
return Status::OK();
}
diff --git a/tensorflow/core/kernels/scatter_nd_op_cpu_impl.h b/tensorflow/core/kernels/scatter_nd_op_cpu_impl.h
index 7cfffa20c5..472f5a3547 100644
--- a/tensorflow/core/kernels/scatter_nd_op_cpu_impl.h
+++ b/tensorflow/core/kernels/scatter_nd_op_cpu_impl.h
@@ -161,15 +161,16 @@ struct ScatterNdFunctor<CPUDevice, T, Index, OP, IXDIM> {
TF_CALL_ALL_TYPES(REGISTER_SCATTER_ND_UPDATE);
REGISTER_SCATTER_ND_INDEX(string, scatter_nd_op::UpdateOp::ADD);
-TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_MATH)
-
+TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ND_MATH);
+TF_CALL_bool(REGISTER_SCATTER_ND_MATH);
#undef REGISTER_SCATTER_ND_MATH
#undef REGISTER_SCATTER_ND_UPDATE
#undef REGISTER_SCATTER_ND_INDEX
#undef REGISTER_SCATTER_ND_FULL
-#ifdef TENSORFLOW_USE_SYCL
// Implementation of update functor for SYCL.
+#ifdef TENSORFLOW_USE_SYCL
+
template <typename T, typename Index, scatter_nd_op::UpdateOp OP, int IXDIM>
struct ScatterNdFunctor<SYCLDevice, T, Index, OP, IXDIM> {
Index operator()(
diff --git a/tensorflow/core/kernels/scatter_nd_op_test.cc b/tensorflow/core/kernels/scatter_nd_op_test.cc
index c134a8dd5b..95ecc69c95 100644
--- a/tensorflow/core/kernels/scatter_nd_op_test.cc
+++ b/tensorflow/core/kernels/scatter_nd_op_test.cc
@@ -185,7 +185,7 @@ TEST_F(ScatterNdUpdateOpTest, Error_IndexOutOfRange) {
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(str_util::StrContains(
- s.ToString(), "Invalid indices: [2,0] = [99] does not index into [5,3]"))
+ s.ToString(), "indices[2] = [99] does not index into shape [5,3]"))
<< s;
}
diff --git a/tensorflow/core/kernels/sdca_internal.cc b/tensorflow/core/kernels/sdca_internal.cc
index 3e16ba8d04..1c071d3d41 100644
--- a/tensorflow/core/kernels/sdca_internal.cc
+++ b/tensorflow/core/kernels/sdca_internal.cc
@@ -18,6 +18,7 @@ limitations under the License.
#include "tensorflow/core/kernels/sdca_internal.h"
#include <limits>
+#include <numeric>
#include <random>
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
diff --git a/tensorflow/core/kernels/sdca_internal.h b/tensorflow/core/kernels/sdca_internal.h
index 897c488702..1eff4b15fa 100644
--- a/tensorflow/core/kernels/sdca_internal.h
+++ b/tensorflow/core/kernels/sdca_internal.h
@@ -43,8 +43,6 @@ limitations under the License.
#include "tensorflow/core/lib/random/distribution_sampler.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/util/guarded_philox_random.h"
-#include "tensorflow/core/util/sparse/group_iterator.h"
-#include "tensorflow/core/util/sparse/sparse_tensor.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
diff --git a/tensorflow/core/kernels/sendrecv_ops.cc b/tensorflow/core/kernels/sendrecv_ops.cc
index 2f87057f4e..6521dcf932 100644
--- a/tensorflow/core/kernels/sendrecv_ops.cc
+++ b/tensorflow/core/kernels/sendrecv_ops.cc
@@ -160,7 +160,6 @@ Rendezvous::DoneCallback make_recv_callback(OpKernelContext* ctx,
if (!is_dead) {
ctx->set_output(0, val);
}
- *ctx->is_output_dead() = is_dead;
}
done();
},
diff --git a/tensorflow/core/kernels/serialize_sparse_op.cc b/tensorflow/core/kernels/serialize_sparse_op.cc
index 852cef29c7..577e327809 100644
--- a/tensorflow/core/kernels/serialize_sparse_op.cc
+++ b/tensorflow/core/kernels/serialize_sparse_op.cc
@@ -190,8 +190,10 @@ class SerializeManySparseOp : public SerializeManySparseOpBase<U> {
TensorShape tensor_input_shape(input_shape->vec<int64>());
gtl::InlinedVector<int64, 8> std_order(rank);
std::iota(std_order.begin(), std_order.end(), 0);
- SparseTensor input_st(*input_indices, *input_values, tensor_input_shape,
- std_order);
+ SparseTensor input_st;
+ OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
+ tensor_input_shape, std_order,
+ &input_st));
auto input_shape_t = input_shape->vec<int64>();
const int64 N = input_shape_t(0);
diff --git a/tensorflow/core/kernels/set_kernels.cc b/tensorflow/core/kernels/set_kernels.cc
index e836c764ac..f893d4e945 100644
--- a/tensorflow/core/kernels/set_kernels.cc
+++ b/tensorflow/core/kernels/set_kernels.cc
@@ -63,9 +63,9 @@ Status GroupShape(const VarDimArray& input_shape, ShapeArray* grouped_shape) {
// Build `SparseTensor` from indices, values, and shape in inputs
// [base_index, base_index + 3), and validate its rank and indices.
-sparse::SparseTensor SparseTensorFromContext(OpKernelContext* ctx,
- const int32 base_index,
- bool validate_indices) {
+Status SparseTensorFromContext(OpKernelContext* ctx, const int32 base_index,
+ bool validate_indices,
+ sparse::SparseTensor* tensor) {
// Assume row-major order.
const TensorShape shape =
TensorShape(ctx->input(base_index + 2).vec<int64>());
@@ -73,13 +73,8 @@ sparse::SparseTensor SparseTensorFromContext(OpKernelContext* ctx,
std::vector<int64> order(shape.dims());
std::iota(order.begin(), order.end(), 0);
- const sparse::SparseTensor st(ctx->input(base_index),
- ctx->input(base_index + 1), shape, order);
- if (validate_indices) {
- Status s = st.IndicesValid();
- if (!s.ok()) ctx->SetStatus(s);
- }
- return st;
+ return sparse::SparseTensor::Create(
+ ctx->input(base_index), ctx->input(base_index + 1), shape, order, tensor);
}
// TODO(ptucker): CheckGroup is just a sanity check on the result of
@@ -253,11 +248,13 @@ class SetSizeOp : public OpKernel {
template <typename T>
void SetSizeOp<T>::Compute(OpKernelContext* ctx) {
- const sparse::SparseTensor set_st =
- SparseTensorFromContext(ctx, 0, validate_indices_);
+ sparse::SparseTensor set_st;
+ OP_REQUIRES_OK(ctx,
+ SparseTensorFromContext(ctx, 0, validate_indices_, &set_st));
+ OP_REQUIRES_OK(ctx, set_st.IndicesValid());
- // Output shape is same as input except for last dimension, which reduces to
- // the set size of values along that dimension.
+ // Output shape is same as input except for last dimension, which reduces
+ // to the set size of values along that dimension.
ShapeArray output_shape;
OP_REQUIRES_OK(ctx, GroupShape(set_st.shape(), &output_shape));
const auto output_strides = Strides(output_shape);
@@ -484,8 +481,10 @@ void SetOperationOp<T>::ComputeDenseToDense(OpKernelContext* ctx) const {
template <typename T>
void SetOperationOp<T>::ComputeDenseToSparse(OpKernelContext* ctx) const {
const Tensor& set1_t = ctx->input(0);
- const sparse::SparseTensor set2_st =
- SparseTensorFromContext(ctx, 1, validate_indices_);
+ sparse::SparseTensor set2_st;
+ OP_REQUIRES_OK(ctx,
+ SparseTensorFromContext(ctx, 1, validate_indices_, &set2_st));
+ OP_REQUIRES_OK(ctx, set2_st.IndicesValid());
// The following should stay in sync with `_dense_to_sparse_shape` shape
// assertions in python/ops/set_ops.py, and `SetShapeFn` for
// `DenseToSparseSetOperation` in ops/set_ops.cc.
@@ -597,10 +596,15 @@ const std::vector<int64> GROUP_ITER_END;
// with the same first n-1 dimensions in set1 and set2.
template <typename T>
void SetOperationOp<T>::ComputeSparseToSparse(OpKernelContext* ctx) const {
- const sparse::SparseTensor set1_st =
- SparseTensorFromContext(ctx, 0, validate_indices_);
- const sparse::SparseTensor set2_st =
- SparseTensorFromContext(ctx, 3, validate_indices_);
+ sparse::SparseTensor set1_st;
+ OP_REQUIRES_OK(ctx,
+ SparseTensorFromContext(ctx, 0, validate_indices_, &set1_st));
+ OP_REQUIRES_OK(ctx, set1_st.IndicesValid());
+
+ sparse::SparseTensor set2_st;
+ OP_REQUIRES_OK(ctx,
+ SparseTensorFromContext(ctx, 3, validate_indices_, &set2_st));
+ OP_REQUIRES_OK(ctx, set2_st.IndicesValid());
// The following should stay in sync with `_sparse_to_sparse_shape` shape
// assertions in python/ops/set_ops.py, and `SetShapeFn` for
// `SparseToSparseSetOperation` in ops/set_ops.cc.
diff --git a/tensorflow/core/kernels/sparse_concat_op.cc b/tensorflow/core/kernels/sparse_concat_op.cc
index f813794374..3b2a0cb0f3 100644
--- a/tensorflow/core/kernels/sparse_concat_op.cc
+++ b/tensorflow/core/kernels/sparse_concat_op.cc
@@ -124,9 +124,12 @@ class SparseConcatOp : public OpKernel {
std::vector<sparse::SparseTensor> sp_inputs;
for (int i = 0; i < N; ++i) {
const TensorShape current_shape(shapes[i].vec<int64>());
- sp_inputs.emplace_back(tensor::DeepCopy(inds[i]),
- tensor::DeepCopy(vals[i]), current_shape,
- std_order);
+ sparse::SparseTensor tensor;
+ OP_REQUIRES_OK(context,
+ sparse::SparseTensor::Create(
+ tensor::DeepCopy(inds[i]), tensor::DeepCopy(vals[i]),
+ current_shape, std_order, &tensor));
+ sp_inputs.push_back(std::move(tensor));
sp_inputs[i].Reorder<T>(concat_order);
}
diff --git a/tensorflow/core/kernels/sparse_reduce_op.cc b/tensorflow/core/kernels/sparse_reduce_op.cc
index 9e60791f97..a465564739 100644
--- a/tensorflow/core/kernels/sparse_reduce_op.cc
+++ b/tensorflow/core/kernels/sparse_reduce_op.cc
@@ -172,8 +172,10 @@ class SparseReduceOp : public OpKernel {
// making deep copies here. Remove this if/when we change Reorder()'s
// semantics.
const auto shape_vec = shape_t->vec<int64>();
- SparseTensor sp(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
- TensorShape(shape_vec));
+ SparseTensor sp;
+ OP_REQUIRES_OK(ctx, SparseTensor::Create(
+ tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
+ TensorShape(shape_vec), &sp));
ReduceDetails reduction = SparseTensorReduceHelper(
sp, reduction_axes_t->flat<int32>(), keep_dims_);
@@ -260,8 +262,10 @@ class SparseReduceSparseOp : public OpKernel {
OP_REQUIRES_OK(ctx, ValidateInputs(shape_t, reduction_axes_t));
- SparseTensor sp(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
- TensorShape(shape_t->vec<int64>()));
+ SparseTensor sp;
+ OP_REQUIRES_OK(ctx, SparseTensor::Create(tensor::DeepCopy(*indices_t),
+ tensor::DeepCopy(*values_t),
+ TensorShape(shape_t->vec<int64>()), &sp));
ReduceDetails reduction = SparseTensorReduceHelper(
sp, reduction_axes_t->flat<int32>(), keep_dims_);
diff --git a/tensorflow/core/kernels/sparse_reorder_op.cc b/tensorflow/core/kernels/sparse_reorder_op.cc
index d1373fe0ef..6f9065827f 100644
--- a/tensorflow/core/kernels/sparse_reorder_op.cc
+++ b/tensorflow/core/kernels/sparse_reorder_op.cc
@@ -60,16 +60,21 @@ class SparseReorderOp : public OpKernel {
std::iota(std_order.begin(), std_order.end(), 0);
// Check if the sparse tensor is already ordered correctly
- sparse::SparseTensor input_sp(input_ind, input_val, input_shape, std_order);
+ sparse::SparseTensor input_sp;
+ OP_REQUIRES_OK(
+ context, sparse::SparseTensor::Create(input_ind, input_val, input_shape,
+ std_order, &input_sp));
if (input_sp.IndicesValid().ok()) {
context->set_output(0, input_sp.indices());
context->set_output(1, input_sp.values());
} else {
// Deep-copy the input Tensors, then reorder in-place
- sparse::SparseTensor reordered_sp(tensor::DeepCopy(input_ind),
- tensor::DeepCopy(input_val),
- input_shape);
+ sparse::SparseTensor reordered_sp;
+ OP_REQUIRES_OK(context,
+ sparse::SparseTensor::Create(tensor::DeepCopy(input_ind),
+ tensor::DeepCopy(input_val),
+ input_shape, &reordered_sp));
reordered_sp.Reorder<T>(std_order);
context->set_output(0, reordered_sp.indices());
context->set_output(1, reordered_sp.values());
diff --git a/tensorflow/core/kernels/sparse_slice_grad_op.cc b/tensorflow/core/kernels/sparse_slice_grad_op.cc
index 90a39ed818..f92b6414ff 100644
--- a/tensorflow/core/kernels/sparse_slice_grad_op.cc
+++ b/tensorflow/core/kernels/sparse_slice_grad_op.cc
@@ -18,7 +18,6 @@ limitations under the License.
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
-#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
diff --git a/tensorflow/core/kernels/sparse_slice_op.cc b/tensorflow/core/kernels/sparse_slice_op.cc
index 10dc208ab6..6aaf4fd88f 100644
--- a/tensorflow/core/kernels/sparse_slice_op.cc
+++ b/tensorflow/core/kernels/sparse_slice_op.cc
@@ -66,8 +66,11 @@ class SparseSliceOp : public OpKernel {
"Expected size to be a vector of length ", input_dims,
" but got length ", input_size.NumElements()));
- sparse::SparseTensor sparse_tensor(input_indices, input_values,
- TensorShape(input_shape.vec<int64>()));
+ sparse::SparseTensor sparse_tensor;
+ OP_REQUIRES_OK(context,
+ sparse::SparseTensor::Create(
+ input_indices, input_values,
+ TensorShape(input_shape.vec<int64>()), &sparse_tensor));
const gtl::ArraySlice<int64> start(input_start.flat<int64>().data(),
input_dims);
diff --git a/tensorflow/core/kernels/sparse_softmax_op.cc b/tensorflow/core/kernels/sparse_softmax_op.cc
index 444a5f657a..dc3119bba4 100644
--- a/tensorflow/core/kernels/sparse_softmax_op.cc
+++ b/tensorflow/core/kernels/sparse_softmax_op.cc
@@ -69,8 +69,11 @@ class SparseSoftmaxOp : public OpKernel {
const int nnz = static_cast<int>(indices_t->dim_size(0));
const int rank = static_cast<int>(indices_t->dim_size(1));
- SparseTensor st(tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
- TensorShape(shape_t->flat<int64>()));
+ SparseTensor st;
+ OP_REQUIRES_OK(
+ context, SparseTensor::Create(
+ tensor::DeepCopy(*indices_t), tensor::DeepCopy(*values_t),
+ TensorShape(shape_t->flat<int64>()), &st));
Tensor *output_values = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({nnz}),
diff --git a/tensorflow/core/kernels/sparse_split_op.cc b/tensorflow/core/kernels/sparse_split_op.cc
index 67dcf05a6c..3d02be47cb 100644
--- a/tensorflow/core/kernels/sparse_split_op.cc
+++ b/tensorflow/core/kernels/sparse_split_op.cc
@@ -63,10 +63,16 @@ class SparseSplitOp : public OpKernel {
input_shape.vec<int64>()(split_dim), "), got ",
num_split_));
- sparse::SparseTensor sparse_tensor(input_indices, input_values,
- TensorShape(input_shape.vec<int64>()));
- const std::vector<sparse::SparseTensor> outputs =
- sparse::SparseTensor::Split<T>(sparse_tensor, split_dim, num_split_);
+ sparse::SparseTensor sparse_tensor;
+ OP_REQUIRES_OK(context,
+ sparse::SparseTensor::Create(
+ input_indices, input_values,
+ TensorShape(input_shape.vec<int64>()), &sparse_tensor));
+
+ std::vector<sparse::SparseTensor> outputs;
+ OP_REQUIRES_OK(context,
+ sparse::SparseTensor::Split<T>(sparse_tensor, split_dim,
+ num_split_, &outputs));
for (int slice_index = 0; slice_index < num_split_; ++slice_index) {
context->set_output(slice_index, outputs[slice_index].indices());
diff --git a/tensorflow/core/kernels/sparse_tensors_map_ops.cc b/tensorflow/core/kernels/sparse_tensors_map_ops.cc
index 2aadd92475..74fa3a15f0 100644
--- a/tensorflow/core/kernels/sparse_tensors_map_ops.cc
+++ b/tensorflow/core/kernels/sparse_tensors_map_ops.cc
@@ -93,8 +93,9 @@ class SparseTensorsMap : public ResourceBase {
const Tensor* ix = sp_iter->second.indices.AccessTensor(ctx);
const Tensor* values = sp_iter->second.values.AccessTensor(ctx);
const auto& shape = sp_iter->second.shape;
- sparse_tensors->emplace_back(*ix, *values, shape);
-
+ SparseTensor tensor;
+ TF_RETURN_IF_ERROR(SparseTensor::Create(*ix, *values, shape, &tensor));
+ sparse_tensors->push_back(std::move(tensor));
sp_tensors_.erase(sp_iter);
}
}
@@ -195,7 +196,9 @@ class AddSparseToTensorsMapOp : public SparseTensorAccessingOp {
TensorShapeUtils::MakeShape(input_shape->vec<int64>().data(),
input_shape->NumElements(),
&input_shape_object));
- SparseTensor st(*input_indices, *input_values, input_shape_object);
+ SparseTensor st;
+ OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
+ input_shape_object, &st));
int64 handle;
OP_REQUIRES_OK(context, map->AddSparseTensor(context, st, &handle));
@@ -253,8 +256,10 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp {
TensorShape tensor_input_shape(input_shape->vec<int64>());
gtl::InlinedVector<int64, 8> std_order(rank);
std::iota(std_order.begin(), std_order.end(), 0);
- SparseTensor input_st(*input_indices, *input_values, tensor_input_shape,
- std_order);
+ SparseTensor input_st;
+ OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values,
+ tensor_input_shape, std_order,
+ &input_st));
auto input_shape_t = input_shape->vec<int64>();
const int64 N = input_shape_t(0);
@@ -300,7 +305,10 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp {
output_values_t(i) = values(i);
}
- SparseTensor st_i(output_indices, output_values, output_shape);
+ SparseTensor st_i;
+ OP_REQUIRES_OK(context,
+ SparseTensor::Create(output_indices, output_values,
+ output_shape, &st_i));
int64 handle;
OP_REQUIRES_OK(context, map->AddSparseTensor(context, st_i, &handle));
sparse_handles_t(b) = handle;
@@ -311,7 +319,9 @@ class AddManySparseToTensorsMapOp : public SparseTensorAccessingOp {
if (visited.size() < N) {
Tensor empty_indices(DT_INT64, {0, rank - 1});
Tensor empty_values(DataTypeToEnum<T>::value, {0});
- SparseTensor empty_st(empty_indices, empty_values, output_shape);
+ SparseTensor empty_st;
+ OP_REQUIRES_OK(context, SparseTensor::Create(empty_indices, empty_values,
+ output_shape, &empty_st));
for (int64 b = 0; b < N; ++b) {
// We skipped this batch entry.
@@ -466,13 +476,15 @@ class TakeManySparseFromTensorsMapOp : public SparseTensorAccessingOp {
std::vector<SparseTensor> tensors_to_concat;
tensors_to_concat.reserve(N);
for (int i = 0; i < N; ++i) {
- tensors_to_concat.emplace_back(std::move(indices_to_concat[i]),
- std::move(values_to_concat[i]),
- preconcat_shape, std_order);
+ SparseTensor tensor;
+ OP_REQUIRES_OK(context,
+ SparseTensor::Create(std::move(indices_to_concat[i]),
+ std::move(values_to_concat[i]),
+ preconcat_shape, std_order, &tensor));
+ tensors_to_concat.push_back(std::move(tensor));
}
- SparseTensor output(SparseTensor::Concat<T>(tensors_to_concat));
-
+ auto output = SparseTensor::Concat<T>(tensors_to_concat);
Tensor final_output_shape(DT_INT64, TensorShape({output.dims()}));
std::copy_n(output.shape().data(), output.dims(),
diff --git a/tensorflow/core/kernels/sparse_to_dense_op.cc b/tensorflow/core/kernels/sparse_to_dense_op.cc
index ba3da21a43..f79a4d0494 100644
--- a/tensorflow/core/kernels/sparse_to_dense_op.cc
+++ b/tensorflow/core/kernels/sparse_to_dense_op.cc
@@ -119,8 +119,10 @@ class SparseToDense : public OpKernel {
// Assume SparseTensor is lexicographically sorted.
gtl::InlinedVector<int64, 8> order(output->shape().dims());
std::iota(order.begin(), order.end(), 0);
- sparse::SparseTensor st(indices_shaped, sparse_values_b, output->shape(),
- order);
+ sparse::SparseTensor st;
+ OP_REQUIRES_OK(c,
+ sparse::SparseTensor::Create(indices_shaped, sparse_values_b,
+ output->shape(), order, &st));
if (validate_indices_) {
OP_REQUIRES_OK(c, st.IndicesValid());
diff --git a/tensorflow/core/lib/core/refcount.h b/tensorflow/core/lib/core/refcount.h
index eb41f9ff36..87bcfec411 100644
--- a/tensorflow/core/lib/core/refcount.h
+++ b/tensorflow/core/lib/core/refcount.h
@@ -17,6 +17,8 @@ limitations under the License.
#define TENSORFLOW_LIB_CORE_REFCOUNT_H_
#include <atomic>
+#include <memory>
+
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
@@ -58,6 +60,15 @@ class RefCounted {
void operator=(const RefCounted&) = delete;
};
+// A deleter class to form a std::unique_ptr that unrefs objects.
+struct RefCountDeleter {
+ void operator()(tensorflow::core::RefCounted* o) const { o->Unref(); }
+};
+
+// A unique_ptr that unrefs the owned object on destruction.
+template <typename T>
+using RefCountPtr = std::unique_ptr<T, RefCountDeleter>;
+
// Helper class to unref an object when out-of-scope.
class ScopedUnref {
public:
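The new RefCountPtr alias gives scoped ownership of RefCounted objects, complementing ScopedUnref. A hedged usage sketch; the resource class below is made up for illustration:

    #include "tensorflow/core/lib/core/refcount.h"

    // Hypothetical RefCounted subclass.
    class MyResource : public tensorflow::core::RefCounted {
     public:
      void DoWork() {}
    };

    void UseResource() {
      // The object is created holding one reference; RefCountDeleter calls
      // Unref() when the pointer goes out of scope.
      tensorflow::core::RefCountPtr<MyResource> res(new MyResource);
      res->DoWork();
    }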
diff --git a/tensorflow/core/lib/db/sqlite.cc b/tensorflow/core/lib/db/sqlite.cc
index cb6943379d..cf11f3a331 100644
--- a/tensorflow/core/lib/db/sqlite.cc
+++ b/tensorflow/core/lib/db/sqlite.cc
@@ -112,6 +112,7 @@ Status EnvPragma(Sqlite* db, const char* pragma, const char* var) {
/* static */
Status Sqlite::Open(const string& path, int flags, Sqlite** db) {
flags |= SQLITE_OPEN_PRIVATECACHE;
+ flags |= SQLITE_OPEN_URI;
sqlite3* sqlite = nullptr;
int rc = sqlite3_open_v2(path.c_str(), &sqlite, flags, nullptr);
if (rc != SQLITE_OK) {
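Adding SQLITE_OPEN_URI means Sqlite::Open() now honors file: URIs. A sketch of what the flag enables, assuming sqlite3.h's flag constants are visible through sqlite.h; the URI below is illustrative, not something this patch itself uses:

    #include "tensorflow/core/lib/db/sqlite.h"

    // Opens a shared in-memory database via a file: URI.
    tensorflow::Status OpenSharedInMemoryDb(tensorflow::Sqlite** db) {
      return tensorflow::Sqlite::Open(
          "file::memory:?cache=shared",
          SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE, db);
    }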
diff --git a/tensorflow/core/lib/io/record_reader_writer_test.cc b/tensorflow/core/lib/io/record_reader_writer_test.cc
index 95ac040602..c36c909399 100644
--- a/tensorflow/core/lib/io/record_reader_writer_test.cc
+++ b/tensorflow/core/lib/io/record_reader_writer_test.cc
@@ -16,6 +16,7 @@ limitations under the License.
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/io/record_writer.h"
+#include <zlib.h>
#include <vector>
#include "tensorflow/core/platform/env.h"
@@ -33,6 +34,89 @@ static std::vector<int> BufferSizes() {
12, 13, 14, 15, 16, 17, 18, 19, 20, 65536};
}
+namespace {
+
+io::RecordReaderOptions GetMatchingReaderOptions(
+ const io::RecordWriterOptions& options) {
+ if (options.compression_type == io::RecordWriterOptions::ZLIB_COMPRESSION) {
+ return io::RecordReaderOptions::CreateRecordReaderOptions("ZLIB");
+ }
+ return io::RecordReaderOptions::CreateRecordReaderOptions("");
+}
+
+uint64 GetFileSize(const string& fname) {
+ Env* env = Env::Default();
+ uint64 fsize;
+ TF_CHECK_OK(env->GetFileSize(fname, &fsize));
+ return fsize;
+}
+
+void VerifyFlush(const io::RecordWriterOptions& options) {
+ std::vector<string> records = {
+ "abcdefghijklmnopqrstuvwxyz",
+ "ZYXWVUTSRQPONMLKJIHGFEDCBA0123456789!@#$%^&*()",
+ "G5SyohOL9UmXofSOOwWDrv9hoLLMYPJbG9r38t3uBRcHxHj2PdKcPDuZmKW62RIY",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaa",
+ };
+
+ Env* env = Env::Default();
+ string fname = testing::TmpDir() + "/record_reader_writer_flush_test";
+
+ std::unique_ptr<WritableFile> file;
+ TF_CHECK_OK(env->NewWritableFile(fname, &file));
+ io::RecordWriter writer(file.get(), options);
+
+ std::unique_ptr<RandomAccessFile> read_file;
+ TF_CHECK_OK(env->NewRandomAccessFile(fname, &read_file));
+ io::RecordReaderOptions read_options = GetMatchingReaderOptions(options);
+ io::RecordReader reader(read_file.get(), read_options);
+
+ EXPECT_EQ(GetFileSize(fname), 0);
+ for (size_t i = 0; i < records.size(); i++) {
+ uint64 start_size = GetFileSize(fname);
+
+ // Write a new record.
+ TF_EXPECT_OK(writer.WriteRecord(records[i]));
+ TF_CHECK_OK(writer.Flush());
+ TF_CHECK_OK(file->Flush());
+
+ // Verify that file size has changed after file flush.
+ uint64 new_size = GetFileSize(fname);
+ EXPECT_GT(new_size, start_size);
+
+ // Verify that file has all records written so far and no more.
+ uint64 offset = 0;
+ string record;
+ for (size_t j = 0; j <= i; j++) {
+ // Check that j'th record is written correctly.
+ TF_CHECK_OK(reader.ReadRecord(&offset, &record));
+ EXPECT_EQ(record, records[j]);
+ }
+
+ // Verify that file has no more records.
+ CHECK_EQ(reader.ReadRecord(&offset, &record).code(), error::OUT_OF_RANGE);
+ }
+}
+
+} // namespace
+
+TEST(RecordReaderWriterTest, TestFlush) {
+ io::RecordWriterOptions options;
+ VerifyFlush(options);
+}
+
+TEST(RecordReaderWriterTest, TestZlibSyncFlush) {
+ io::RecordWriterOptions options;
+ options.compression_type = io::RecordWriterOptions::ZLIB_COMPRESSION;
+  // The default flush_mode is Z_NO_FLUSH, which only writes to the file when
+  // the buffer is full or the file is closed, making testing harder.
+ // By using Z_SYNC_FLUSH the test can verify Flush does write out records of
+ // approximately the right size at the right times.
+ options.zlib_options.flush_mode = Z_SYNC_FLUSH;
+
+ VerifyFlush(options);
+}
+
TEST(RecordReaderWriterTest, TestBasics) {
Env* env = Env::Default();
string fname = testing::TmpDir() + "/record_reader_writer_test";
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFBool.java b/tensorflow/core/lib/io/zlib_compression_options.cc
index ab34f6aa12..fc54083be1 100644
--- a/tensorflow/java/src/main/java/org/tensorflow/types/TFBool.java
+++ b/tensorflow/core/lib/io/zlib_compression_options.cc
@@ -1,4 +1,4 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,19 +12,21 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-// GENERATED FILE. To update, edit tftypes.pl instead.
-package org.tensorflow.types;
+#include "tensorflow/core/lib/io/zlib_compression_options.h"
-import org.tensorflow.DataType;
+#include <zlib.h>
-/** Represents a boolean. */
-public class TFBool implements TFType {
- private TFBool() {}
- static {
- Types.typeCodes.put(TFBool.class, DataType.BOOL);
- }
- static {
- Types.scalars.put(TFBool.class, false);
- }
+namespace tensorflow {
+namespace io {
+
+ZlibCompressionOptions::ZlibCompressionOptions() {
+ flush_mode = Z_NO_FLUSH;
+ window_bits = MAX_WBITS;
+ compression_level = Z_DEFAULT_COMPRESSION;
+ compression_method = Z_DEFLATED;
+ compression_strategy = Z_DEFAULT_STRATEGY;
}
+
+} // namespace io
+} // namespace tensorflow
diff --git a/tensorflow/core/lib/io/zlib_compression_options.h b/tensorflow/core/lib/io/zlib_compression_options.h
index dc7218e866..238c1464fb 100644
--- a/tensorflow/core/lib/io/zlib_compression_options.h
+++ b/tensorflow/core/lib/io/zlib_compression_options.h
@@ -16,8 +16,6 @@ limitations under the License.
#ifndef TENSORFLOW_LIB_IO_ZLIB_COMPRESSION_OPTIONS_H_
#define TENSORFLOW_LIB_IO_ZLIB_COMPRESSION_OPTIONS_H_
-#include <zlib.h>
-
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
@@ -25,11 +23,14 @@ namespace io {
class ZlibCompressionOptions {
public:
+ ZlibCompressionOptions();
+
static ZlibCompressionOptions DEFAULT();
static ZlibCompressionOptions RAW();
static ZlibCompressionOptions GZIP();
- int8 flush_mode = Z_NO_FLUSH;
+ // Defaults to Z_NO_FLUSH
+ int8 flush_mode;
// Size of the buffer used for caching the data read from source file.
int64 input_buffer_size = 256 << 10;
@@ -71,7 +72,9 @@ class ZlibCompressionOptions {
// window_bits value provided used while compressing. If a compressed stream
// with a larger window size is given as input, inflate() will return with the
// error code Z_DATA_ERROR instead of trying to allocate a larger window.
- int8 window_bits = MAX_WBITS;
+ //
+ // Defaults to MAX_WBITS
+ int8 window_bits;
// From the zlib manual (http://www.zlib.net/manual.html):
// The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
@@ -79,10 +82,10 @@ class ZlibCompressionOptions {
// (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION
// requests a default compromise between speed and compression (currently
// equivalent to level 6).
- int8 compression_level = Z_DEFAULT_COMPRESSION;
+ int8 compression_level;
- // The only one supported at this time.
- int8 compression_method = Z_DEFLATED;
+ // Only Z_DEFLATED is supported at this time.
+ int8 compression_method;
// From the zlib manual (http://www.zlib.net/manual.html):
// The mem_level parameter specifies how much memory should be allocated for
@@ -106,7 +109,7 @@ class ZlibCompressionOptions {
// but not the correctness of the compressed output even if it is not set
// appropriately. Z_FIXED prevents the use of dynamic Huffman codes, allowing
// for a simpler decoder for special applications.
- int8 compression_strategy = Z_DEFAULT_STRATEGY;
+ int8 compression_strategy;
};
inline ZlibCompressionOptions ZlibCompressionOptions::DEFAULT() {
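With the defaults moved into the new out-of-line constructor, this header no longer drags in zlib.h; code that assigns zlib constants includes zlib.h itself. A small usage sketch (not from the patch):

    #include <zlib.h>

    #include "tensorflow/core/lib/io/zlib_compression_options.h"

    tensorflow::io::ZlibCompressionOptions MakeSyncFlushOptions() {
      // The constructor sets Z_NO_FLUSH, MAX_WBITS, Z_DEFAULT_COMPRESSION,
      // Z_DEFLATED and Z_DEFAULT_STRATEGY.
      tensorflow::io::ZlibCompressionOptions options;
      options.flush_mode = Z_SYNC_FLUSH;  // flush each record, as in the new test
      return options;
    }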
diff --git a/tensorflow/core/lib/io/zlib_inputstream.cc b/tensorflow/core/lib/io/zlib_inputstream.cc
index 47de36bf6c..d069db6d20 100644
--- a/tensorflow/core/lib/io/zlib_inputstream.cc
+++ b/tensorflow/core/lib/io/zlib_inputstream.cc
@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+#include <zlib.h>
+
#include "tensorflow/core/lib/io/zlib_inputstream.h"
#include "tensorflow/core/lib/strings/strcat.h"
@@ -21,6 +23,35 @@ limitations under the License.
namespace tensorflow {
namespace io {
+struct ZStreamDef {
+ ZStreamDef(size_t input_buffer_capacity, size_t output_buffer_capacity)
+ : input(new Bytef[input_buffer_capacity]),
+ output(new Bytef[output_buffer_capacity]),
+ stream(new z_stream) {}
+
+ // Buffer for storing contents read from compressed stream.
+ // TODO(srbs): Consider using circular buffers. That would greatly simplify
+ // the implementation.
+ std::unique_ptr<Bytef[]> input;
+
+ // Buffer for storing inflated contents of `input_stream_`.
+ std::unique_ptr<Bytef[]> output;
+
+ // Configuration passed to `inflate`.
+ //
+ // z_stream_def_->stream->next_in:
+ // Next byte to de-compress. Points to some byte in
+  //   z_stream_def_->input buffer.
+ // z_stream_def_->stream->avail_in:
+ // Number of bytes available to be decompressed at this time.
+ // z_stream_def_->stream->next_out:
+ // Next byte to write de-compressed data to. Points to some byte in
+  //   z_stream_def_->output buffer.
+ // z_stream_def_->stream->avail_out:
+ // Number of free bytes available at write location.
+ std::unique_ptr<z_stream> stream;
+};
+
ZlibInputStream::ZlibInputStream(
InputStreamInterface* input_stream,
size_t input_buffer_bytes, // size of z_stream.next_in buffer
@@ -30,10 +61,9 @@ ZlibInputStream::ZlibInputStream(
input_stream_(input_stream),
input_buffer_capacity_(input_buffer_bytes),
output_buffer_capacity_(output_buffer_bytes),
- z_stream_input_(new Bytef[input_buffer_capacity_]),
- z_stream_output_(new Bytef[output_buffer_capacity_]),
zlib_options_(zlib_options),
- z_stream_(new z_stream),
+ z_stream_def_(
+ new ZStreamDef(input_buffer_capacity_, output_buffer_capacity_)),
bytes_read_(0) {
InitZlibBuffer();
}
@@ -46,8 +76,8 @@ ZlibInputStream::ZlibInputStream(InputStreamInterface* input_stream,
zlib_options, false) {}
ZlibInputStream::~ZlibInputStream() {
- if (z_stream_) {
- inflateEnd(z_stream_.get());
+ if (z_stream_def_->stream) {
+ inflateEnd(z_stream_def_->stream.get());
}
if (owns_input_stream_) {
delete input_stream_;
@@ -56,51 +86,54 @@ ZlibInputStream::~ZlibInputStream() {
Status ZlibInputStream::Reset() {
TF_RETURN_IF_ERROR(input_stream_->Reset());
- inflateEnd(z_stream_.get());
+ inflateEnd(z_stream_def_->stream.get());
InitZlibBuffer();
bytes_read_ = 0;
return Status::OK();
}
void ZlibInputStream::InitZlibBuffer() {
- memset(z_stream_.get(), 0, sizeof(z_stream));
+ memset(z_stream_def_->stream.get(), 0, sizeof(z_stream));
- z_stream_->zalloc = Z_NULL;
- z_stream_->zfree = Z_NULL;
- z_stream_->opaque = Z_NULL;
- z_stream_->next_in = Z_NULL;
- z_stream_->avail_in = 0;
+ z_stream_def_->stream->zalloc = Z_NULL;
+ z_stream_def_->stream->zfree = Z_NULL;
+ z_stream_def_->stream->opaque = Z_NULL;
+ z_stream_def_->stream->next_in = Z_NULL;
+ z_stream_def_->stream->avail_in = 0;
- int status = inflateInit2(z_stream_.get(), zlib_options_.window_bits);
+ int status =
+ inflateInit2(z_stream_def_->stream.get(), zlib_options_.window_bits);
CHECK_EQ(status, Z_OK) << "inflateInit failed with status " << status;
- z_stream_->next_in = z_stream_input_.get();
- z_stream_->next_out = z_stream_output_.get();
- next_unread_byte_ = reinterpret_cast<char*>(z_stream_output_.get());
- z_stream_->avail_in = 0;
- z_stream_->avail_out = output_buffer_capacity_;
+ z_stream_def_->stream->next_in = z_stream_def_->input.get();
+ z_stream_def_->stream->next_out = z_stream_def_->output.get();
+ next_unread_byte_ = reinterpret_cast<char*>(z_stream_def_->output.get());
+ z_stream_def_->stream->avail_in = 0;
+ z_stream_def_->stream->avail_out = output_buffer_capacity_;
}
Status ZlibInputStream::ReadFromStream() {
int bytes_to_read = input_buffer_capacity_;
- char* read_location = reinterpret_cast<char*>(z_stream_input_.get());
+ char* read_location = reinterpret_cast<char*>(z_stream_def_->input.get());
// If there are unread bytes in the input stream we move them to the head
// of the stream to maximize the space available to read new data into.
- if (z_stream_->avail_in > 0) {
- uLong read_bytes = z_stream_->next_in - z_stream_input_.get();
+ if (z_stream_def_->stream->avail_in > 0) {
+ uLong read_bytes =
+ z_stream_def_->stream->next_in - z_stream_def_->input.get();
// Remove `read_bytes` from the head of the input stream.
// Move unread bytes to the head of the input stream.
if (read_bytes > 0) {
- memmove(z_stream_input_.get(), z_stream_->next_in, z_stream_->avail_in);
+ memmove(z_stream_def_->input.get(), z_stream_def_->stream->next_in,
+ z_stream_def_->stream->avail_in);
}
- bytes_to_read -= z_stream_->avail_in;
- read_location += z_stream_->avail_in;
+ bytes_to_read -= z_stream_def_->stream->avail_in;
+ read_location += z_stream_def_->stream->avail_in;
}
string data;
- // Try to read enough data to fill up z_stream_input_.
+ // Try to read enough data to fill up z_stream_def_->input.
// TODO(rohanj): Add a char* version of ReadNBytes to InputStreamInterface
// and use that instead to make this more efficient.
Status s = input_stream_->ReadNBytes(bytes_to_read, &data);
@@ -108,10 +141,10 @@ Status ZlibInputStream::ReadFromStream() {
// Since we moved unread data to the head of the input stream we can point
// next_in to the head of the input stream.
- z_stream_->next_in = z_stream_input_.get();
+ z_stream_def_->stream->next_in = z_stream_def_->input.get();
// Note: data.size() could be different from bytes_to_read.
- z_stream_->avail_in += data.size();
+ z_stream_def_->stream->avail_in += data.size();
if (!s.ok() && !errors::IsOutOfRange(s)) {
return s;
@@ -135,7 +168,8 @@ Status ZlibInputStream::ReadFromStream() {
size_t ZlibInputStream::ReadBytesFromCache(size_t bytes_to_read,
string* result) {
size_t unread_bytes =
- reinterpret_cast<char*>(z_stream_->next_out) - next_unread_byte_;
+ reinterpret_cast<char*>(z_stream_def_->stream->next_out) -
+ next_unread_byte_;
size_t can_read_bytes = std::min(bytes_to_read, unread_bytes);
if (can_read_bytes > 0) {
result->append(next_unread_byte_, can_read_bytes);
@@ -147,8 +181,9 @@ size_t ZlibInputStream::ReadBytesFromCache(size_t bytes_to_read,
size_t ZlibInputStream::NumUnreadBytes() const {
size_t read_bytes =
- next_unread_byte_ - reinterpret_cast<char*>(z_stream_output_.get());
- return output_buffer_capacity_ - z_stream_->avail_out - read_bytes;
+ next_unread_byte_ - reinterpret_cast<char*>(z_stream_def_->output.get());
+ return output_buffer_capacity_ - z_stream_def_->stream->avail_out -
+ read_bytes;
}
Status ZlibInputStream::ReadNBytes(int64 bytes_to_read, string* result) {
@@ -167,14 +202,14 @@ Status ZlibInputStream::ReadNBytes(int64 bytes_to_read, string* result) {
// completely consumed. This is an optimization and can be removed if
// it causes problems. `ReadFromStream` is capable of handling partially
// filled up buffers.
- if (z_stream_->avail_in == 0) {
+ if (z_stream_def_->stream->avail_in == 0) {
TF_RETURN_IF_ERROR(ReadFromStream());
}
// Step 2. Setup output stream.
- z_stream_->next_out = z_stream_output_.get();
- next_unread_byte_ = reinterpret_cast<char*>(z_stream_output_.get());
- z_stream_->avail_out = output_buffer_capacity_;
+ z_stream_def_->stream->next_out = z_stream_def_->output.get();
+ next_unread_byte_ = reinterpret_cast<char*>(z_stream_def_->output.get());
+ z_stream_def_->stream->avail_out = output_buffer_capacity_;
// Step 3. Inflate Inflate Inflate!
TF_RETURN_IF_ERROR(Inflate());
@@ -188,12 +223,12 @@ Status ZlibInputStream::ReadNBytes(int64 bytes_to_read, string* result) {
int64 ZlibInputStream::Tell() const { return bytes_read_; }
Status ZlibInputStream::Inflate() {
- int error = inflate(z_stream_.get(), zlib_options_.flush_mode);
+ int error = inflate(z_stream_def_->stream.get(), zlib_options_.flush_mode);
if (error != Z_OK && error != Z_STREAM_END) {
string error_string =
strings::StrCat("inflate() failed with error ", error);
- if (z_stream_->msg != nullptr) {
- strings::StrAppend(&error_string, ": ", z_stream_->msg);
+ if (z_stream_def_->stream->msg != nullptr) {
+ strings::StrAppend(&error_string, ": ", z_stream_def_->stream->msg);
}
return errors::DataLoss(error_string);
}
diff --git a/tensorflow/core/lib/io/zlib_inputstream.h b/tensorflow/core/lib/io/zlib_inputstream.h
index 37339163ee..ac9e23ca97 100644
--- a/tensorflow/core/lib/io/zlib_inputstream.h
+++ b/tensorflow/core/lib/io/zlib_inputstream.h
@@ -16,8 +16,6 @@ limitations under the License.
#ifndef TENSORFLOW_LIB_IO_ZLIB_INPUTSTREAM_H_
#define TENSORFLOW_LIB_IO_ZLIB_INPUTSTREAM_H_
-#include <zlib.h>
-
#include <string>
#include "tensorflow/core/lib/core/status.h"
@@ -30,6 +28,10 @@ limitations under the License.
namespace tensorflow {
namespace io {
+// Forward declare the zlib-backed stream state (ZStreamDef), which is defined
+// in the .cc file so that zlib.h is not needed in this header.
+struct ZStreamDef;
+
// A ZlibInputStream provides support for reading from a stream compressed
// using zlib (http://www.zlib.net/). Buffers the contents of the file.
//
@@ -79,28 +81,9 @@ class ZlibInputStream : public InputStreamInterface {
size_t output_buffer_capacity_; // Size of z_stream_output_
char* next_unread_byte_; // Next unread byte in z_stream_output_
- // Buffer for storing contents read from compressed stream.
- // TODO(srbs): Consider using circular buffers. That would greatly simplify
- // the implementation.
- std::unique_ptr<Bytef[]> z_stream_input_;
-
- // Buffer for storing inflated contents of `input_stream_`.
- std::unique_ptr<Bytef[]> z_stream_output_;
-
ZlibCompressionOptions const zlib_options_;
- // Configuration passed to `inflate`.
- //
- // z_stream_->next_in:
- // Next byte to de-compress. Points to some byte in z_stream_input_ buffer.
- // z_stream_->avail_in:
- // Number of bytes available to be decompressed at this time.
- // z_stream_->next_out:
- // Next byte to write de-compressed data to. Points to some byte in
- // z_stream_output_ buffer.
- // z_stream_->avail_out:
- // Number of free bytes available at write location.
- std::unique_ptr<z_stream> z_stream_;
+ std::unique_ptr<ZStreamDef> z_stream_def_;
// Reads data from `input_stream_` and tries to fill up `z_stream_input_` if
// enough unread data is left in `input_stream_`.
diff --git a/tensorflow/core/ops/array_ops.cc b/tensorflow/core/ops/array_ops.cc
index fce0b93cd7..d6ae75473f 100644
--- a/tensorflow/core/ops/array_ops.cc
+++ b/tensorflow/core/ops/array_ops.cc
@@ -2549,14 +2549,16 @@ REGISTER_OP("ExtractImagePatches")
REGISTER_OP("Bitcast")
.Input("input: T")
.Output("output: type")
- // All supported dtypes are listed here to include qint16 and quint16.
+ // All supported dtypes are listed here to include qint16, quint16, uint32,
+ // and uint64.
.Attr(
- "T: {bfloat16, half, float, double, int64, int32, uint8, uint16, int8, "
- "int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32}")
+ "T: {bfloat16, half, float, double, int64, int32, uint8, uint16, "
+ "uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, "
+ "qint16, quint16, qint32}")
.Attr(
"type: {bfloat16, half, float, double, int64, int32, uint8, uint16, "
- "int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, "
- "qint32}")
+ "uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, "
+ "qint16, quint16, qint32}")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle input = c->input(0);
if (!c->RankKnown(input)) {
@@ -2879,7 +2881,7 @@ REGISTER_OP("ScatterNdNonAliasingAdd")
.Input("indices: Tindices")
.Input("updates: T")
.Output("output: T")
- .Attr("T: numbertype")
+ .Attr("T: {numbertype, bool}")
.Attr("Tindices: {int32, int64}")
.SetShapeFn(shape_inference::ScatterNdUpdateShape);
diff --git a/tensorflow/core/ops/compat/ops_history.v1.pbtxt b/tensorflow/core/ops/compat/ops_history.v1.pbtxt
index 6cdd03e6a0..69351cd392 100644
--- a/tensorflow/core/ops/compat/ops_history.v1.pbtxt
+++ b/tensorflow/core/ops/compat/ops_history.v1.pbtxt
@@ -7681,66 +7681,6 @@ op {
}
}
op {
- name: "AvgPool"
- input_arg {
- name: "value"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "ksize"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "strides"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_BFLOAT16
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
-}
-op {
name: "AvgPool3D"
input_arg {
name: "input"
@@ -8430,70 +8370,6 @@ op {
}
}
op {
- name: "AvgPoolGrad"
- input_arg {
- name: "orig_input_shape"
- type: DT_INT32
- }
- input_arg {
- name: "grad"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "ksize"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "strides"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_BFLOAT16
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
-}
-op {
name: "Barrier"
output_arg {
name: "handle"
@@ -10555,61 +10431,6 @@ op {
}
}
op {
- name: "BiasAdd"
- input_arg {
- name: "value"
- type_attr: "T"
- }
- input_arg {
- name: "bias"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_UINT8
- type: DT_INT16
- type: DT_INT8
- type: DT_COMPLEX64
- type: DT_INT64
- type: DT_QINT8
- type: DT_QUINT8
- type: DT_QINT32
- type: DT_BFLOAT16
- type: DT_UINT16
- type: DT_COMPLEX128
- type: DT_HALF
- type: DT_UINT32
- type: DT_UINT64
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
-}
-op {
name: "BiasAddGrad"
input_arg {
name: "out_backprop"
@@ -10802,57 +10623,6 @@ op {
}
}
op {
- name: "BiasAddGrad"
- input_arg {
- name: "out_backprop"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_UINT8
- type: DT_INT16
- type: DT_INT8
- type: DT_COMPLEX64
- type: DT_INT64
- type: DT_QINT8
- type: DT_QUINT8
- type: DT_QINT32
- type: DT_BFLOAT16
- type: DT_UINT16
- type: DT_COMPLEX128
- type: DT_HALF
- type: DT_UINT32
- type: DT_UINT64
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
-}
-op {
name: "BiasAddV1"
input_arg {
name: "value"
@@ -11276,6 +11046,71 @@ op {
}
}
op {
+ name: "Bitcast"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "type"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_BFLOAT16
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_UINT32
+ type: DT_UINT64
+ type: DT_INT8
+ type: DT_INT16
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT16
+ type: DT_QUINT16
+ type: DT_QINT32
+ }
+ }
+ }
+ attr {
+ name: "type"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_BFLOAT16
+ type: DT_HALF
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT64
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_UINT16
+ type: DT_UINT32
+ type: DT_UINT64
+ type: DT_INT8
+ type: DT_INT16
+ type: DT_COMPLEX64
+ type: DT_COMPLEX128
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT16
+ type: DT_QUINT16
+ type: DT_QINT32
+ }
+ }
+ }
+}
+op {
name: "BitwiseAnd"
input_arg {
name: "x"
@@ -13457,144 +13292,6 @@ op {
}
}
op {
- name: "Conv2D"
- input_arg {
- name: "input"
- type_attr: "T"
- }
- input_arg {
- name: "filter"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_BFLOAT16
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
- attr {
- name: "strides"
- type: "list(int)"
- }
- attr {
- name: "use_cudnn_on_gpu"
- type: "bool"
- default_value {
- b: true
- }
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "dilations"
- type: "list(int)"
- default_value {
- list {
- i: 1
- i: 1
- i: 1
- i: 1
- }
- }
- }
-}
-op {
- name: "Conv2DBackpropFilter"
- input_arg {
- name: "input"
- type_attr: "T"
- }
- input_arg {
- name: "filter_sizes"
- type: DT_INT32
- }
- input_arg {
- name: "out_backprop"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_FLOAT
- }
- }
- }
- attr {
- name: "strides"
- type: "list(int)"
- }
- attr {
- name: "use_cudnn_on_gpu"
- type: "bool"
- default_value {
- b: true
- }
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- }
- }
- }
-}
-op {
name: "Conv2DBackpropFilter"
input_arg {
name: "input"
@@ -13618,7 +13315,6 @@ op {
allowed_values {
list {
type: DT_HALF
- type: DT_BFLOAT16
type: DT_FLOAT
}
}
@@ -13657,18 +13353,6 @@ op {
}
}
}
- attr {
- name: "dilations"
- type: "list(int)"
- default_value {
- list {
- i: 1
- i: 1
- i: 1
- i: 1
- }
- }
- }
}
op {
name: "Conv2DBackpropFilter"
@@ -13696,7 +13380,6 @@ op {
type: DT_HALF
type: DT_BFLOAT16
type: DT_FLOAT
- type: DT_DOUBLE
}
}
}
@@ -13808,8 +13491,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -14043,85 +13724,6 @@ op {
}
}
op {
- name: "Conv2DBackpropInput"
- input_arg {
- name: "input_sizes"
- type: DT_INT32
- }
- input_arg {
- name: "filter"
- type_attr: "T"
- }
- input_arg {
- name: "out_backprop"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_BFLOAT16
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
- attr {
- name: "strides"
- type: "list(int)"
- }
- attr {
- name: "use_cudnn_on_gpu"
- type: "bool"
- default_value {
- b: true
- }
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "dilations"
- type: "list(int)"
- default_value {
- list {
- i: 1
- i: 1
- i: 1
- i: 1
- }
- }
- }
-}
-op {
name: "Conv3D"
input_arg {
name: "input"
@@ -18852,117 +18454,6 @@ op {
}
}
op {
- name: "DepthwiseConv2dNative"
- input_arg {
- name: "input"
- type_attr: "T"
- }
- input_arg {
- name: "filter"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_BFLOAT16
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
- attr {
- name: "strides"
- type: "list(int)"
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "dilations"
- type: "list(int)"
- default_value {
- list {
- i: 1
- i: 1
- i: 1
- i: 1
- }
- }
- }
-}
-op {
- name: "DepthwiseConv2dNativeBackpropFilter"
- input_arg {
- name: "input"
- type_attr: "T"
- }
- input_arg {
- name: "filter_sizes"
- type: DT_INT32
- }
- input_arg {
- name: "out_backprop"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
- attr {
- name: "strides"
- type: "list(int)"
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
-}
-op {
name: "DepthwiseConv2dNativeBackpropFilter"
input_arg {
name: "input"
@@ -19004,19 +18495,6 @@ op {
}
}
}
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- }
- }
- }
}
op {
name: "DepthwiseConv2dNativeBackpropFilter"
@@ -19041,7 +18519,6 @@ op {
type: "type"
allowed_values {
list {
- type: DT_BFLOAT16
type: DT_FLOAT
type: DT_DOUBLE
}
@@ -19074,18 +18551,6 @@ op {
}
}
}
- attr {
- name: "dilations"
- type: "list(int)"
- default_value {
- list {
- i: 1
- i: 1
- i: 1
- i: 1
- }
- }
- }
}
op {
name: "DepthwiseConv2dNativeBackpropFilter"
@@ -19110,7 +18575,6 @@ op {
type: "type"
allowed_values {
list {
- type: DT_HALF
type: DT_BFLOAT16
type: DT_FLOAT
type: DT_DOUBLE
@@ -19211,8 +18675,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -19468,78 +18930,6 @@ op {
}
}
op {
- name: "DepthwiseConv2dNativeBackpropInput"
- input_arg {
- name: "input_sizes"
- type: DT_INT32
- }
- input_arg {
- name: "filter"
- type_attr: "T"
- }
- input_arg {
- name: "out_backprop"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_HALF
- type: DT_BFLOAT16
- type: DT_FLOAT
- type: DT_DOUBLE
- }
- }
- }
- attr {
- name: "strides"
- type: "list(int)"
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "dilations"
- type: "list(int)"
- default_value {
- list {
- i: 1
- i: 1
- i: 1
- i: 1
- }
- }
- }
-}
-op {
name: "Dequantize"
input_arg {
name: "input"
@@ -32234,85 +31624,6 @@ op {
}
}
op {
- name: "MaxPoolGrad"
- input_arg {
- name: "orig_input"
- type_attr: "T"
- }
- input_arg {
- name: "orig_output"
- type_attr: "T"
- }
- input_arg {
- name: "grad"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "ksize"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "strides"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "T"
- type: "type"
- default_value {
- type: DT_FLOAT
- }
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_UINT8
- type: DT_INT16
- type: DT_INT8
- type: DT_INT64
- type: DT_BFLOAT16
- type: DT_UINT16
- type: DT_HALF
- type: DT_UINT32
- type: DT_UINT64
- }
- }
- }
-}
-op {
name: "MaxPoolGradGrad"
input_arg {
name: "orig_input"
@@ -32605,82 +31916,6 @@ op {
}
}
op {
- name: "MaxPoolGradGrad"
- input_arg {
- name: "orig_input"
- type_attr: "T"
- }
- input_arg {
- name: "orig_output"
- type_attr: "T"
- }
- input_arg {
- name: "grad"
- type_attr: "T"
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "ksize"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "strides"
- type: "list(int)"
- has_minimum: true
- minimum: 4
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_UINT8
- type: DT_INT16
- type: DT_INT8
- type: DT_INT64
- type: DT_BFLOAT16
- type: DT_UINT16
- type: DT_HALF
- type: DT_UINT32
- type: DT_UINT64
- }
- }
- }
-}
-op {
name: "MaxPoolGradGradV2"
input_arg {
name: "orig_input"
@@ -32957,78 +32192,6 @@ op {
}
}
op {
- name: "MaxPoolGradGradV2"
- input_arg {
- name: "orig_input"
- type_attr: "T"
- }
- input_arg {
- name: "orig_output"
- type_attr: "T"
- }
- input_arg {
- name: "grad"
- type_attr: "T"
- }
- input_arg {
- name: "ksize"
- type: DT_INT32
- }
- input_arg {
- name: "strides"
- type: DT_INT32
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "T"
- type: "type"
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_UINT8
- type: DT_INT16
- type: DT_INT8
- type: DT_INT64
- type: DT_BFLOAT16
- type: DT_UINT16
- type: DT_HALF
- type: DT_UINT32
- type: DT_UINT64
- }
- }
- }
-}
-op {
name: "MaxPoolGradGradWithArgmax"
input_arg {
name: "input"
@@ -33597,81 +32760,6 @@ op {
}
}
op {
- name: "MaxPoolGradV2"
- input_arg {
- name: "orig_input"
- type_attr: "T"
- }
- input_arg {
- name: "orig_output"
- type_attr: "T"
- }
- input_arg {
- name: "grad"
- type_attr: "T"
- }
- input_arg {
- name: "ksize"
- type: DT_INT32
- }
- input_arg {
- name: "strides"
- type: DT_INT32
- }
- output_arg {
- name: "output"
- type_attr: "T"
- }
- attr {
- name: "padding"
- type: "string"
- allowed_values {
- list {
- s: "SAME"
- s: "VALID"
- }
- }
- }
- attr {
- name: "data_format"
- type: "string"
- default_value {
- s: "NHWC"
- }
- allowed_values {
- list {
- s: "NHWC"
- s: "NCHW"
- s: "HWNC"
- s: "HWCN"
- }
- }
- }
- attr {
- name: "T"
- type: "type"
- default_value {
- type: DT_FLOAT
- }
- allowed_values {
- list {
- type: DT_FLOAT
- type: DT_DOUBLE
- type: DT_INT32
- type: DT_UINT8
- type: DT_INT16
- type: DT_INT8
- type: DT_INT64
- type: DT_BFLOAT16
- type: DT_UINT16
- type: DT_HALF
- type: DT_UINT32
- type: DT_UINT64
- }
- }
- }
-}
-op {
name: "MaxPoolGradWithArgmax"
input_arg {
name: "input"
@@ -56155,6 +55243,61 @@ op {
}
}
op {
+ name: "ScatterNdNonAliasingAdd"
+ input_arg {
+ name: "input"
+ type_attr: "T"
+ }
+ input_arg {
+ name: "indices"
+ type_attr: "Tindices"
+ }
+ input_arg {
+ name: "updates"
+ type_attr: "T"
+ }
+ output_arg {
+ name: "output"
+ type_attr: "T"
+ }
+ attr {
+ name: "T"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_FLOAT
+ type: DT_DOUBLE
+ type: DT_INT32
+ type: DT_UINT8
+ type: DT_INT16
+ type: DT_INT8
+ type: DT_COMPLEX64
+ type: DT_INT64
+ type: DT_QINT8
+ type: DT_QUINT8
+ type: DT_QINT32
+ type: DT_BFLOAT16
+ type: DT_UINT16
+ type: DT_COMPLEX128
+ type: DT_HALF
+ type: DT_UINT32
+ type: DT_UINT64
+ type: DT_BOOL
+ }
+ }
+ }
+ attr {
+ name: "Tindices"
+ type: "type"
+ allowed_values {
+ list {
+ type: DT_INT32
+ type: DT_INT64
+ }
+ }
+ }
+}
+op {
name: "ScatterNdSub"
input_arg {
name: "ref"
@@ -59148,7 +58291,11 @@ op {
type: DT_INT64
}
input_arg {
- name: "stride"
+ name: "window_shift"
+ type: DT_INT64
+ }
+ input_arg {
+ name: "window_stride"
type: DT_INT64
}
output_arg {
diff --git a/tensorflow/core/ops/dataset_ops.cc b/tensorflow/core/ops/dataset_ops.cc
index c8bc11155a..8c83a09597 100644
--- a/tensorflow/core/ops/dataset_ops.cc
+++ b/tensorflow/core/ops/dataset_ops.cc
@@ -404,19 +404,20 @@ REGISTER_OP("BatchDatasetV2")
return shape_inference::ScalarShape(c);
});
-// TODO(mrry): move SlideDataset to contrib in the future.
REGISTER_OP("SlideDataset")
.Input("input_dataset: variant")
.Input("window_size: int64")
- .Input("stride: int64")
+ .Input("window_shift: int64")
+ .Input("window_stride: int64")
.Output("handle: variant")
.Attr("output_types: list(type) >= 1")
.Attr("output_shapes: list(shape) >= 1")
.SetShapeFn([](shape_inference::InferenceContext* c) {
shape_inference::ShapeHandle unused;
- // window_size and stride should be scalars.
+ // window_size, window_shift, and window_stride should be scalars.
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
return shape_inference::ScalarShape(c);
});
diff --git a/tensorflow/core/ops/debug_ops.cc b/tensorflow/core/ops/debug_ops.cc
index 5aebdca1ea..2d9b4360de 100644
--- a/tensorflow/core/ops/debug_ops.cc
+++ b/tensorflow/core/ops/debug_ops.cc
@@ -20,7 +20,7 @@ limitations under the License.
namespace tensorflow {
-// EXPERIMENTAL: tfdbg debugger-inserted ops.
+// TensorFlow Debugger-inserted ops.
// These ops are used only internally by tfdbg. There is no API for users to
// directly create them. Users can create them indirectly by using
// RunOptions.debug_options during Session::Run() call. See tfdbg documentation
diff --git a/tensorflow/core/ops/lookup_ops.cc b/tensorflow/core/ops/lookup_ops.cc
index 444aa8b954..2059741da9 100644
--- a/tensorflow/core/ops/lookup_ops.cc
+++ b/tensorflow/core/ops/lookup_ops.cc
@@ -140,11 +140,13 @@ REGISTER_OP("LookupTableSize")
.Input("table_handle: Ref(string)")
.Output("size: int64")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
+WHITELIST_STATEFUL_OP_FOR_DATASET_FUNCTIONS("LookupTableSize");
REGISTER_OP("LookupTableSizeV2")
.Input("table_handle: resource")
.Output("size: int64")
.SetShapeFn(ScalarAndTwoElementVectorInputsAndScalarOutputs);
+WHITELIST_STATEFUL_OP_FOR_DATASET_FUNCTIONS("LookupTableSizeV2");
REGISTER_OP("LookupTableExport")
.Input("table_handle: Ref(string)")
diff --git a/tensorflow/core/ops/math_ops.cc b/tensorflow/core/ops/math_ops.cc
index c229bd5a41..386ae9635a 100644
--- a/tensorflow/core/ops/math_ops.cc
+++ b/tensorflow/core/ops/math_ops.cc
@@ -1380,10 +1380,26 @@ REGISTER_OP("HistogramFixedWidth")
.Attr("T: {int32, int64, float32, float64}")
.Attr("dtype: {int32, int64} = DT_INT32")
.SetShapeFn([](InferenceContext* c) {
+ // value_range should be a vector.
+ ShapeHandle value_range_shape;
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &value_range_shape));
+ // value_range should have two elements.
+ DimensionHandle unused;
+ TF_RETURN_IF_ERROR(
+ c->WithValue(c->Dim(value_range_shape, 0), 2, &unused));
+ // nbins should be a scalar.
+ ShapeHandle nbins_shape;
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &nbins_shape));
+
+ // If nbins is available, set the shape from nbins.
const Tensor* nbins_input = c->input_tensor(2);
if (nbins_input != nullptr) {
int64 nbins;
TF_RETURN_IF_ERROR(c->GetScalarFromTensor(nbins_input, &nbins));
+ // nbins has to be positive.
+ if (nbins <= 0) {
+ return errors::InvalidArgument("Requires nbins > 0: ", nbins);
+ }
c->set_output(0, c->Vector(nbins));
} else {
c->set_output(0, c->UnknownShapeOfRank(1));
@@ -1488,6 +1504,13 @@ REGISTER_OP("QuantizedAdd")
.SetIsCommutative()
.SetShapeFn([](InferenceContext* c) {
TF_RETURN_IF_ERROR(shape_inference::BroadcastBinaryOpShapeFn(c));
+ // min_x, max_x, min_y, max_y should be scalar.
+ ShapeHandle unused;
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused));
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(4), 0, &unused));
+ TF_RETURN_IF_ERROR(c->WithRank(c->input(5), 0, &unused));
+
c->set_output(1, c->Scalar());
c->set_output(2, c->Scalar());
return Status::OK();
diff --git a/tensorflow/core/ops/math_ops_test.cc b/tensorflow/core/ops/math_ops_test.cc
index 8f974d5367..23f1538912 100644
--- a/tensorflow/core/ops/math_ops_test.cc
+++ b/tensorflow/core/ops/math_ops_test.cc
@@ -528,4 +528,34 @@ TEST(MathOpsTest, Cross_ShapeFn) {
INFER_OK(op, "[?];[?]", "in0");
INFER_OK(op, "[1,?,3];[?,?,?]", "in0");
}
+
+TEST(MathOpsTest, HistogramFixedWidth_ShapeFn) {
+ ShapeInferenceTestOp op("HistogramFixedWidth");
+
+ // value_range should be vector.
+ INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[];[]");
+ // value_range should have 2 elements.
+ INFER_ERROR("Dimension must be 2 but is 3", op, "[];[3];[]");
+ // nbins should be scalar.
+ INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[2];[2]");
+
+ INFER_OK(op, "?;?;?", "[?]");
+ INFER_OK(op, "[?];[2];[]", "[?]");
+ INFER_OK(op, "[?];[2];?", "[?]");
+}
+
+TEST(MathOpsTest, QuantizedAdd_ShapeFn) {
+ ShapeInferenceTestOp op("QuantizedAdd");
+
+ INFER_OK(op, "?;?;?;?;?;?", "?;[];[]");
+ INFER_OK(op, "?;?;[];[];[];[]", "?;[];[]");
+ INFER_OK(op, "[1,2];?;[];[];[];[]", "?;[];[]");
+ INFER_OK(op, "[];[2];[];[];[];[]", "[d1_0];[];[]");
+
+ // Rank checks on input scalars.
+ INFER_ERROR("must be rank 0", op, "?;?;[1];?;?;?");
+ INFER_ERROR("must be rank 0", op, "?;?;?;[2];?;?");
+ INFER_ERROR("must be rank 0", op, "?;?;?;?;[3];?");
+ INFER_ERROR("must be rank 0", op, "?;?;?;?;?;[4]");
+}
} // end namespace tensorflow
diff --git a/tensorflow/core/ops/ops.pbtxt b/tensorflow/core/ops/ops.pbtxt
index 9a9f10f01f..978bb0bbf4 100644
--- a/tensorflow/core/ops/ops.pbtxt
+++ b/tensorflow/core/ops/ops.pbtxt
@@ -2490,8 +2490,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -2674,8 +2672,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -3989,8 +3985,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -4040,8 +4034,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -4140,6 +4132,8 @@ op {
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
+ type: DT_UINT32
+ type: DT_UINT64
type: DT_INT8
type: DT_INT16
type: DT_COMPLEX64
@@ -4165,6 +4159,8 @@ op {
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
+ type: DT_UINT32
+ type: DT_UINT64
type: DT_INT8
type: DT_INT16
type: DT_COMPLEX64
@@ -5730,8 +5726,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -5809,8 +5803,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -5888,8 +5880,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -8592,8 +8582,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -8664,8 +8652,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -8736,8 +8722,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -15509,8 +15493,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -15588,8 +15570,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -15660,8 +15640,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -15803,8 +15781,6 @@ op {
list {
s: "NHWC"
s: "NCHW"
- s: "HWNC"
- s: "HWCN"
}
}
}
@@ -26206,6 +26182,7 @@ op {
type: DT_HALF
type: DT_UINT32
type: DT_UINT64
+ type: DT_BOOL
}
}
}
@@ -27606,7 +27583,11 @@ op {
type: DT_INT64
}
input_arg {
- name: "stride"
+ name: "window_shift"
+ type: DT_INT64
+ }
+ input_arg {
+ name: "window_stride"
type: DT_INT64
}
output_arg {
diff --git a/tensorflow/core/platform/windows/port.cc b/tensorflow/core/platform/windows/port.cc
index f2aaf13bec..5375f56372 100644
--- a/tensorflow/core/platform/windows/port.cc
+++ b/tensorflow/core/platform/windows/port.cc
@@ -33,6 +33,7 @@ limitations under the License.
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
+#include "tensorflow/core/platform/numa.h"
#include "tensorflow/core/platform/snappy.h"
#include "tensorflow/core/platform/types.h"
@@ -57,6 +58,17 @@ int NumSchedulableCPUs() {
return system_info.dwNumberOfProcessors;
}
+bool NUMAEnabled() {
+ // Not yet implemented: coming soon.
+ return false;
+}
+
+int NUMANumNodes() { return 1; }
+
+void NUMASetThreadNodeAffinity(int node) {}
+
+int NUMAGetThreadNodeAffinity() { return kNUMANoAffinity; }
+
void* AlignedMalloc(size_t size, int minimum_alignment) {
#ifdef TENSORFLOW_USE_JEMALLOC
void* ptr = NULL;
@@ -108,6 +120,14 @@ void Free(void* ptr) {
#endif
}
+void* NUMAMalloc(int node, size_t size, int minimum_alignment) {
+ return AlignedMalloc(size, minimum_alignment);
+}
+
+void NUMAFree(void* ptr, size_t size) { Free(ptr); }
+
+int NUMAGetMemAffinity(const void* addr) { return kNUMANoAffinity; }
+
void MallocExtension_ReleaseToSystem(std::size_t num_bytes) {
// No-op.
}
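These Windows stubs complete the portable NUMA surface declared in tensorflow/core/platform/numa.h, currently reporting a single node and falling back to plain aligned allocation. A hedged sketch of caller-side usage; the helpers are illustrative:

    #include <cstddef>

    #include "tensorflow/core/platform/numa.h"

    void* AllocOnNode0(std::size_t bytes) {
      // On Windows this currently degrades to AlignedMalloc (see stubs above);
      // on NUMA-aware platforms it may bind the pages to node 0.
      return tensorflow::port::NUMAMalloc(/*node=*/0, bytes,
                                          /*minimum_alignment=*/64);
    }

    void FreeOnNode0(void* ptr, std::size_t bytes) {
      tensorflow::port::NUMAFree(ptr, bytes);
    }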
diff --git a/tensorflow/core/protobuf/config.proto b/tensorflow/core/protobuf/config.proto
index 5b6aa47b93..d701ce8e12 100644
--- a/tensorflow/core/protobuf/config.proto
+++ b/tensorflow/core/protobuf/config.proto
@@ -145,7 +145,8 @@ message GPUOptions {
bool use_unified_memory = 2;
// If > 1, the number of device-to-device copy streams to create
- // for each GPUDevice.
+ // for each GPUDevice. Default value is 0, which is automatically
+ // converted to 1.
int32 num_dev_to_dev_copy_streams = 3;
}
@@ -389,6 +390,9 @@ message ConfigProto {
message Experimental {
// Task name for group resolution.
string collective_group_leader = 1;
+ // Whether the client will format templated errors. For example, the string:
+ // "The node was defined on ^^node:Foo:${file}:${line}^^".
+ bool client_handles_error_formatting = 2;
};
Experimental experimental = 16;
@@ -412,6 +416,11 @@ message RunOptions {
int64 timeout_in_ms = 2;
// The thread pool to use, if session_inter_op_thread_pool is configured.
+  // To use the caller thread, set this to -1; this uses the caller thread
+ // to execute Session::Run() and thus avoids a context switch. Using the
+ // caller thread to execute Session::Run() should be done ONLY for simple
+ // graphs, where the overhead of an additional context switch is
+ // comparable with the overhead of Session::Run().
int32 inter_op_thread_pool = 3;
// Whether the partition graph(s) executed by the executor(s) should be
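For the caller-thread option documented above, a hedged usage sketch; it assumes a live Session* plus prepared feeds and fetches, and that session_inter_op_thread_pool was configured when the session was created:

    #include <string>
    #include <utility>
    #include <vector>

    #include "tensorflow/core/protobuf/config.pb.h"
    #include "tensorflow/core/public/session.h"

    tensorflow::Status RunInCallerThread(
        tensorflow::Session* session,
        const std::vector<std::pair<std::string, tensorflow::Tensor>>& inputs,
        const std::vector<std::string>& fetches,
        std::vector<tensorflow::Tensor>* outputs) {
      tensorflow::RunOptions run_options;
      run_options.set_inter_op_thread_pool(-1);  // execute on the calling thread
      tensorflow::RunMetadata run_metadata;
      return session->Run(run_options, inputs, fetches,
                          /*target_node_names=*/{}, outputs, &run_metadata);
    }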
diff --git a/tensorflow/core/protobuf/debug.proto b/tensorflow/core/protobuf/debug.proto
index 499900f965..811cf406b9 100644
--- a/tensorflow/core/protobuf/debug.proto
+++ b/tensorflow/core/protobuf/debug.proto
@@ -7,7 +7,7 @@ option java_multiple_files = true;
option java_package = "org.tensorflow.framework";
option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf";
-// EXPERIMENTAL. Option for watching a node.
+// Option for watching a node in TensorFlow Debugger (tfdbg).
message DebugTensorWatch {
// Name of the node to watch.
string node_name = 1;
@@ -51,7 +51,7 @@ message DebugTensorWatch {
bool tolerate_debug_op_creation_failures = 5;
}
-// EXPERIMENTAL. Options for initializing DebuggerState.
+// Options for initializing DebuggerState in TensorFlow Debugger (tfdbg).
message DebugOptions {
// Debugging options
repeated DebugTensorWatch debug_tensor_watch_opts = 4;
diff --git a/tensorflow/core/protobuf/eager_service.proto b/tensorflow/core/protobuf/eager_service.proto
index 5b05a1b3ee..63ba4eb173 100644
--- a/tensorflow/core/protobuf/eager_service.proto
+++ b/tensorflow/core/protobuf/eager_service.proto
@@ -8,6 +8,7 @@ import "tensorflow/core/framework/function.proto";
import "tensorflow/core/framework/versions.proto";
import "tensorflow/core/protobuf/tensorflow_server.proto";
import "tensorflow/core/framework/tensor_shape.proto";
+import "tensorflow/core/framework/tensor.proto";
message RemoteTensorHandle {
// The ID of the operation that produced this tensor.
@@ -128,6 +129,24 @@ message RegisterFunctionRequest {
message RegisterFunctionResponse {
}
+message SendTensorRequest {
+ fixed64 context_id = 1;
+
+ // All remote tensors are identified by <Op ID, Output num>. To mimic this
+ // situation when directly sending tensors, we include an "artificial" op ID
+ // (which would have corresponded to the _Recv op when not using SendTensor).
+ int64 op_id = 2;
+ // The index within the repeated field is the output number that will help
+ // uniquely identify (along with the above op_id) the particular tensor.
+ repeated TensorProto tensors = 3;
+
+ // The device on which the tensors should be resident.
+ string device_name = 4;
+}
+
+message SendTensorResponse {
+}
+
////////////////////////////////////////////////////////////////////////////////
//
// Eager Service defines a TensorFlow service that executes operations eagerly
@@ -174,4 +193,8 @@ service EagerService {
// Takes a FunctionDef and makes it enqueable on the remote worker.
rpc RegisterFunction(RegisterFunctionRequest)
returns (RegisterFunctionResponse);
+
+ // An RPC to push tensors to the server. At times, certain environments don't
+ // allow the server to connect back to the client.
+ rpc SendTensor(SendTensorRequest) returns (SendTensorResponse);
}
diff --git a/tensorflow/core/protobuf/tensorflow_server.proto b/tensorflow/core/protobuf/tensorflow_server.proto
index be25804a1b..2bf48d50e1 100644
--- a/tensorflow/core/protobuf/tensorflow_server.proto
+++ b/tensorflow/core/protobuf/tensorflow_server.proto
@@ -46,6 +46,6 @@ message ServerDef {
// The protocol to be used by this server.
//
- // Acceptable values include: "grpc".
+ // Acceptable values include: "grpc", "grpc+verbs".
string protocol = 5;
}
diff --git a/tensorflow/core/public/session.h b/tensorflow/core/public/session.h
index d58c877cfd..cc8596ef3d 100644
--- a/tensorflow/core/public/session.h
+++ b/tensorflow/core/public/session.h
@@ -237,7 +237,7 @@ class Session {
/// If session creation succeeds, the new `Session` will be stored in
/// `*out_session`, the caller will take ownership of the returned
/// `*out_session`, and this function will return `OK()`. Otherwise, this
-/// function will return an error status.
+/// function will return an error status and set *out_session to nullptr.
Status NewSession(const SessionOptions& options, Session** out_session);
/// \brief Resets resource containers associated with a target.
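A short sketch of the calling pattern the updated comment now guarantees; on failure there is no partially constructed session to clean up:

    tensorflow::Session* session = nullptr;
    tensorflow::Status s =
        tensorflow::NewSession(tensorflow::SessionOptions(), &session);
    if (!s.ok()) {
      // *out_session (here: session) is documented to be nullptr on error,
      // so nothing needs to be deleted before reporting the failure.
    }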
diff --git a/tensorflow/core/public/version.h b/tensorflow/core/public/version.h
index cb1fd09dbb..cea5e8ffb0 100644
--- a/tensorflow/core/public/version.h
+++ b/tensorflow/core/public/version.h
@@ -24,7 +24,7 @@ limitations under the License.
// TF_VERSION_SUFFIX is non-empty for pre-releases (e.g. "-alpha", "-alpha.1",
// "-beta", "-rc", "-rc.1")
-#define TF_VERSION_SUFFIX "-rc0"
+#define TF_VERSION_SUFFIX ""
#define TF_STR_HELPER(x) #x
#define TF_STR(x) TF_STR_HELPER(x)
diff --git a/tensorflow/core/util/batch_util.cc b/tensorflow/core/util/batch_util.cc
index 7ea8851e65..45556d53a4 100644
--- a/tensorflow/core/util/batch_util.cc
+++ b/tensorflow/core/util/batch_util.cc
@@ -264,6 +264,7 @@ Status CopyElementToLargerSlice(const Tensor& element, Tensor* parent,
HANDLE_DIMS(2);
HANDLE_DIMS(3);
HANDLE_DIMS(4);
+ HANDLE_DIMS(5);
#undef HANDLE_DIMS
default:
return errors::Unimplemented("CopyElementToLargerSlice Unhandled rank: ",
diff --git a/tensorflow/core/util/ctc/ctc_loss_util.h b/tensorflow/core/util/ctc/ctc_loss_util.h
index 9c71f58e23..50f8f49f1c 100644
--- a/tensorflow/core/util/ctc/ctc_loss_util.h
+++ b/tensorflow/core/util/ctc/ctc_loss_util.h
@@ -31,8 +31,10 @@ const float kLogZero = -std::numeric_limits<float>::infinity();
inline float LogSumExp(float log_prob_1, float log_prob_2) {
// Always have 'b' be the smaller number to avoid the exponential from
// blowing up.
- if (log_prob_1 == kLogZero && log_prob_2 == kLogZero) {
- return kLogZero;
+ if (log_prob_1 == kLogZero) {
+ return log_prob_2;
+ } else if (log_prob_2 == kLogZero) {
+ return log_prob_1;
} else {
return (log_prob_1 > log_prob_2)
? log_prob_1 + log1pf(expf(log_prob_2 - log_prob_1))
diff --git a/tensorflow/core/util/cuda_launch_config.h b/tensorflow/core/util/cuda_launch_config.h
index 81df7a51d7..d0d95736d3 100644
--- a/tensorflow/core/util/cuda_launch_config.h
+++ b/tensorflow/core/util/cuda_launch_config.h
@@ -295,7 +295,7 @@ inline const cudaStream_t& GetCudaStream(OpKernelContext* context) {
reinterpret_cast<const cudaStream_t*>(context->op_device_context()
->stream()
->implementation()
- ->CudaStreamMemberHack()));
+ ->GpuStreamMemberHack()));
return *ptr;
}
diff --git a/tensorflow/core/util/proto/BUILD b/tensorflow/core/util/proto/BUILD
index ade14ed162..7e549c7764 100644
--- a/tensorflow/core/util/proto/BUILD
+++ b/tensorflow/core/util/proto/BUILD
@@ -60,3 +60,13 @@ cc_library(
],
alwayslink = 1,
)
+
+cc_library(
+ name = "proto_utils",
+ srcs = ["proto_utils.cc"],
+ hdrs = ["proto_utils.h"],
+ deps = [
+ "//tensorflow/core:framework",
+ "//tensorflow/core:lib",
+ ],
+)
diff --git a/tensorflow/core/util/proto/decode.h b/tensorflow/core/util/proto/decode.h
index 74634a356a..cbcb203ee7 100644
--- a/tensorflow/core/util/proto/decode.h
+++ b/tensorflow/core/util/proto/decode.h
@@ -27,6 +27,7 @@ limitations under the License.
#define TENSORFLOW_CORE_UTIL_PROTO_DECODE_H_
#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
@@ -103,6 +104,16 @@ template <class TensorType, enum WireFormatLite::FieldType DeclaredType>
const uint8* ReadFromArray(const uint8* buf, TensorType* value);
template <>
+inline const uint8* ReadFromArray<int64, WireFormatLite::TYPE_INT32>(
+ const uint8* buf, int64* value) {
+ uint32 temp;
+ bool unused_ok; // The Counting pass would have failed if this were corrupt.
+ buf = ReadVarint32FromArray(buf, &unused_ok, &temp);
+ *value = static_cast<int64>(temp);
+ return buf;
+}
+
+template <>
inline const uint8* ReadFromArray<int32, WireFormatLite::TYPE_INT32>(
const uint8* buf, int32* value) {
uint32 temp;
@@ -123,8 +134,8 @@ inline const uint8* ReadFromArray<int64, WireFormatLite::TYPE_INT64>(
}
template <>
-inline const uint8* ReadFromArray<int64, WireFormatLite::TYPE_UINT32>(
- const uint8* buf, int64* value) {
+inline const uint8* ReadFromArray<uint64, WireFormatLite::TYPE_UINT32>(
+ const uint8* buf, uint64* value) {
uint32 temp;
bool unused_ok; // The Counting pass would have failed if this were corrupt.
buf = ReadVarint32FromArray(buf, &unused_ok, &temp);
@@ -133,22 +144,26 @@ inline const uint8* ReadFromArray<int64, WireFormatLite::TYPE_UINT32>(
}
template <>
-inline const uint8* ReadFromArray<int32, WireFormatLite::TYPE_UINT32>(
- const uint8* buf, int32* value) {
- uint32 temp;
+inline const uint8* ReadFromArray<uint32, WireFormatLite::TYPE_UINT32>(
+ const uint8* buf, uint32* value) {
bool unused_ok; // The Counting pass would have failed if this were corrupt.
- buf = ReadVarint32FromArray(buf, &unused_ok, &temp);
- *value = WrapUnsignedAsSigned32(temp);
- return buf;
+ return ReadVarint32FromArray(buf, &unused_ok, value);
+}
+
+template <>
+inline const uint8* ReadFromArray<uint64, WireFormatLite::TYPE_UINT64>(
+ const uint8* buf, uint64* value) {
+ bool unused_ok; // The Counting pass would have failed if this were corrupt.
+ return ReadVarint64FromArray(buf, &unused_ok, value);
}
template <>
-inline const uint8* ReadFromArray<int64, WireFormatLite::TYPE_UINT64>(
+inline const uint8* ReadFromArray<int64, WireFormatLite::TYPE_SINT32>(
const uint8* buf, int64* value) {
uint64 temp;
bool unused_ok; // The Counting pass would have failed if this were corrupt.
buf = ReadVarint64FromArray(buf, &unused_ok, &temp);
- *value = static_cast<int64>(temp);
+ *value = WireFormatLite::ZigZagDecode32(temp);
return buf;
}
@@ -173,8 +188,8 @@ inline const uint8* ReadFromArray<int64, WireFormatLite::TYPE_SINT64>(
}
template <>
-inline const uint8* ReadFromArray<int64, WireFormatLite::TYPE_FIXED32>(
- const uint8* buf, int64* value) {
+inline const uint8* ReadFromArray<uint64, WireFormatLite::TYPE_FIXED32>(
+ const uint8* buf, uint64* value) {
uint32 temp;
buf = WireFormatLite::ReadPrimitiveFromArray<uint32,
WireFormatLite::TYPE_FIXED32>(
@@ -184,8 +199,8 @@ inline const uint8* ReadFromArray<int64, WireFormatLite::TYPE_FIXED32>(
}
template <>
-inline const uint8* ReadFromArray<int32, WireFormatLite::TYPE_FIXED32>(
- const uint8* buf, int32* value) {
+inline const uint8* ReadFromArray<uint32, WireFormatLite::TYPE_FIXED32>(
+ const uint8* buf, uint32* value) {
uint32 temp;
buf = WireFormatLite::ReadPrimitiveFromArray<uint32,
WireFormatLite::TYPE_FIXED32>(
@@ -195,8 +210,8 @@ inline const uint8* ReadFromArray<int32, WireFormatLite::TYPE_FIXED32>(
}
template <>
-inline const uint8* ReadFromArray<int64, WireFormatLite::TYPE_FIXED64>(
- const uint8* buf, int64* value) {
+inline const uint8* ReadFromArray<uint64, WireFormatLite::TYPE_FIXED64>(
+ const uint8* buf, uint64* value) {
protobuf_uint64 temp;
buf = WireFormatLite::ReadPrimitiveFromArray<protobuf_uint64,
WireFormatLite::TYPE_FIXED64>(
@@ -206,6 +221,17 @@ inline const uint8* ReadFromArray<int64, WireFormatLite::TYPE_FIXED64>(
}
template <>
+inline const uint8* ReadFromArray<int64, WireFormatLite::TYPE_SFIXED32>(
+ const uint8* buf, int64* value) {
+ int32 temp;
+ buf = WireFormatLite::ReadPrimitiveFromArray<int32,
+ WireFormatLite::TYPE_SFIXED32>(
+ buf, &temp);
+ *value = temp;
+ return buf;
+}
+
+template <>
inline const uint8* ReadFromArray<int32, WireFormatLite::TYPE_SFIXED32>(
const uint8* buf, int32* value) {
return WireFormatLite::ReadPrimitiveFromArray<int32,
@@ -233,6 +259,17 @@ inline const uint8* ReadFromArray<float, WireFormatLite::TYPE_FLOAT>(
}
template <>
+inline const uint8* ReadFromArray<double, WireFormatLite::TYPE_FLOAT>(
+ const uint8* buf, double* value) {
+ float temp;
+ buf =
+ WireFormatLite::ReadPrimitiveFromArray<float, WireFormatLite::TYPE_FLOAT>(
+ buf, &temp);
+ *value = temp;
+ return buf;
+}
+
+template <>
inline const uint8* ReadFromArray<double, WireFormatLite::TYPE_DOUBLE>(
const uint8* buf, double* value) {
return WireFormatLite::ReadPrimitiveFromArray<double,
@@ -334,48 +371,56 @@ inline Status ReadGroupBytes(CodedInputStream* input, int field_number,
inline Status ReadValue(CodedInputStream* input,
WireFormatLite::FieldType field_type, int field_number,
DataType dtype, int index, void* datap) {
- // Dispatch to the appropriately typed field reader based on the
- // schema type.
+ // Dispatch to the appropriately typed field reader based on the schema type.
switch (field_type) {
case WireFormatLite::TYPE_DOUBLE:
return ReadPrimitive<double, double, WireFormatLite::TYPE_DOUBLE>(
input, index, datap);
case WireFormatLite::TYPE_FLOAT:
- if (dtype == DataType::DT_FLOAT) {
- return ReadPrimitive<float, float, WireFormatLite::TYPE_FLOAT>(
- input, index, datap);
- }
- if (dtype == DataType::DT_DOUBLE) {
- return ReadPrimitive<float, double, WireFormatLite::TYPE_FLOAT>(
- input, index, datap);
+ switch (dtype) {
+ case DataType::DT_DOUBLE:
+ return ReadPrimitive<float, double, WireFormatLite::TYPE_FLOAT>(
+ input, index, datap);
+ case DataType::DT_FLOAT:
+ return ReadPrimitive<float, float, WireFormatLite::TYPE_FLOAT>(
+ input, index, datap);
+ default:
+ return errors::DataLoss("Failed reading TYPE_FLOAT for ",
+ DataTypeString(dtype));
}
- // Any case that reaches this point should have triggered an error
- // already.
- return errors::DataLoss("Failed reading TYPE_FLOAT");
case WireFormatLite::TYPE_INT64:
return ReadPrimitive<protobuf_int64, int64, WireFormatLite::TYPE_INT64>(
input, index, datap);
case WireFormatLite::TYPE_UINT64:
- return ReadPrimitive<protobuf_uint64, int64, WireFormatLite::TYPE_UINT64>(
- input, index, datap);
+ return ReadPrimitive<protobuf_uint64, uint64,
+ WireFormatLite::TYPE_UINT64>(input, index, datap);
case WireFormatLite::TYPE_INT32:
- return ReadPrimitive<int32, int32, WireFormatLite::TYPE_INT32>(
- input, index, datap);
+ switch (dtype) {
+ case DataType::DT_INT64:
+ return ReadPrimitive<int32, int64, WireFormatLite::TYPE_INT32>(
+ input, index, datap);
+ case DataType::DT_INT32:
+ return ReadPrimitive<int32, int32, WireFormatLite::TYPE_INT32>(
+ input, index, datap);
+ default:
+ return errors::DataLoss("Failed reading TYPE_INT32 for ",
+ DataTypeString(dtype));
+ }
case WireFormatLite::TYPE_FIXED64:
- return ReadPrimitive<protobuf_uint64, int64,
+ return ReadPrimitive<protobuf_uint64, uint64,
WireFormatLite::TYPE_FIXED64>(input, index, datap);
case WireFormatLite::TYPE_FIXED32:
- if (dtype == DataType::DT_INT64) {
- return ReadPrimitive<uint32, int64, WireFormatLite::TYPE_FIXED32>(
- input, index, datap);
- }
- if (dtype == DataType::DT_INT32) {
- return ReadPrimitive<uint32, int32, WireFormatLite::TYPE_FIXED32>(
- input, index, datap);
+ switch (dtype) {
+ case DataType::DT_UINT64:
+ return ReadPrimitive<uint32, uint64, WireFormatLite::TYPE_FIXED32>(
+ input, index, datap);
+ case DataType::DT_UINT32:
+ return ReadPrimitive<uint32, uint32, WireFormatLite::TYPE_FIXED32>(
+ input, index, datap);
+ default:
+ return errors::DataLoss("Failed reading TYPE_FIXED32 for ",
+ DataTypeString(dtype));
}
- // Any case that reaches this point should have triggered an error
- // already.
- return errors::DataLoss("Failed reading TYPE_FIXED32");
case WireFormatLite::TYPE_BOOL:
return ReadPrimitive<bool, bool, WireFormatLite::TYPE_BOOL>(input, index,
datap);
@@ -388,29 +433,47 @@ inline Status ReadValue(CodedInputStream* input,
case WireFormatLite::TYPE_BYTES:
return ReadBytes(input, index, datap);
case WireFormatLite::TYPE_UINT32:
- if (dtype == DataType::DT_INT64) {
- return ReadPrimitive<uint32, int64, WireFormatLite::TYPE_UINT32>(
- input, index, datap);
+ switch (dtype) {
+ case DataType::DT_UINT64:
+ return ReadPrimitive<uint32, uint64, WireFormatLite::TYPE_UINT32>(
+ input, index, datap);
+ case DataType::DT_UINT32:
+ return ReadPrimitive<uint32, uint32, WireFormatLite::TYPE_UINT32>(
+ input, index, datap);
+ default:
+ return errors::DataLoss("Failed reading TYPE_UINT32 for ",
+ DataTypeString(dtype));
}
- if (dtype == DataType::DT_INT32) {
- return ReadPrimitive<uint32, int32, WireFormatLite::TYPE_UINT32>(
- input, index, datap);
- }
- // Any case that reaches this point should have triggered an error
- // already.
- return errors::DataLoss("Failed reading TYPE_UINT32");
case WireFormatLite::TYPE_ENUM:
return ReadPrimitive<int32, int32, WireFormatLite::TYPE_ENUM>(
input, index, datap);
case WireFormatLite::TYPE_SFIXED32:
- return ReadPrimitive<int32, int32, WireFormatLite::TYPE_SFIXED32>(
- input, index, datap);
+ switch (dtype) {
+ case DataType::DT_INT64:
+ return ReadPrimitive<int32, int64, WireFormatLite::TYPE_SFIXED32>(
+ input, index, datap);
+ case DataType::DT_INT32:
+ return ReadPrimitive<int32, int32, WireFormatLite::TYPE_SFIXED32>(
+ input, index, datap);
+ default:
+ return errors::DataLoss("Failed reading TYPE_SFIXED32 for ",
+ DataTypeString(dtype));
+ }
case WireFormatLite::TYPE_SFIXED64:
return ReadPrimitive<protobuf_int64, int64,
WireFormatLite::TYPE_SFIXED64>(input, index, datap);
case WireFormatLite::TYPE_SINT32:
- return ReadPrimitive<int32, int32, WireFormatLite::TYPE_SINT32>(
- input, index, datap);
+ switch (dtype) {
+ case DataType::DT_INT64:
+ return ReadPrimitive<int32, int64, WireFormatLite::TYPE_SINT32>(
+ input, index, datap);
+ case DataType::DT_INT32:
+ return ReadPrimitive<int32, int32, WireFormatLite::TYPE_SINT32>(
+ input, index, datap);
+ default:
+ return errors::DataLoss("Failed reading TYPE_SINT32 for ",
+ DataTypeString(dtype));
+ }
case WireFormatLite::TYPE_SINT64:
return ReadPrimitive<protobuf_int64, int64, WireFormatLite::TYPE_SINT64>(
input, index, datap);
@@ -425,47 +488,66 @@ inline Status ReadPackedFromArray(const void* buf, size_t buf_size,
const WireFormatLite::FieldType field_type,
const int field_number, const DataType dtype,
const int stride, int* index, void* data) {
- // Dispatch to the appropriately typed field reader based on the
- // schema type.
+ // Dispatch to the appropriately typed field reader based on the schema type.
switch (field_type) {
case WireFormatLite::TYPE_DOUBLE:
*index += ReadPackedPrimitives<double, WireFormatLite::TYPE_DOUBLE>(
buf, buf_size, *index, stride, data);
return Status::OK();
case WireFormatLite::TYPE_FLOAT:
- *index += ReadPackedPrimitives<float, WireFormatLite::TYPE_FLOAT>(
- buf, buf_size, *index, stride, data);
- return Status::OK();
+ switch (dtype) {
+ case DataType::DT_DOUBLE:
+ *index += ReadPackedPrimitives<double, WireFormatLite::TYPE_FLOAT>(
+ buf, buf_size, *index, stride, data);
+ return Status::OK();
+ case DataType::DT_FLOAT:
+ *index += ReadPackedPrimitives<float, WireFormatLite::TYPE_FLOAT>(
+ buf, buf_size, *index, stride, data);
+ return Status::OK();
+ default:
+ return errors::DataLoss("Failed reading TYPE_FLOAT for ",
+ DataTypeString(dtype));
+ }
case WireFormatLite::TYPE_INT64:
*index += ReadPackedPrimitives<int64, WireFormatLite::TYPE_INT64>(
buf, buf_size, *index, stride, data);
return Status::OK();
case WireFormatLite::TYPE_UINT64:
- *index += ReadPackedPrimitives<int64, WireFormatLite::TYPE_UINT64>(
+ *index += ReadPackedPrimitives<uint64, WireFormatLite::TYPE_UINT64>(
buf, buf_size, *index, stride, data);
return Status::OK();
case WireFormatLite::TYPE_INT32:
- *index += ReadPackedPrimitives<int32, WireFormatLite::TYPE_INT32>(
- buf, buf_size, *index, stride, data);
- return Status::OK();
+ switch (dtype) {
+ case DataType::DT_INT64:
+ *index += ReadPackedPrimitives<int64, WireFormatLite::TYPE_INT32>(
+ buf, buf_size, *index, stride, data);
+ return Status::OK();
+ case DataType::DT_INT32:
+ *index += ReadPackedPrimitives<int32, WireFormatLite::TYPE_INT32>(
+ buf, buf_size, *index, stride, data);
+ return Status::OK();
+ default:
+ return errors::DataLoss("Failed reading TYPE_INT32 for ",
+ DataTypeString(dtype));
+ }
case WireFormatLite::TYPE_FIXED64:
- *index += ReadPackedPrimitives<int64, WireFormatLite::TYPE_FIXED64>(
+ *index += ReadPackedPrimitives<uint64, WireFormatLite::TYPE_FIXED64>(
buf, buf_size, *index, stride, data);
return Status::OK();
case WireFormatLite::TYPE_FIXED32:
- if (dtype == DataType::DT_INT64) {
- *index += ReadPackedPrimitives<int64, WireFormatLite::TYPE_FIXED32>(
- buf, buf_size, *index, stride, data);
- return Status::OK();
- }
- if (dtype == DataType::DT_INT32) {
- *index += ReadPackedPrimitives<int32, WireFormatLite::TYPE_FIXED32>(
- buf, buf_size, *index, stride, data);
- return Status::OK();
+ switch (dtype) {
+ case DataType::DT_UINT64:
+ *index += ReadPackedPrimitives<uint64, WireFormatLite::TYPE_FIXED32>(
+ buf, buf_size, *index, stride, data);
+ return Status::OK();
+ case DataType::DT_UINT32:
+ *index += ReadPackedPrimitives<uint32, WireFormatLite::TYPE_FIXED32>(
+ buf, buf_size, *index, stride, data);
+ return Status::OK();
+ default:
+ return errors::DataLoss("Failed reading TYPE_FIXED32 for ",
+ DataTypeString(dtype));
}
- // Any case that reaches this point should have triggered an error
- // already.
- return errors::DataLoss("Failed reading TYPE_FIXED32");
case WireFormatLite::TYPE_BOOL:
*index += ReadPackedPrimitives<bool, WireFormatLite::TYPE_BOOL>(
buf, buf_size, *index, stride, data);
@@ -476,38 +558,56 @@ inline Status ReadPackedFromArray(const void* buf, size_t buf_size,
case WireFormatLite::TYPE_BYTES:
return errors::DataLoss("Non-primitive type encountered as packed");
case WireFormatLite::TYPE_UINT32:
- if (dtype == DataType::DT_INT64) {
- *index += ReadPackedPrimitives<int64, WireFormatLite::TYPE_UINT32>(
- buf, buf_size, *index, stride, data);
- return Status::OK();
+ switch (dtype) {
+ case DataType::DT_UINT64:
+ *index += ReadPackedPrimitives<uint64, WireFormatLite::TYPE_UINT32>(
+ buf, buf_size, *index, stride, data);
+ return Status::OK();
+ case DataType::DT_UINT32:
+ *index += ReadPackedPrimitives<uint32, WireFormatLite::TYPE_UINT32>(
+ buf, buf_size, *index, stride, data);
+ return Status::OK();
+ default:
+ return errors::DataLoss("Failed reading TYPE_UINT32 for ",
+ DataTypeString(dtype));
}
- if (dtype == DataType::DT_INT32) {
- *index += ReadPackedPrimitives<int32, WireFormatLite::TYPE_UINT32>(
- buf, buf_size, *index, stride, data);
- return Status::OK();
- }
- // Any case that reaches this point should have triggered an error
- // already.
- return errors::DataLoss("Failed reading TYPE_UINT32");
case WireFormatLite::TYPE_ENUM:
*index += ReadPackedPrimitives<int32, WireFormatLite::TYPE_ENUM>(
buf, buf_size, *index, stride, data);
return Status::OK();
case WireFormatLite::TYPE_SFIXED32:
- *index += ReadPackedPrimitives<int32, WireFormatLite::TYPE_SFIXED32>(
- buf, buf_size, *index, stride, data);
- return Status::OK();
-
+ switch (dtype) {
+ case DataType::DT_INT64:
+ *index += ReadPackedPrimitives<int64, WireFormatLite::TYPE_SFIXED32>(
+ buf, buf_size, *index, stride, data);
+ return Status::OK();
+ case DataType::DT_INT32:
+ *index += ReadPackedPrimitives<int32, WireFormatLite::TYPE_SFIXED32>(
+ buf, buf_size, *index, stride, data);
+ return Status::OK();
+ default:
+ return errors::DataLoss("Failed reading TYPE_SFIXED32 for ",
+ DataTypeString(dtype));
+ }
case WireFormatLite::TYPE_SFIXED64:
*index += ReadPackedPrimitives<int64, WireFormatLite::TYPE_SFIXED64>(
buf, buf_size, *index, stride, data);
return Status::OK();
case WireFormatLite::TYPE_SINT32:
- *index += ReadPackedPrimitives<int32, WireFormatLite::TYPE_SINT32>(
- buf, buf_size, *index, stride, data);
- return Status::OK();
-
+ switch (dtype) {
+ case DataType::DT_INT64:
+ *index += ReadPackedPrimitives<int64, WireFormatLite::TYPE_SINT32>(
+ buf, buf_size, *index, stride, data);
+ return Status::OK();
+ case DataType::DT_INT32:
+ *index += ReadPackedPrimitives<int32, WireFormatLite::TYPE_SINT32>(
+ buf, buf_size, *index, stride, data);
+ return Status::OK();
+ default:
+ return errors::DataLoss("Failed reading TYPE_SINT32 for ",
+ DataTypeString(dtype));
+ }
case WireFormatLite::TYPE_SINT64:
*index += ReadPackedPrimitives<int64, WireFormatLite::TYPE_SINT64>(
buf, buf_size, *index, stride, data);
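For reference, the sint32/sint64 readers above rely on ZigZag decoding, which maps small varints back to signed values (0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, ...). A standalone sketch equivalent to WireFormatLite::ZigZagDecode32:

    #include <cstdint>

    inline int32_t ZigZagDecode32Sketch(uint32_t n) {
      // Low bit carries the sign; remaining bits carry the magnitude.
      return static_cast<int32_t>(n >> 1) ^ -static_cast<int32_t>(n & 1);
    }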
diff --git a/tensorflow/core/util/proto/proto_utils.cc b/tensorflow/core/util/proto/proto_utils.cc
new file mode 100644
index 0000000000..201f05a129
--- /dev/null
+++ b/tensorflow/core/util/proto/proto_utils.cc
@@ -0,0 +1,70 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/platform/protobuf.h"
+
+#include "tensorflow/core/util/proto/proto_utils.h"
+
+namespace tensorflow {
+namespace proto_utils {
+
+using tensorflow::protobuf::FieldDescriptor;
+using tensorflow::protobuf::internal::WireFormatLite;
+
+bool IsCompatibleType(FieldDescriptor::Type field_type, DataType dtype) {
+ switch (field_type) {
+ case WireFormatLite::TYPE_DOUBLE:
+ return dtype == tensorflow::DT_DOUBLE;
+ case WireFormatLite::TYPE_FLOAT:
+ return dtype == tensorflow::DT_FLOAT || dtype == tensorflow::DT_DOUBLE;
+ case WireFormatLite::TYPE_INT64:
+ return dtype == tensorflow::DT_INT64;
+ case WireFormatLite::TYPE_UINT64:
+ return dtype == tensorflow::DT_UINT64;
+ case WireFormatLite::TYPE_INT32:
+ return dtype == tensorflow::DT_INT32 || dtype == tensorflow::DT_INT64;
+ case WireFormatLite::TYPE_FIXED64:
+ return dtype == tensorflow::DT_UINT64;
+ case WireFormatLite::TYPE_FIXED32:
+ return dtype == tensorflow::DT_UINT32 || dtype == tensorflow::DT_UINT64;
+ case WireFormatLite::TYPE_BOOL:
+ return dtype == tensorflow::DT_BOOL;
+ case WireFormatLite::TYPE_STRING:
+ return dtype == tensorflow::DT_STRING;
+ case WireFormatLite::TYPE_GROUP:
+ return dtype == tensorflow::DT_STRING;
+ case WireFormatLite::TYPE_MESSAGE:
+ return dtype == tensorflow::DT_STRING;
+ case WireFormatLite::TYPE_BYTES:
+ return dtype == tensorflow::DT_STRING;
+ case WireFormatLite::TYPE_UINT32:
+ return dtype == tensorflow::DT_UINT32 || dtype == tensorflow::DT_UINT64;
+ case WireFormatLite::TYPE_ENUM:
+ return dtype == tensorflow::DT_INT32;
+ case WireFormatLite::TYPE_SFIXED32:
+ return dtype == tensorflow::DT_INT32 || dtype == tensorflow::DT_INT64;
+ case WireFormatLite::TYPE_SFIXED64:
+ return dtype == tensorflow::DT_INT64;
+ case WireFormatLite::TYPE_SINT32:
+ return dtype == tensorflow::DT_INT32 || dtype == tensorflow::DT_INT64;
+ case WireFormatLite::TYPE_SINT64:
+ return dtype == tensorflow::DT_INT64;
+ // default: intentionally omitted in order to enable static checking.
+ }
+}
+
+} // namespace proto_utils
+} // namespace tensorflow
diff --git a/tensorflow/core/util/proto/proto_utils.h b/tensorflow/core/util/proto/proto_utils.h
new file mode 100644
index 0000000000..d5e0b9006c
--- /dev/null
+++ b/tensorflow/core/util/proto/proto_utils.h
@@ -0,0 +1,33 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CORE_UTIL_PROTO_PROTO_UTILS_H_
+#define TENSORFLOW_CORE_UTIL_PROTO_PROTO_UTILS_H_
+
+#include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/platform/protobuf.h"
+
+namespace tensorflow {
+namespace proto_utils {
+
+using tensorflow::protobuf::FieldDescriptor;
+
+// Returns true if the proto field type can be converted to the tensor dtype.
+bool IsCompatibleType(FieldDescriptor::Type field_type, DataType dtype);
+
+} // namespace proto_utils
+} // namespace tensorflow
+
+#endif // TENSORFLOW_CORE_UTIL_PROTO_PROTO_UTILS_H_
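A hypothetical caller sketch for the new helper; field_desc and output_dtype are assumed to be a protobuf FieldDescriptor for the schema field and the op's requested output DataType:

    if (!tensorflow::proto_utils::IsCompatibleType(field_desc->type(),
                                                   output_dtype)) {
      return tensorflow::errors::InvalidArgument(
          "Unexpected output dtype ", tensorflow::DataTypeString(output_dtype),
          " for proto field ", field_desc->name());
    }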
diff --git a/tensorflow/core/util/sparse/dim_comparator.h b/tensorflow/core/util/sparse/dim_comparator.h
index b773b33008..0782e7e1a8 100644
--- a/tensorflow/core/util/sparse/dim_comparator.h
+++ b/tensorflow/core/util/sparse/dim_comparator.h
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_UTIL_SPARSE_DIM_COMPARATOR_H_
-#define TENSORFLOW_UTIL_SPARSE_DIM_COMPARATOR_H_
+#ifndef TENSORFLOW_CORE_UTIL_SPARSE_DIM_COMPARATOR_H_
+#define TENSORFLOW_CORE_UTIL_SPARSE_DIM_COMPARATOR_H_
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/kernels/bounds_check.h"
@@ -49,11 +49,11 @@ class DimComparator {
DimComparator(const TTypes<int64>::Matrix& ix, const VarDimArray& order,
const VarDimArray& shape)
: ix_(ix), order_(order), dims_(shape.size()) {
- CHECK_GT(order.size(), size_t{0}) << "Must order using at least one index";
- CHECK_LE(order.size(), shape.size()) << "Can only sort up to dims";
+ DCHECK_GT(order.size(), size_t{0}) << "Must order using at least one index";
+ DCHECK_LE(order.size(), shape.size()) << "Can only sort up to dims";
for (size_t d = 0; d < order.size(); ++d) {
- CHECK_GE(order[d], 0);
- CHECK_LT(order[d], shape.size());
+ DCHECK_GE(order[d], 0);
+ DCHECK_LT(order[d], shape.size());
}
}
@@ -97,7 +97,7 @@ class FixedDimComparator : DimComparator {
FixedDimComparator(const TTypes<int64>::Matrix& ix, const VarDimArray& order,
const VarDimArray& shape)
: DimComparator(ix, order, shape) {
- CHECK_EQ(order.size(), ORDER_DIM);
+ DCHECK_EQ(order.size(), ORDER_DIM);
}
inline bool operator()(const int64 i, const int64 j) const {
bool value = false;
@@ -116,4 +116,4 @@ class FixedDimComparator : DimComparator {
} // namespace sparse
} // namespace tensorflow
-#endif // TENSORFLOW_UTIL_SPARSE_DIM_COMPARATOR_H_
+#endif // TENSORFLOW_CORE_UTIL_SPARSE_DIM_COMPARATOR_H_
diff --git a/tensorflow/core/util/sparse/group_iterator.h b/tensorflow/core/util/sparse/group_iterator.h
index fb70318078..3fa8cb6116 100644
--- a/tensorflow/core/util/sparse/group_iterator.h
+++ b/tensorflow/core/util/sparse/group_iterator.h
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_UTIL_SPARSE_GROUP_ITERATOR_H_
-#define TENSORFLOW_UTIL_SPARSE_GROUP_ITERATOR_H_
+#ifndef TENSORFLOW_CORE_UTIL_SPARSE_GROUP_ITERATOR_H_
+#define TENSORFLOW_CORE_UTIL_SPARSE_GROUP_ITERATOR_H_
#include <vector>
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
@@ -143,4 +143,4 @@ typename TTypes<T>::UnalignedVec Group::values() const {
} // namespace sparse
} // namespace tensorflow
-#endif // TENSORFLOW_UTIL_SPARSE_GROUP_ITERATOR_H_
+#endif // TENSORFLOW_CORE_UTIL_SPARSE_GROUP_ITERATOR_H_
diff --git a/tensorflow/core/util/sparse/sparse_tensor.h b/tensorflow/core/util/sparse/sparse_tensor.h
index 258ee418c1..0f04b65f60 100644
--- a/tensorflow/core/util/sparse/sparse_tensor.h
+++ b/tensorflow/core/util/sparse/sparse_tensor.h
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
-#ifndef TENSORFLOW_UTIL_SPARSE_SPARSE_TENSOR_H_
-#define TENSORFLOW_UTIL_SPARSE_SPARSE_TENSOR_H_
+#ifndef TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
+#define TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
#include <limits>
#include <numeric>
@@ -26,8 +26,10 @@ limitations under the License.
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/bounds_check.h"
+#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/sparse/dim_comparator.h"
@@ -41,32 +43,88 @@ class SparseTensor {
typedef typename gtl::ArraySlice<int64> VarDimArray;
typedef typename gtl::InlinedVector<int64, 8> ShapeArray;
+ static Status Create(Tensor ix, Tensor vals, const VarDimArray shape,
+ const VarDimArray order, SparseTensor* result) {
+ if (ix.dtype() != DT_INT64) {
+ return Status(
+ error::INVALID_ARGUMENT,
+ strings::StrCat("indices must be type int64 but got: ", ix.dtype()));
+ }
+ if (!TensorShapeUtils::IsVector(vals.shape())) {
+ return Status(error::INVALID_ARGUMENT,
+ strings::StrCat("vals must be a vec, but got: ",
+ vals.shape().DebugString()));
+ }
+ if (ix.shape().dim_size(0) != vals.shape().dim_size(0)) {
+ return Status(error::INVALID_ARGUMENT,
+ strings::StrCat("indices and values rows (indexing "
+ "dimension) must match. (indices = ",
+ ix.shape().dim_size(0), ", values = ",
+ vals.shape().dim_size(0), ")"));
+ }
+ int dims;
+ TF_RETURN_IF_ERROR(GetDimsFromIx(ix, &dims));
+ if (order.size() != dims) {
+ return Status(error::INVALID_ARGUMENT,
+ "Order length must be SparseTensor rank.");
+ }
+ if (shape.size() != dims) {
+ return Status(error::INVALID_ARGUMENT,
+ "Shape rank must be SparseTensor rank.");
+ }
+
+ *result = SparseTensor(ix, vals, shape, order);
+ return Status();
+ }
+
+ static Status Create(Tensor ix, Tensor vals, const TensorShape& shape,
+ SparseTensor* result) {
+ return Create(ix, vals, TensorShapeToVector(shape),
+ UndefinedOrder(TensorShapeToVector(shape)), result);
+ }
+
+ static Status Create(Tensor ix, Tensor vals, const VarDimArray shape,
+ SparseTensor* result) {
+ return Create(ix, vals, shape, UndefinedOrder(shape), result);
+ }
+
+ static Status Create(Tensor ix, Tensor vals, const TensorShape& shape,
+ const VarDimArray order, SparseTensor* result) {
+ return Create(ix, vals, TensorShapeToVector(shape), order, result);
+ }
+
+ SparseTensor() : dims_(0) {}
+
+ // DEPRECATED: use Create() functions instead of constructors directly.
SparseTensor(Tensor ix, Tensor vals, const TensorShape& shape)
: SparseTensor(ix, vals, TensorShapeToVector(shape),
UndefinedOrder(TensorShapeToVector(shape))) {}
+ // DEPRECATED: use Create() functions instead of constructors directly.
SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape)
: SparseTensor(ix, vals, shape, UndefinedOrder(shape)) {}
+ // DEPRECATED: use Create() functions instead of constructors directly.
SparseTensor(Tensor ix, Tensor vals, const TensorShape& shape,
const VarDimArray order)
: SparseTensor(ix, vals, TensorShapeToVector(shape), order) {}
+ // DEPRECATED: use Create() functions instead of constructors directly.
SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape,
const VarDimArray order)
: ix_(ix),
vals_(vals),
shape_(shape.begin(), shape.end()),
order_(order.begin(), order.end()),
- dims_(GetDimsFromIx(ix)) {
- CHECK_EQ(ix.dtype(), DT_INT64)
+ dims_(UnsafeGetDimsFromIx(ix)) {
+ DCHECK_EQ(ix.dtype(), DT_INT64)
<< "indices must be type int64 but got: " << ix.dtype();
- CHECK(TensorShapeUtils::IsVector(vals.shape()))
+ DCHECK(TensorShapeUtils::IsVector(vals.shape()))
<< "vals must be a vec, but got: " << vals.shape().DebugString();
- CHECK_EQ(ix.shape().dim_size(0), vals.shape().dim_size(0))
+ DCHECK_EQ(ix.shape().dim_size(0), vals.shape().dim_size(0))
<< "indices and values rows (indexing dimension) must match.";
- CHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank.";
- CHECK_EQ(shape.size(), dims_) << "Shape rank must be SparseTensor rank.";
+ DCHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank.";
+ DCHECK_EQ(shape.size(), dims_) << "Shape rank must be SparseTensor rank.";
}
SparseTensor(const SparseTensor& other)
@@ -81,6 +139,16 @@ class SparseTensor {
vals_ = other.vals_;
shape_ = other.shape_;
order_ = other.order_;
+ dims_ = other.dims_;
+ return *this;
+ }
+
+ SparseTensor& operator=(SparseTensor&& other) {
+ ix_ = std::move(other.ix_);
+ vals_ = std::move(other.vals_);
+ shape_ = std::move(other.shape_);
+ order_ = std::move(other.order_);
+ dims_ = std::move(other.dims_);
return *this;
}
@@ -126,11 +194,11 @@ class SparseTensor {
//
// See the README.md in this directory for more usage information.
GroupIterable group(const VarDimArray& group_ix) const {
- CHECK_LE(group_ix.size(), dims_);
+ DCHECK_LE(group_ix.size(), dims_);
for (std::size_t di = 0; di < group_ix.size(); ++di) {
- CHECK_GE(group_ix[di], 0) << "Group dimension out of range";
- CHECK_LT(group_ix[di], dims_) << "Group dimension out of range";
- CHECK_EQ(group_ix[di], order_[di])
+ DCHECK_GE(group_ix[di], 0) << "Group dimension out of range";
+ DCHECK_LT(group_ix[di], dims_) << "Group dimension out of range";
+ DCHECK_EQ(group_ix[di], order_[di])
<< "Group dimension does not match sorted order";
}
return GroupIterable(ix_, vals_, dims_, group_ix);
@@ -166,9 +234,16 @@ class SparseTensor {
// isn't an integer multiple of split_dim, we add one extra dimension for
// each slice.
template <typename T>
+ static Status Split(const SparseTensor& tensor, const int split_dim,
+ const int num_split, std::vector<SparseTensor>* result);
+
+ // DEPRECATED: use the form of Split() that takes an output pointer and
+ // returns a status instead.
+ template <typename T>
static std::vector<SparseTensor> Split(const SparseTensor& tensor,
const int split_dim,
- const int num_split);
+ const int num_split,
+ Status* status = nullptr);
// Slice() will slice the input SparseTensor into a SparseTensor based on
// specified start and size. Both start and size are 1-D array with each
@@ -189,9 +264,18 @@ class SparseTensor {
}
private:
- static int GetDimsFromIx(const Tensor& ix) {
- CHECK(TensorShapeUtils::IsMatrix(ix.shape()))
- << "indices must be a matrix, but got: " << ix.shape().DebugString();
+ static Status GetDimsFromIx(const Tensor& ix, int* result) {
+ if (!TensorShapeUtils::IsMatrix(ix.shape())) {
+ return Status(error::INVALID_ARGUMENT,
+ strings::StrCat("indices must be a matrix, but got: ",
+ ix.shape().DebugString()));
+ }
+ *result = UnsafeGetDimsFromIx(ix);
+ return Status();
+ }
+
+ static int UnsafeGetDimsFromIx(const Tensor& ix) {
+ DCHECK(TensorShapeUtils::IsMatrix(ix.shape()));
return ix.dim_size(1);
}
@@ -251,8 +335,8 @@ class SparseTensor {
// Helper for Split() that returns the slice index.
static inline int GetSliceIndex(const int dim, const int split_size,
const int residual) {
- CHECK_GT(split_size, 0);
- CHECK_GE(dim, 0);
+ DCHECK_GT(split_size, 0);
+ DCHECK_GE(dim, 0);
if (residual == 0) return dim / split_size;
const int offset = residual * (split_size + 1);
if (dim < offset) {
@@ -265,8 +349,8 @@ class SparseTensor {
// Helper for Split() that returns the dimension in the slice.
static inline int GetDimensionInSlice(const int dim, const int split_size,
const int residual) {
- CHECK_GT(split_size, 0);
- CHECK_GE(dim, 0);
+ DCHECK_GT(split_size, 0);
+ DCHECK_GE(dim, 0);
if (residual == 0) return dim % split_size;
const int offset = residual * (split_size + 1);
if (dim < offset) {
@@ -279,8 +363,8 @@ class SparseTensor {
// Helper for Split() that returns the shape given a slice index.
static inline int GetSliceShape(const int slice_index, const int split_size,
const int residual) {
- CHECK_GT(split_size, 0);
- CHECK_GE(slice_index, 0);
+ DCHECK_GT(split_size, 0);
+ DCHECK_GE(slice_index, 0);
if (residual == 0) return split_size;
if (slice_index < residual) {
return split_size + 1;
@@ -293,7 +377,7 @@ class SparseTensor {
Tensor vals_;
ShapeArray shape_;
ShapeArray order_;
- const int dims_;
+ int dims_;
};
// This operation updates the indices and values Tensor rows, so it is
@@ -301,9 +385,9 @@ class SparseTensor {
// temporary space.
template <typename T>
void SparseTensor::Reorder(const VarDimArray& order) {
- CHECK_EQ(DataTypeToEnum<T>::v(), dtype())
+ DCHECK_EQ(DataTypeToEnum<T>::v(), dtype())
<< "Reorder requested with the wrong datatype";
- CHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank";
+ DCHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank";
auto ix_t = ix_.matrix<int64>();
auto vals_t = vals_.vec<T>();
@@ -360,13 +444,13 @@ void SparseTensor::Reorder(const VarDimArray& order) {
template <typename T>
bool SparseTensor::ValidateAndInitializeToDense(Tensor* out, bool initialize) {
- CHECK_EQ(DataTypeToEnum<T>::v(), dtype())
+ DCHECK_EQ(DataTypeToEnum<T>::v(), dtype())
<< "ToDense requested with the wrong datatype";
- CHECK_EQ(out->shape().dims(), dims_)
+ DCHECK_EQ(out->shape().dims(), dims_)
<< "Incompatible dimensions between SparseTensor and output";
- CHECK_EQ(out->dtype(), DataTypeToEnum<T>::v())
+ DCHECK_EQ(out->dtype(), DataTypeToEnum<T>::v())
<< "Output must be type: " << DataTypeToEnum<T>::v()
<< " but got: " << out->dtype();
@@ -422,9 +506,9 @@ bool SparseTensor::ToDense(Tensor* out, bool initialize) {
template <typename T>
SparseTensor SparseTensor::Concat(
const gtl::ArraySlice<SparseTensor>& tensors) {
- CHECK_GE(tensors.size(), size_t{1}) << "Cannot concat 0 SparseTensors";
+ DCHECK_GE(tensors.size(), size_t{1}) << "Cannot concat 0 SparseTensors";
const int dims = tensors[0].dims_;
- CHECK_GE(dims, 1) << "Cannot concat 0-dimensional SparseTensors";
+ DCHECK_GE(dims, 1) << "Cannot concat 0-dimensional SparseTensors";
auto order_0 = tensors[0].order();
const int primary_dim = order_0[0];
ShapeArray final_order(order_0.begin(), order_0.end());
@@ -434,17 +518,17 @@ SparseTensor SparseTensor::Concat(
bool fully_ordered = true;
for (const SparseTensor& st : tensors) {
- CHECK_EQ(st.dims_, dims) << "All SparseTensors must have the same rank.";
- CHECK_EQ(DataTypeToEnum<T>::v(), st.dtype())
+ DCHECK_EQ(st.dims_, dims) << "All SparseTensors must have the same rank.";
+ DCHECK_EQ(DataTypeToEnum<T>::v(), st.dtype())
<< "Concat requested with the wrong data type";
- CHECK_GE(st.order()[0], 0) << "SparseTensor must be ordered";
- CHECK_EQ(st.order()[0], primary_dim)
+ DCHECK_GE(st.order()[0], 0) << "SparseTensor must be ordered";
+ DCHECK_EQ(st.order()[0], primary_dim)
<< "All SparseTensors' order[0] must match. This is the concat dim.";
if (st.order() != final_order) fully_ordered = false;
const VarDimArray& st_shape = st.shape();
for (int d = 0; d < dims - 1; ++d) {
const int cdim = (d < primary_dim) ? d : d + 1;
- CHECK_EQ(final_shape[cdim], st_shape[cdim])
+ DCHECK_EQ(final_shape[cdim], st_shape[cdim])
<< "All SparseTensors' shapes must match except on the concat dim. "
<< "Concat dim: " << primary_dim
<< ", mismatched shape at dim: " << cdim
@@ -494,7 +578,8 @@ SparseTensor SparseTensor::Concat(
template <typename T>
std::vector<SparseTensor> SparseTensor::Split(const SparseTensor& input_tensor,
const int split_dim,
- const int num_split) {
+ const int num_split,
+ Status* status /* = nullptr */) {
std::vector<Tensor> output_indices;
std::vector<Tensor> output_values;
std::vector<TensorShape> output_shapes;
@@ -514,12 +599,18 @@ std::vector<SparseTensor> SparseTensor::Split(const SparseTensor& input_tensor,
const int split_dim_size = input_tensor.shape()[split_dim];
const int split_size = split_dim_size / num_split;
- CHECK(num_split > 0 && num_split <= split_dim_size) << "num_split must be in "
- "the interval (0, "
- << split_dim_size << "]";
- CHECK(split_dim >= 0 && split_dim < num_dim) << "num_dim must be in "
- "the interval [0, "
- << num_dim << ")";
+ if (!(num_split > 0 && num_split <= split_dim_size) && status != nullptr) {
+ *status = Status(error::INVALID_ARGUMENT,
+ strings::StrCat("num_split must be in the interval (0, ",
+ split_dim_size, "]"));
+ return {};
+ }
+ if (!(split_dim >= 0 && split_dim < num_dim) && status != nullptr) {
+ *status = Status(
+ error::INVALID_ARGUMENT,
+ strings::StrCat("split_dim must be in the interval [0, ", num_dim, ")"));
+ return {};
+ }
const int residual = split_dim_size % num_split;
for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) {
@@ -559,13 +650,28 @@ std::vector<SparseTensor> SparseTensor::Split(const SparseTensor& input_tensor,
std::vector<SparseTensor> output_tensors;
output_tensors.reserve(num_split);
for (int i = 0; i < num_split; ++i) {
- output_tensors.emplace_back(output_indices[i], output_values[i],
- output_shapes[i]);
+ SparseTensor tensor;
+ Status create_status =
+ Create(output_indices[i], output_values[i], output_shapes[i], &tensor);
+ if (!create_status.ok() && status != nullptr) {
+ *status = create_status;
+ return {};
+ }
+ output_tensors.push_back(std::move(tensor));
}
return output_tensors;
}
template <typename T>
+Status SparseTensor::Split(const SparseTensor& input_tensor,
+ const int split_dim, const int num_split,
+ std::vector<SparseTensor>* result) {
+ Status status;
+ *result = Split<T>(input_tensor, split_dim, num_split, &status);
+ return status;
+}
+
+template <typename T>
SparseTensor SparseTensor::Slice(const SparseTensor& input_tensor,
const gtl::ArraySlice<int64>& start,
const gtl::ArraySlice<int64>& size) {
@@ -643,4 +749,4 @@ SparseTensor SparseTensor::Slice(const SparseTensor& input_tensor,
} // namespace sparse
} // namespace tensorflow
-#endif // TENSORFLOW_UTIL_SPARSE_SPARSE_TENSOR_H_
+#endif // TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
diff --git a/tensorflow/core/util/sparse/sparse_tensor_test.cc b/tensorflow/core/util/sparse/sparse_tensor_test.cc
index 85de032085..5578e42625 100644
--- a/tensorflow/core/util/sparse/sparse_tensor_test.cc
+++ b/tensorflow/core/util/sparse/sparse_tensor_test.cc
@@ -94,9 +94,12 @@ TEST(SparseTensorTest, SparseTensorInvalidIndicesType) {
const int NDIM = 3;
Tensor ix(DT_INT32, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
+ SparseTensor result;
- EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2}),
- "indices must be type int64");
+ EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
+ &result)
+ .code(),
+ error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidIndicesShape) {
@@ -104,9 +107,12 @@ TEST(SparseTensorTest, SparseTensorInvalidIndicesShape) {
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM, 1}));
Tensor vals(DT_STRING, TensorShape({N}));
+ SparseTensor result;
- EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2}),
- "indices must be a matrix");
+ EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
+ &result)
+ .code(),
+ error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidValues) {
@@ -114,9 +120,12 @@ TEST(SparseTensorTest, SparseTensorInvalidValues) {
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N, 1}));
+ SparseTensor result;
- EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2}),
- "vals must be a vec");
+ EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
+ &result)
+ .code(),
+ error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidN) {
@@ -124,9 +133,12 @@ TEST(SparseTensorTest, SparseTensorInvalidN) {
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N - 1}));
+ SparseTensor result;
- EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2}),
- "indices and values rows .* must match");
+ EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
+ &result)
+ .code(),
+ error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidOrder) {
@@ -134,18 +146,24 @@ TEST(SparseTensorTest, SparseTensorInvalidOrder) {
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
+ SparseTensor result;
- EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10, 10}), {0, 1}),
- "Order length must be SparseTensor rank");
+ EXPECT_EQ(
+ SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1}, &result)
+ .code(),
+ error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidShape) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
+ SparseTensor result;
- EXPECT_DEATH(SparseTensor(ix, vals, TensorShape({10, 10}), {0, 1, 2}),
- "Shape rank must be SparseTensor rank");
+ EXPECT_EQ(
+ SparseTensor::Create(ix, vals, TensorShape({10, 10}), {0, 1, 2}, &result)
+ .code(),
+ error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorConstruction) {
@@ -169,7 +187,8 @@ TEST(SparseTensorTest, SparseTensorConstruction) {
TensorShape shape({10, 10, 10});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Status st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("indices[2] = [2,0,0] is out of order",
@@ -210,7 +229,8 @@ TEST(SparseTensorTest, EmptySparseTensorAllowed) {
std::vector<int64> shape{10, 10, 10};
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(st.order(), order);
@@ -227,7 +247,8 @@ TEST(SparseTensorTest, SortingWorksCorrectly) {
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
TensorShape shape({1000, 1000, 1000, 1000});
- SparseTensor st(ix, vals, shape);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, &st));
auto ix_t = ix.matrix<int64>();
@@ -266,7 +287,8 @@ TEST(SparseTensorTest, ValidateIndicesFindsInvalid) {
TensorShape shape({10, 10, 10});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
st.Reorder<string>(order);
Status st_indices_valid = st.IndicesValid();
@@ -302,7 +324,8 @@ TEST(SparseTensorTest, SparseTensorCheckBoundaries) {
TensorShape shape({10, 10, 10});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
EXPECT_FALSE(st.IndicesValid().ok());
st.Reorder<string>(order);
@@ -351,7 +374,8 @@ TEST(SparseTensorTest, SparseTensorToDenseTensor) {
TensorShape shape({4, 4, 5});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Tensor dense(DT_STRING, TensorShape({4, 4, 5}));
st.ToDense<string>(&dense);
@@ -390,7 +414,8 @@ TEST(SparseTensorTest, SparseTensorToLargerDenseTensor) {
TensorShape shape({4, 4, 5});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Tensor dense(DT_STRING, TensorShape({10, 10, 10}));
st.ToDense<string>(&dense);
@@ -433,7 +458,8 @@ TEST(SparseTensorTest, SparseTensorGroup) {
TensorShape shape({10, 10, 10});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
st.Reorder<int32>(order);
std::vector<std::vector<int64> > groups;
@@ -521,7 +547,8 @@ TEST(SparseTensorTest, Concat) {
TensorShape shape({10, 10, 10});
std::vector<int64> order{0, 1, 2};
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
EXPECT_FALSE(st.IndicesValid().ok());
st.Reorder<string>(order);
TF_EXPECT_OK(st.IndicesValid());
@@ -551,7 +578,9 @@ TEST(SparseTensorTest, Concat) {
// Concat works if non-primary ix is out of order, but output order
// is not defined
- SparseTensor st_ooo(ix, vals, shape, {0, 2, 1}); // non-primary ix OOO
+ SparseTensor st_ooo;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, {0, 2, 1},
+ &st_ooo)); // non-primary ix OOO
SparseTensor conc_ooo = SparseTensor::Concat<string>({st, st, st, st_ooo});
std::vector<int64> expected_ooo{-1, -1, -1};
EXPECT_EQ(conc_ooo.order(), expected_ooo);
@@ -584,9 +613,11 @@ TEST(SparseTensorTest, Split) {
vals.vec<int64>()(2) = 3;
vals.vec<int64>()(3) = 4;
- SparseTensor st(ids, vals, TensorShape({4, 3}));
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({4, 3}), &st));
- std::vector<SparseTensor> st_list = SparseTensor::Split<int64>(st, 0, 2);
+ std::vector<SparseTensor> st_list;
+ TF_ASSERT_OK(SparseTensor::Split<int64>(st, 0, 2, &st_list));
EXPECT_EQ(st_list.size(), 2);
auto expected_shape = gtl::InlinedVector<int64, 8>{2, 3};
@@ -633,7 +664,8 @@ TEST(SparseTensorTest, Slice) {
vals.vec<int64>()(2) = 3;
vals.vec<int64>()(3) = 4;
- SparseTensor st(ids, vals, TensorShape({4, 3}));
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({4, 3}), &st));
std::vector<int64> start(2, 0);
std::vector<int64> size(2);
@@ -662,7 +694,8 @@ TEST(SparseTensorTest, Dim0SparseTensorToDenseTensor) {
vals.scalar<int32>()() = 5;
TensorShape shape({});
- SparseTensor st(ix, vals, shape);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, &st));
Tensor dense(DT_INT32, TensorShape({}));
st.ToDense<int32>(&dense);
@@ -699,7 +732,8 @@ static void BM_SparseReorderFloat(int iters, int N32, int NDIM32) {
ix_t(i, d) = rnd.Rand64() % 1000;
}
}
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
testing::StartTiming();
st.Reorder<float>(reorder);
@@ -740,7 +774,8 @@ static void BM_SparseReorderString(int iters, int N32, int NDIM32) {
ix_t(i, d) = rnd.Rand64() % 1000;
}
}
- SparseTensor st(ix, vals, shape, order);
+ SparseTensor st;
+ TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
testing::StartTiming();
st.Reorder<string>(reorder);
diff --git a/tensorflow/core/util/stat_summarizer.cc b/tensorflow/core/util/stat_summarizer.cc
index a5c1fda102..2117042034 100644
--- a/tensorflow/core/util/stat_summarizer.cc
+++ b/tensorflow/core/util/stat_summarizer.cc
@@ -133,7 +133,6 @@ void StatSummarizer::ProcessStepStats(const StepStats& step_stats) {
int64 first_node_start_us =
step_stats.dev_stats(0).node_stats(0).all_start_micros();
- std::map<std::string, Detail> details;
int node_num = 0;
for (const auto& ds : step_stats.dev_stats()) {
@@ -177,22 +176,15 @@ void StatSummarizer::ProcessStepStats(const StepStats& step_stats) {
++node_num;
const int64 curr_time = ns.all_end_rel_micros();
curr_total_us += curr_time;
- auto result = details.emplace(name, Detail());
auto output_result =
outputs_.emplace(name, std::vector<TensorDescription>());
std::vector<TensorDescription>* outputs = &(output_result.first->second);
- Detail* detail = &(result.first->second);
- detail->start_us.UpdateStat(ns.all_start_micros() - first_node_start_us);
- detail->rel_end_us.UpdateStat(curr_time);
+ int64_t start_us = (ns.all_start_micros() - first_node_start_us);
+ int64_t rel_end_us = curr_time;
// If this is the first pass, initialize some values.
- if (result.second) {
- detail->name = name;
- detail->type = op_type;
-
- detail->run_order = node_num;
-
+ if (output_result.second) {
outputs->resize(ns.output_size());
for (const auto& output : ns.output()) {
const int32 slot = output.slot();
@@ -202,7 +194,6 @@ void StatSummarizer::ProcessStepStats(const StepStats& step_stats) {
}
(*outputs)[slot] = output.tensor_description();
}
- detail->times_called = 0;
}
int64 curr_node_mem = 0;
@@ -210,11 +201,10 @@ void StatSummarizer::ProcessStepStats(const StepStats& step_stats) {
const int64 mem_usage = mem.total_bytes();
curr_node_mem += mem_usage;
}
- detail->mem_used.UpdateStat(curr_node_mem);
- mem_total += curr_node_mem;
+ stats_calculator_->AddNodeStats(name, op_type, node_num, start_us,
+ rel_end_us, curr_node_mem);
- ++detail->times_called;
- stats_calculator_->UpdateDetails(details);
+ mem_total += curr_node_mem;
Validate(outputs, ns);
}
diff --git a/tensorflow/core/util/stats_calculator.cc b/tensorflow/core/util/stats_calculator.cc
index c4befbdb84..eb07754650 100644
--- a/tensorflow/core/util/stats_calculator.cc
+++ b/tensorflow/core/util/stats_calculator.cc
@@ -272,9 +272,24 @@ std::string StatsCalculator::GetOutputString() const {
return stream.str();
}
-void StatsCalculator::UpdateDetails(
- const std::map<std::string, Detail>& details) {
- details_.insert(details.begin(), details.end());
+void StatsCalculator::AddNodeStats(const std::string& name,
+ const std::string& type, int64_t run_order,
+ int64_t start_us, int64_t rel_end_us,
+ int64_t mem_used) {
+ Detail* detail = nullptr;
+ if (details_.find(name) == details_.end()) {
+ details_.insert({name, {}});
+ detail = &details_.at(name);
+ detail->type = type;
+ detail->name = name;
+ detail->run_order = run_order;
+ } else {
+ detail = &details_.at(name);
+ }
+ detail->start_us.UpdateStat(start_us);
+ detail->rel_end_us.UpdateStat(rel_end_us);
+ detail->mem_used.UpdateStat(mem_used);
+ detail->times_called++;
}
} // namespace tensorflow
diff --git a/tensorflow/core/util/stats_calculator.h b/tensorflow/core/util/stats_calculator.h
index 39cef816f1..e191737bb2 100644
--- a/tensorflow/core/util/stats_calculator.h
+++ b/tensorflow/core/util/stats_calculator.h
@@ -163,7 +163,10 @@ class StatsCalculator {
};
const std::map<std::string, Detail>& GetDetails() const { return details_; }
- void UpdateDetails(const std::map<std::string, Detail>& details);
+
+ void AddNodeStats(const std::string& name, const std::string& type,
+ int64_t run_order, int64_t start_us, int64_t rel_end_us,
+ int64_t mem_used);
private:
void OrderNodesByMetric(SortingMetric sorting_metric,
diff --git a/tensorflow/core/util/stats_calculator_test.cc b/tensorflow/core/util/stats_calculator_test.cc
new file mode 100644
index 0000000000..00d7bfc2f9
--- /dev/null
+++ b/tensorflow/core/util/stats_calculator_test.cc
@@ -0,0 +1,76 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "tensorflow/core/util/stats_calculator.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+namespace {
+
+using Detail = StatsCalculator::Detail;
+
+TEST(StatsCalculatorTest, TotalTimeMs) {
+ auto options = StatSummarizerOptions();
+ StatsCalculator calc(options);
+
+ EXPECT_EQ(0, calc.num_runs());
+ calc.UpdateRunTotalUs(1);
+
+ EXPECT_EQ(1, calc.num_runs());
+ calc.UpdateRunTotalUs(2);
+
+ EXPECT_EQ(2, calc.num_runs());
+ auto run_time_us = calc.run_total_us();
+ EXPECT_EQ(1, run_time_us.min());
+ EXPECT_FLOAT_EQ(1.5, run_time_us.avg());
+}
+
+TEST(StatsCalculatorTest, AddNodeStatsUpdate) {
+ auto options = StatSummarizerOptions();
+ StatsCalculator calc(options);
+ EXPECT_TRUE(calc.GetDetails().empty());
+
+ const int64_t node1_run_order = 1;
+ const int64_t run1_start_us = 1;
+ const int64_t run1_end_us = 2;
+ const int64_t run1_mem_used = 45;
+ calc.AddNodeStats("node1", "type_1", node1_run_order, run1_start_us,
+ run1_end_us, run1_mem_used);
+ ASSERT_EQ(1, calc.GetDetails().size());
+ const Detail& detail = calc.GetDetails().at("node1");
+ EXPECT_EQ(1, detail.times_called);
+ EXPECT_EQ("node1", detail.name);
+ EXPECT_EQ("type_1", detail.type);
+ EXPECT_EQ(node1_run_order, detail.run_order);
+
+ const int64_t run2_start_us = 3;
+ const int64_t run2_end_us = 5;
+ const int64_t run2_mem_used = 145;
+ calc.AddNodeStats("node1", "type_1", node1_run_order, run2_start_us,
+ run2_end_us, run2_mem_used);
+ EXPECT_EQ(1, calc.GetDetails().size());
+
+ EXPECT_EQ(2, detail.times_called);
+ EXPECT_EQ("node1", detail.name);
+ EXPECT_EQ("type_1", detail.type);
+ EXPECT_EQ(node1_run_order, detail.run_order);
+
+ EXPECT_EQ(run1_start_us + run2_start_us, detail.start_us.sum());
+ EXPECT_EQ(run1_end_us + run2_end_us, detail.rel_end_us.sum());
+ EXPECT_EQ(run1_mem_used + run2_mem_used, detail.mem_used.sum());
+}
+
+} // namespace
+} // namespace tensorflow
diff --git a/tensorflow/core/util/tensor_format.cc b/tensorflow/core/util/tensor_format.cc
index 33ab87aa78..a5f7ecf0d1 100644
--- a/tensorflow/core/util/tensor_format.cc
+++ b/tensorflow/core/util/tensor_format.cc
@@ -18,7 +18,7 @@ limitations under the License.
namespace tensorflow {
string GetConvnetDataFormatAttrString() {
- return "data_format: { 'NHWC', 'NCHW', 'HWNC', 'HWCN' } = 'NHWC' ";
+ return "data_format: { 'NHWC', 'NCHW' } = 'NHWC' ";
}
string GetConvnet3dDataFormatAttrString() {
diff --git a/tensorflow/docs_src/extend/new_data_formats.md b/tensorflow/docs_src/extend/new_data_formats.md
index d1d1f69766..abbf47910e 100644
--- a/tensorflow/docs_src/extend/new_data_formats.md
+++ b/tensorflow/docs_src/extend/new_data_formats.md
@@ -77,18 +77,24 @@ can be used as a starting point for your implementation:
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
-namespace tensorflow {
+namespace myproject {
namespace {
-class MyReaderDatasetOp : public DatasetOpKernel {
+using ::tensorflow::DT_STRING;
+using ::tensorflow::PartialTensorShape;
+using ::tensorflow::Status;
+
+class MyReaderDatasetOp : public tensorflow::DatasetOpKernel {
public:
- MyReaderDatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {
+ MyReaderDatasetOp(tensorflow::OpKernelConstruction* ctx)
+ : DatasetOpKernel(ctx) {
// Parse and validate any attrs that define the dataset using
// `ctx->GetAttr()`, and store them in member variables.
}
- void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
+ void MakeDataset(tensorflow::OpKernelContext* ctx,
+ tensorflow::DatasetBase** output) override {
    // Parse and validate any input tensors that define the dataset using
// `ctx->input()` or the utility function
// `ParseScalarArgument<T>(ctx, &arg)`.
@@ -99,14 +105,14 @@ class MyReaderDatasetOp : public DatasetOpKernel {
}
private:
- class Dataset : public GraphDatasetBase {
+ class Dataset : public tensorflow::GraphDatasetBase {
public:
- Dataset(OpKernelContext* ctx) : GraphDatasetBase(ctx) {}
+ Dataset(tensorflow::OpKernelContext* ctx) : GraphDatasetBase(ctx) {}
- std::unique_ptr<IteratorBase> MakeIteratorInternal(
+ std::unique_ptr<tensorflow::IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
- return std::unique_ptr<IteratorBase>(
- new Iterator({this, strings::StrCat(prefix, "::MyReader")}));
+ return std::unique_ptr<tensorflow::IteratorBase>(new Iterator(
+ {this, tensorflow::strings::StrCat(prefix, "::MyReader")}));
}
// Record structure: Each record is represented by a scalar string tensor.
@@ -114,8 +120,8 @@ class MyReaderDatasetOp : public DatasetOpKernel {
// Dataset elements can have a fixed number of components of different
// types and shapes; replace the following two methods to customize this
// aspect of the dataset.
- const DataTypeVector& output_dtypes() const override {
- static DataTypeVector* dtypes = new DataTypeVector({DT_STRING});
+ const tensorflow::DataTypeVector& output_dtypes() const override {
+ static auto* const dtypes = new tensorflow::DataTypeVector({DT_STRING});
return *dtypes;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
@@ -132,16 +138,16 @@ class MyReaderDatasetOp : public DatasetOpKernel {
// Implement this method if you want to be able to save and restore
// instances of this dataset (and any iterators over it).
Status AsGraphDefInternal(DatasetGraphDefBuilder* b,
- Node** output) const override {
+ tensorflow::Node** output) const override {
// Construct nodes to represent any of the input tensors from this
// object's member variables using `b->AddScalar()` and `b->AddVector()`.
- std::vector<Node*> input_tensors;
+ std::vector<tensorflow::Node*> input_tensors;
TF_RETURN_IF_ERROR(b->AddDataset(this, input_tensors, output));
return Status::OK();
}
private:
- class Iterator : public DatasetIterator<Dataset> {
+ class Iterator : public tensorflow::DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params), i_(0) {}
@@ -158,15 +164,15 @@ class MyReaderDatasetOp : public DatasetOpKernel {
// return `Status::OK()`.
// 3. If an error occurs, return an error status using one of the helper
// functions from "tensorflow/core/lib/core/errors.h".
- Status GetNextInternal(IteratorContext* ctx,
- std::vector<Tensor>* out_tensors,
+ Status GetNextInternal(tensorflow::IteratorContext* ctx,
+ std::vector<tensorflow::Tensor>* out_tensors,
bool* end_of_sequence) override {
// NOTE: `GetNextInternal()` may be called concurrently, so it is
// recommended that you protect the iterator state with a mutex.
- mutex_lock l(mu_);
+ tensorflow::mutex_lock l(mu_);
if (i_ < 10) {
// Create a scalar string tensor and add it to the output.
- Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
+ tensorflow::Tensor record_tensor(ctx->allocator({}), DT_STRING, {});
record_tensor.scalar<string>()() = "MyReader!";
out_tensors->emplace_back(std::move(record_tensor));
++i_;
@@ -183,20 +189,20 @@ class MyReaderDatasetOp : public DatasetOpKernel {
//
// Implement these two methods if you want to be able to save and restore
// instances of this iterator.
- Status SaveInternal(IteratorStateWriter* writer) override {
- mutex_lock l(mu_);
+ Status SaveInternal(tensorflow::IteratorStateWriter* writer) override {
+ tensorflow::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(full_name("i"), i_));
return Status::OK();
}
- Status RestoreInternal(IteratorContext* ctx,
- IteratorStateReader* reader) override {
- mutex_lock l(mu_);
+ Status RestoreInternal(tensorflow::IteratorContext* ctx,
+ tensorflow::IteratorStateReader* reader) override {
+ tensorflow::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name("i"), &i_));
return Status::OK();
}
private:
- mutex mu_;
+ tensorflow::mutex mu_;
int64 i_ GUARDED_BY(mu_);
};
};
@@ -211,14 +217,14 @@ class MyReaderDatasetOp : public DatasetOpKernel {
REGISTER_OP("MyReaderDataset")
.Output("handle: variant")
.SetIsStateful()
- .SetShapeFn(shape_inference::ScalarShape);
+ .SetShapeFn(tensorflow::shape_inference::ScalarShape);
// Register the kernel implementation for MyReaderDataset.
-REGISTER_KERNEL_BUILDER(Name("MyReaderDataset").Device(DEVICE_CPU),
+REGISTER_KERNEL_BUILDER(Name("MyReaderDataset").Device(tensorflow::DEVICE_CPU),
MyReaderDatasetOp);
} // namespace
-} // namespace tensorflow
+} // namespace myproject
```
The last step is to build the C++ code and add a Python wrapper. The easiest way
diff --git a/tensorflow/docs_src/guide/autograph.md b/tensorflow/docs_src/guide/autograph.md
new file mode 100644
index 0000000000..823e1c6d6b
--- /dev/null
+++ b/tensorflow/docs_src/guide/autograph.md
@@ -0,0 +1,3 @@
+# AutoGraph: Easy control flow for graphs
+
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/guide/autograph.ipynb)
diff --git a/tensorflow/docs_src/guide/debugger.md b/tensorflow/docs_src/guide/debugger.md
index 8d78fe6fbd..f0e465214e 100644
--- a/tensorflow/docs_src/guide/debugger.md
+++ b/tensorflow/docs_src/guide/debugger.md
@@ -780,7 +780,7 @@ sess.run(b)
``` python
import numpy as np
-a = tf.Variable(np.ones[10], name="a")
+a = tf.Variable(np.ones(10), name="a")
b = tf.add(a, a, name="b")
sess = tf.Session()
sess.run(tf.global_variables_initializer())
diff --git a/tensorflow/docs_src/guide/eager.md b/tensorflow/docs_src/guide/eager.md
index e98206eef9..3b54d6d2bb 100644
--- a/tensorflow/docs_src/guide/eager.md
+++ b/tensorflow/docs_src/guide/eager.md
@@ -225,7 +225,7 @@ the tape backwards and then discard. A particular `tf.GradientTape` can only
compute one gradient; subsequent calls throw a runtime error.
```py
-w = tfe.Variable([[1.0]])
+w = tf.Variable([[1.0]])
with tf.GradientTape() as tape:
loss = w * w
@@ -260,8 +260,8 @@ def grad(weights, biases):
train_steps = 200
learning_rate = 0.01
# Start with arbitrary values for W and B on the same batch of data
-W = tfe.Variable(5.)
-B = tfe.Variable(10.)
+W = tf.Variable(5.)
+B = tf.Variable(10.)
print("Initial loss: {:.3f}".format(loss(W, B)))
@@ -407,11 +407,11 @@ with tf.device("/gpu:0"):
### Variables and optimizers
-`tfe.Variable` objects store mutable `tf.Tensor` values accessed during
+`tf.Variable` objects store mutable `tf.Tensor` values accessed during
training to make automatic differentiation easier. The parameters of a model can
be encapsulated in classes as variables.
-Better encapsulate model parameters by using `tfe.Variable` with
+Better encapsulate model parameters by using `tf.Variable` with
`tf.GradientTape`. For example, the automatic differentiation example above
can be rewritten:
@@ -419,8 +419,8 @@ can be rewritten:
class Model(tf.keras.Model):
def __init__(self):
super(Model, self).__init__()
- self.W = tfe.Variable(5., name='weight')
- self.B = tfe.Variable(10., name='bias')
+ self.W = tf.Variable(5., name='weight')
+ self.B = tf.Variable(10., name='bias')
def call(self, inputs):
return inputs * self.W + self.B
@@ -498,19 +498,19 @@ is removed, and is then deleted.
```py
with tf.device("gpu:0"):
- v = tfe.Variable(tf.random_normal([1000, 1000]))
+ v = tf.Variable(tf.random_normal([1000, 1000]))
v = None # v no longer takes up GPU memory
```
### Object-based saving
-`tfe.Checkpoint` can save and restore `tfe.Variable`s to and from
+`tf.train.Checkpoint` can save and restore `tf.Variable`s to and from
checkpoints:
```py
-x = tfe.Variable(10.)
+x = tf.Variable(10.)
-checkpoint = tfe.Checkpoint(x=x) # save as "x"
+checkpoint = tf.train.Checkpoint(x=x) # save as "x"
x.assign(2.) # Assign a new value to the variables and save.
save_path = checkpoint.save('./ckpt/')
@@ -523,18 +523,18 @@ checkpoint.restore(save_path)
print(x) # => 2.0
```
-To save and load models, `tfe.Checkpoint` stores the internal state of objects,
+To save and load models, `tf.train.Checkpoint` stores the internal state of objects,
without requiring hidden variables. To record the state of a `model`,
-an `optimizer`, and a global step, pass them to a `tfe.Checkpoint`:
+an `optimizer`, and a global step, pass them to a `tf.train.Checkpoint`:
```py
model = MyModel()
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
checkpoint_dir = '/path/to/model_dir'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
-root = tfe.Checkpoint(optimizer=optimizer,
- model=model,
- optimizer_step=tf.train.get_or_create_global_step())
+root = tf.train.Checkpoint(optimizer=optimizer,
+ model=model,
+ optimizer_step=tf.train.get_or_create_global_step())
root.save(file_prefix=checkpoint_prefix)
# or
@@ -612,7 +612,7 @@ def line_search_step(fn, init_x, rate=1.0):
`tf.GradientTape` is a powerful interface for computing gradients, but there
is another [Autograd](https://github.com/HIPS/autograd)-style API available for
automatic differentiation. These functions are useful if writing math code with
-only tensors and gradient functions, and without `tfe.Variables`:
+only tensors and gradient functions, and without `tf.Variable`s:
* `tfe.gradients_function` —Returns a function that computes the derivatives
of its input function parameter with respect to its arguments. The input
@@ -824,7 +824,7 @@ gives you eager's interactive experimentation and debuggability with the
distributed performance benefits of graph execution.
Write, debug, and iterate in eager execution, then import the model graph for
-production deployment. Use `tfe.Checkpoint` to save and restore model
+production deployment. Use `tf.train.Checkpoint` to save and restore model
variables; this allows movement between eager and graph execution environments.
See the examples in:
[tensorflow/contrib/eager/python/examples](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples).
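For context on the Autograd-style `tfe.gradients_function` API referenced in the hunk above, here is a minimal sketch (assuming TensorFlow 1.9 with eager execution enabled; illustrative only, not part of the patch):

```python
import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()

def square(x):
  return tf.multiply(x, x)

# Returns a function that computes d(square)/dx at the given point.
grad = tfe.gradients_function(square)

print(square(3.).numpy())   # 9.0
print(grad(3.)[0].numpy())  # 6.0
```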
diff --git a/tensorflow/docs_src/guide/index.md b/tensorflow/docs_src/guide/index.md
index eefdb9ceae..f78dfc9a89 100644
--- a/tensorflow/docs_src/guide/index.md
+++ b/tensorflow/docs_src/guide/index.md
@@ -16,15 +16,12 @@ works. The units are as follows:
## Estimators
-* @{$estimators} provides an introduction.
-* @{$premade_estimators}, introduces Estimators for machine learning.
-* @{$custom_estimators}, which demonstrates how to build and train models you
- design yourself.
-* @{$feature_columns}, which shows how an Estimator can handle a variety of input
- data types without changes to the model.
-* @{$datasets_for_estimators} describes using tf.data with estimators.
-* @{$checkpoints}, which explains how to save training progress and resume where
- you left off.
+* @{$estimators}, learn how to use Estimators for machine learning.
+* @{$premade_estimators}, the basics of premade Estimators.
+* @{$checkpoints}, save training progress and resume where you left off.
+* @{$feature_columns}, handle a variety of input data types without changes to the model.
+* @{$datasets_for_estimators}, use `tf.data` to input data.
+* @{$custom_estimators}, write your own Estimator.
## Accelerators
diff --git a/tensorflow/docs_src/guide/keras.md b/tensorflow/docs_src/guide/keras.md
index 1d846df104..2330fa03c7 100644
--- a/tensorflow/docs_src/guide/keras.md
+++ b/tensorflow/docs_src/guide/keras.md
@@ -467,13 +467,13 @@ JSON and YAML serialization formats:
json_string = model.to_json()
# Recreate the model (freshly initialized)
-fresh_model = keras.models.from_json(json_string)
+fresh_model = keras.models.model_from_json(json_string)
# Serializes a model to YAML format
yaml_string = model.to_yaml()
# Recreate the model
-fresh_model = keras.models.from_yaml(yaml_string)
+fresh_model = keras.models.model_from_yaml(yaml_string)
```
Caution: Subclassed models are not serializable because their architecture is
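The corrected `model_from_json` name above can be exercised with a quick round trip like this sketch (assuming `tf.keras` from TensorFlow 1.9; illustrative only, not part of the patch):

```python
from tensorflow import keras

model = keras.Sequential(
    [keras.layers.Dense(10, activation='relu', input_shape=(32,))])

# Serialize only the architecture (no weights), then rebuild a fresh model.
json_string = model.to_json()
fresh_model = keras.models.model_from_json(json_string)
```

`model_from_yaml` follows the same pattern for the YAML string produced by `model.to_yaml()`.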
diff --git a/tensorflow/docs_src/guide/leftnav_files b/tensorflow/docs_src/guide/leftnav_files
index 357a2a1cb9..c4e235b41a 100644
--- a/tensorflow/docs_src/guide/leftnav_files
+++ b/tensorflow/docs_src/guide/leftnav_files
@@ -8,10 +8,10 @@ datasets.md
### Estimators
estimators.md: Introduction to Estimators
premade_estimators.md
-custom_estimators.md
+checkpoints.md
feature_columns.md
datasets_for_estimators.md
-checkpoints.md
+custom_estimators.md
### Accelerators
using_gpu.md
@@ -23,6 +23,7 @@ tensors.md
variables.md
graphs.md
saved_model.md
+autograph.md: Control flow
### ML Concepts
embedding.md
diff --git a/tensorflow/docs_src/guide/saved_model.md b/tensorflow/docs_src/guide/saved_model.md
index acc3d3ca0b..717488e7cc 100644
--- a/tensorflow/docs_src/guide/saved_model.md
+++ b/tensorflow/docs_src/guide/saved_model.md
@@ -2,9 +2,8 @@
The @{tf.train.Saver} class provides methods to save and restore models. The
@{tf.saved_model.simple_save} function is an easy way to build a
-@{tf.saved_model$saved model} suitable for serving.
-[Estimators](@{$guide/estimators}) automatically save and restore
-variables in the `model_dir`.
+@{tf.saved_model$saved model} suitable for serving. [Estimators](./estimators)
+automatically save and restore variables in the `model_dir`.
## Save and restore variables
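Since the rewritten paragraph above points to `tf.saved_model.simple_save` as the easy path to a servable model, a minimal sketch of its use follows (hypothetical export directory and tensor names; illustrative only, not part of the patch):

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
y = tf.layers.dense(x, 1, name="y")

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # Writes a SavedModel with a single serving signature to the export directory.
  tf.saved_model.simple_save(sess, "/tmp/simple_save_example",
                             inputs={"x": x}, outputs={"y": y})
```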
diff --git a/tensorflow/docs_src/install/index.md b/tensorflow/docs_src/install/index.md
index c2e5a991d4..55481cc400 100644
--- a/tensorflow/docs_src/install/index.md
+++ b/tensorflow/docs_src/install/index.md
@@ -1,36 +1,39 @@
-# Installing TensorFlow
+# Install TensorFlow
-We've built and tested TensorFlow on the following 64-bit laptop/desktop
-operating systems:
+Note: Run the [TensorFlow tutorials](../tutorials) in a pre-configured
+[Colab notebook environment](https://colab.research.google.com/notebooks/welcome.ipynb){: .external},
+without installation.
+
+TensorFlow is built and tested on the following 64-bit operating systems:
* macOS 10.12.6 (Sierra) or later.
* Ubuntu 16.04 or later
* Windows 7 or later.
* Raspbian 9.0 or later.
-Although you might be able to install TensorFlow on other laptop or desktop
-systems, we only support (and only fix issues in) the preceding configurations.
+While TensorFlow may work on other systems, we only support—and fix issues in—the
+systems listed above.
The following guides explain how to install a version of TensorFlow
that enables you to write applications in Python:
- * @{$install_linux$Installing TensorFlow on Ubuntu}
- * @{$install_mac$Installing TensorFlow on macOS}
- * @{$install_windows$Installing TensorFlow on Windows}
- * @{$install_raspbian$Installing TensorFlow on a Raspberry Pi}
- * @{$install_sources$Installing TensorFlow from Sources}
+ * @{$install_linux$Install TensorFlow on Ubuntu}
+ * @{$install_mac$Install TensorFlow on macOS}
+ * @{$install_windows$Install TensorFlow on Windows}
+ * @{$install_raspbian$Install TensorFlow on a Raspberry Pi}
+ * @{$install_sources$Install TensorFlow from source code}
Many aspects of the Python TensorFlow API changed from version 0.n to 1.0.
The following guide explains how to migrate older TensorFlow applications
to Version 1.0:
- * @{$migration$Transitioning to TensorFlow 1.0}
+ * @{$migration$Transition to TensorFlow 1.0}
The following guides explain how to install TensorFlow libraries for use in
other programming languages. These APIs are aimed at deploying TensorFlow
models in applications and are not as extensive as the Python APIs.
- * @{$install_java$Installing TensorFlow for Java}
- * @{$install_c$Installing TensorFlow for C}
- * @{$install_go$Installing TensorFlow for Go}
+ * @{$install_java$Install TensorFlow for Java}
+ * @{$install_c$Install TensorFlow for C}
+ * @{$install_go$Install TensorFlow for Go}
diff --git a/tensorflow/docs_src/install/install_c.md b/tensorflow/docs_src/install/install_c.md
index 2901848745..cf869e8655 100644
--- a/tensorflow/docs_src/install/install_c.md
+++ b/tensorflow/docs_src/install/install_c.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow for C
+# Install TensorFlow for C
TensorFlow provides a C API defined in
[`c_api.h`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/c/c_api.h),
@@ -38,7 +38,7 @@ enable TensorFlow for C:
OS="linux" # Change to "darwin" for macOS
TARGET_DIRECTORY="/usr/local"
curl -L \
- "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-${OS}-x86_64-1.9.0-rc0.tar.gz" |
+ "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-${OS}-x86_64-1.9.0.tar.gz" |
sudo tar -C $TARGET_DIRECTORY -xz
The `tar` command extracts the TensorFlow C library into the `lib`
diff --git a/tensorflow/docs_src/install/install_go.md b/tensorflow/docs_src/install/install_go.md
index 2c126df5aa..4ec7e42773 100644
--- a/tensorflow/docs_src/install/install_go.md
+++ b/tensorflow/docs_src/install/install_go.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow for Go
+# Install TensorFlow for Go
TensorFlow provides APIs for use in Go programs. These APIs are particularly
well-suited to loading models created in Python and executing them within
@@ -38,7 +38,7 @@ steps to install this library and enable TensorFlow for Go:
TF_TYPE="cpu" # Change to "gpu" for GPU support
TARGET_DIRECTORY='/usr/local'
curl -L \
- "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-$(go env GOOS)-x86_64-1.9.0-rc0.tar.gz" |
+ "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-${TF_TYPE}-$(go env GOOS)-x86_64-1.9.0.tar.gz" |
sudo tar -C $TARGET_DIRECTORY -xz
The `tar` command extracts the TensorFlow C library into the `lib`
diff --git a/tensorflow/docs_src/install/install_java.md b/tensorflow/docs_src/install/install_java.md
index 692dfc9cef..c5f760d254 100644
--- a/tensorflow/docs_src/install/install_java.md
+++ b/tensorflow/docs_src/install/install_java.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow for Java
+# Install TensorFlow for Java
TensorFlow provides APIs for use in Java programs. These APIs are particularly
well-suited to loading models created in Python and executing them within a
@@ -36,7 +36,7 @@ following to the project's `pom.xml` to use the TensorFlow Java APIs:
<dependency>
<groupId>org.tensorflow</groupId>
<artifactId>tensorflow</artifactId>
- <version>1.9.0-rc0</version>
+ <version>1.9.0</version>
</dependency>
```
@@ -65,7 +65,7 @@ As an example, these steps will create a Maven project that uses TensorFlow:
<dependency>
<groupId>org.tensorflow</groupId>
<artifactId>tensorflow</artifactId>
- <version>1.9.0-rc0</version>
+ <version>1.9.0</version>
</dependency>
</dependencies>
</project>
@@ -124,12 +124,12 @@ instead:
<dependency>
<groupId>org.tensorflow</groupId>
<artifactId>libtensorflow</artifactId>
- <version>1.9.0-rc0</version>
+ <version>1.9.0</version>
</dependency>
<dependency>
<groupId>org.tensorflow</groupId>
<artifactId>libtensorflow_jni_gpu</artifactId>
- <version>1.9.0-rc0</version>
+ <version>1.9.0</version>
</dependency>
```
@@ -148,7 +148,7 @@ refer to the simpler instructions above instead.
Take the following steps to install TensorFlow for Java on Linux or macOS:
1. Download
- [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.9.0-rc0.jar),
+ [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.9.0.jar),
which is the TensorFlow Java Archive (JAR).
2. Decide whether you will run TensorFlow for Java on CPU(s) only or with
@@ -167,7 +167,7 @@ Take the following steps to install TensorFlow for Java on Linux or macOS:
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
mkdir -p ./jni
curl -L \
- "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-${TF_TYPE}-${OS}-x86_64-1.9.0-rc0.tar.gz" |
+ "https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-${TF_TYPE}-${OS}-x86_64-1.9.0.tar.gz" |
tar -xz -C ./jni
### Install on Windows
@@ -175,10 +175,10 @@ Take the following steps to install TensorFlow for Java on Linux or macOS:
Take the following steps to install TensorFlow for Java on Windows:
1. Download
- [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.9.0-rc0.jar),
+ [libtensorflow.jar](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-1.9.0.jar),
which is the TensorFlow Java Archive (JAR).
2. Download the following Java Native Interface (JNI) file appropriate for
- [TensorFlow for Java on Windows](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-cpu-windows-x86_64-1.9.0-rc0.zip).
+ [TensorFlow for Java on Windows](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow_jni-cpu-windows-x86_64-1.9.0.zip).
3. Extract this .zip file.
__Note__: The native library (`tensorflow_jni.dll`) requires `msvcp140.dll` at runtime, which is included in the [Visual C++ 2015 Redistributable](https://www.microsoft.com/en-us/download/details.aspx?id=48145) package.
@@ -227,7 +227,7 @@ must be part of your `classpath`. For example, you can include the
downloaded `.jar` in your `classpath` by using the `-cp` compilation flag
as follows:
-<pre><b>javac -cp libtensorflow-1.9.0-rc0.jar HelloTF.java</b></pre>
+<pre><b>javac -cp libtensorflow-1.9.0.jar HelloTF.java</b></pre>
### Running
@@ -241,11 +241,11 @@ two files are available to the JVM:
For example, the following command line executes the `HelloTF` program on Linux
and macOS:
-<pre><b>java -cp libtensorflow-1.9.0-rc0.jar:. -Djava.library.path=./jni HelloTF</b></pre>
+<pre><b>java -cp libtensorflow-1.9.0.jar:. -Djava.library.path=./jni HelloTF</b></pre>
And the following command line executes the `HelloTF` program on Windows:
-<pre><b>java -cp libtensorflow-1.9.0-rc0.jar;. -Djava.library.path=jni HelloTF</b></pre>
+<pre><b>java -cp libtensorflow-1.9.0.jar;. -Djava.library.path=jni HelloTF</b></pre>
If the program prints <tt>Hello from <i>version</i></tt>, you've successfully
installed TensorFlow for Java and are ready to use the API. If the program
diff --git a/tensorflow/docs_src/install/install_linux.md b/tensorflow/docs_src/install/install_linux.md
index f21c073a1b..3a9a01c57e 100644
--- a/tensorflow/docs_src/install/install_linux.md
+++ b/tensorflow/docs_src/install/install_linux.md
@@ -1,38 +1,38 @@
-# Installing TensorFlow on Ubuntu
+# Install TensorFlow on Ubuntu
This guide explains how to install TensorFlow on Ubuntu Linux. While these
-instructions may work on other Linux variants, they are tested and supported with
-the following system requirements:
-
-* 64-bit desktops or laptops
-* Ubuntu 16.04 or higher
+instructions may work on other Linux variants, they are tested and supported
+with the following system requirements:
+* 64-bit desktops or laptops
+* Ubuntu 16.04 or higher
## Choose which TensorFlow to install
The following TensorFlow variants are available for installation:
-* __TensorFlow with CPU support only__. If your system does not have a
- NVIDIA®&nbsp;GPU, you must install this version. This version of TensorFlow is
- usually easier to install, so even if you have an NVIDIA GPU, we recommend
- installing this version first.
-* __TensorFlow with GPU support__. TensorFlow programs usually run much faster on
- a GPU instead of a CPU. If you run performance-critical applications and your
- system has an NVIDIA®&nbsp;GPU that meets the prerequisites, you should install
- this version. See [TensorFlow GPU support](#NVIDIARequirements) for details.
-
+* __TensorFlow with CPU support only__. If your system does not have a
+ NVIDIA®&nbsp;GPU, you must install this version. This version of TensorFlow
+ is usually easier to install, so even if you have an NVIDIA GPU, we
+ recommend installing this version first.
+* __TensorFlow with GPU support__. TensorFlow programs usually run much faster
+ on a GPU instead of a CPU. If you run performance-critical applications and
+ your system has an NVIDIA®&nbsp;GPU that meets the prerequisites, you should
+ install this version. See [TensorFlow GPU support](#NVIDIARequirements) for
+ details.
## How to install TensorFlow
There are a few options to install TensorFlow on your machine:
-* [Use pip in a virtual environment](#InstallingVirtualenv) *(recommended)*
-* [Use pip in your system environment](#InstallingNativePip)
-* [Configure a Docker container](#InstallingDocker)
-* [Use pip in Anaconda](#InstallingAnaconda)
-* [Install TensorFlow from source](/install/install_sources)
+* [Use pip in a virtual environment](#InstallingVirtualenv) *(recommended)*
+* [Use pip in your system environment](#InstallingNativePip)
+* [Configure a Docker container](#InstallingDocker)
+* [Use pip in Anaconda](#InstallingAnaconda)
+* [Install TensorFlow from source](/install/install_sources)
<a name="InstallingVirtualenv"></a>
+
### Use `pip` in a virtual environment
Key Point: Using a virtual environment is the recommended install method.
@@ -41,8 +41,8 @@ The [Virtualenv](https://virtualenv.pypa.io/en/stable/) tool creates virtual
Python environments that are isolated from other Python development on the same
machine. In this scenario, you install TensorFlow and its dependencies within a
virtual environment that is available when *activated*. Virtualenv provides a
-reliable way to install and run TensorFlow while avoiding conflicts with the rest
-of the system.
+reliable way to install and run TensorFlow while avoiding conflicts with the
+rest of the system.
##### 1. Install Python, `pip`, and `virtualenv`.
@@ -62,10 +62,10 @@ To install these packages on Ubuntu:
</pre>
We *recommend* using `pip` version 8.1 or higher. If using a release before
-version 8.1, upgrade `pip`:
+version 8.1, upgrade `pip`:
<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">sudo pip install -U pip</code>
+ <code class="devsite-terminal">pip install --upgrade pip</code>
</pre>
If not using Ubuntu and [setuptools](https://pypi.org/project/setuptools/) is
@@ -102,7 +102,7 @@ When the Virtualenv is activated, the shell prompt displays as `(venv) $`.
Within the active virtual environment, upgrade `pip`:
<pre class="prettyprint lang-bsh">
-(venv)$ pip install -U pip
+(venv)$ pip install --upgrade pip
</pre>
You can install other Python packages within the virtual environment without
@@ -112,15 +112,15 @@ affecting packages outside the `virtualenv`.
Choose one of the available TensorFlow packages for installation:
-* `tensorflow` —Current release for CPU
-* `tensorflow-gpu` —Current release with GPU support
-* `tf-nightly` —Nightly build for CPU
-* `tf-nightly-gpu` —Nightly build with GPU support
+* `tensorflow` —Current release for CPU
+* `tensorflow-gpu` —Current release with GPU support
+* `tf-nightly` —Nightly build for CPU
+* `tf-nightly-gpu` —Nightly build with GPU support
Within an active Virtualenv environment, use `pip` to install the package:
<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">pip install -U tensorflow</code>
+ <code class="devsite-terminal">pip install --upgrade tensorflow</code>
</pre>
Use `pip list` to show the packages installed in the virtual environment.
@@ -160,14 +160,14 @@ To uninstall TensorFlow, remove the Virtualenv directory you created in step 2:
<code class="devsite-terminal">rm -r ~/tensorflow/<var>venv</var></code>
</pre>
-
<a name="InstallingNativePip"></a>
+
### Use `pip` in your system environment
Use `pip` to install the TensorFlow package directly on your system without
using a container or virtual environment for isolation. This method is
-recommended for system administrators that want a TensorFlow installation that is
-available to everyone on a multi-user system.
+recommended for system administrators who want a TensorFlow installation that
+is available to everyone on a multi-user system.
Since a system install is not isolated, it could interfere with other
Python-based installations. But if you understand `pip` and your Python
@@ -195,10 +195,10 @@ To install these packages on Ubuntu:
</pre>
We *recommend* using `pip` version 8.1 or higher. If using a release before
-version 8.1, upgrade `pip`:
+version 8.1, upgrade `pip`:
<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">sudo pip install -U pip</code>
+ <code class="devsite-terminal">pip install --upgrade pip</code>
</pre>
If not using Ubuntu and [setuptools](https://pypi.org/project/setuptools/) is
@@ -212,16 +212,16 @@ installed, use `easy_install` to install `pip`:
Choose one of the available TensorFlow packages for installation:
-* `tensorflow` —Current release for CPU
-* `tensorflow-gpu` —Current release with GPU support
-* `tf-nightly` —Nightly build for CPU
-* `tf-nightly-gpu` —Nightly build with GPU support
+* `tensorflow` —Current release for CPU
+* `tensorflow-gpu` —Current release with GPU support
+* `tf-nightly` —Nightly build for CPU
+* `tf-nightly-gpu` —Nightly build with GPU support
And use `pip` to install the package for Python 2 or 3:
<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">sudo pip install -U tensorflow # Python 2.7</code>
- <code class="devsite-terminal">sudo pip3 install -U tensorflow # Python 3.n</code>
+ <code class="devsite-terminal">pip install --upgrade --user tensorflow # Python 2.7</code>
+ <code class="devsite-terminal">pip3 install --upgrade --user tensorflow # Python 3.n</code>
</pre>
Use `pip list` to show the packages installed on the system.
@@ -239,8 +239,8 @@ If the above steps failed, try installing the TensorFlow binary using the remote
URL of the `pip` package:
<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">sudo pip install --upgrade <var>remote-pkg-URL</var> # Python 2.7</code>
- <code class="devsite-terminal">sudo pip3 install --upgrade <var>remote-pkg-URL</var> # Python 3.n</code>
+ <code class="devsite-terminal">pip install --user --upgrade <var>remote-pkg-URL</var> # Python 2.7</code>
+ <code class="devsite-terminal">pip3 install --user --upgrade <var>remote-pkg-URL</var> # Python 3.n</code>
</pre>
The <var>remote-pkg-URL</var> depends on the operating system, Python version,
@@ -255,42 +255,41 @@ encounter problems.
To uninstall TensorFlow on your system, use one of following commands:
<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">sudo pip uninstall tensorflow # for Python 2.7</code>
- <code class="devsite-terminal">sudo pip3 uninstall tensorflow # for Python 3.n</code>
+ <code class="devsite-terminal">pip uninstall tensorflow # for Python 2.7</code>
+ <code class="devsite-terminal">pip3 uninstall tensorflow # for Python 3.n</code>
</pre>
<a name="InstallingDocker"></a>
+
### Configure a Docker container
-Docker completely isolates the TensorFlow installation
-from pre-existing packages on your machine. The Docker container contains
-TensorFlow and all its dependencies. Note that the Docker image can be quite
-large (hundreds of MBs). You might choose the Docker installation if you are
-incorporating TensorFlow into a larger application architecture that already
-uses Docker.
+Docker completely isolates the TensorFlow installation from pre-existing
+packages on your machine. The Docker container contains TensorFlow and all its
+dependencies. Note that the Docker image can be quite large (hundreds of MBs).
+You might choose the Docker installation if you are incorporating TensorFlow
+into a larger application architecture that already uses Docker.
Take the following steps to install TensorFlow through Docker:
- 1. Install Docker on your machine as described in the
- [Docker documentation](http://docs.docker.com/engine/installation/).
- 2. Optionally, create a Linux group called <code>docker</code> to allow
- launching containers without sudo as described in the
- [Docker documentation](https://docs.docker.com/engine/installation/linux/linux-postinstall/).
- (If you don't do this step, you'll have to use sudo each time
- you invoke Docker.)
- 3. To install a version of TensorFlow that supports GPUs, you must first
- install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker), which
- is stored in github.
- 4. Launch a Docker container that contains one of the
- [TensorFlow binary images](https://hub.docker.com/r/tensorflow/tensorflow/tags/).
+1. Install Docker on your machine as described in the
+ [Docker documentation](http://docs.docker.com/engine/installation/).
+2. Optionally, create a Linux group called <code>docker</code> to allow
+ launching containers without sudo as described in the
+ [Docker documentation](https://docs.docker.com/engine/installation/linux/linux-postinstall/).
+ (If you don't do this step, you'll have to use sudo each time you invoke
+ Docker.)
+3. To install a version of TensorFlow that supports GPUs, you must first
+ install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker), which is
+   hosted on GitHub.
+4. Launch a Docker container that contains one of the
+ [TensorFlow binary images](https://hub.docker.com/r/tensorflow/tensorflow/tags/).
The remainder of this section explains how to launch a Docker container.
-
#### CPU-only
-To launch a Docker container with CPU-only support (that is, without
-GPU support), enter a command of the following format:
+To launch a Docker container with CPU-only support (that is, without GPU
+support), enter a command of the following format:
<pre>
$ docker run -it <i>-p hostPort:containerPort TensorFlowCPUImage</i>
@@ -298,29 +297,31 @@ $ docker run -it <i>-p hostPort:containerPort TensorFlowCPUImage</i>
where:
- * <tt><i>-p hostPort:containerPort</i></tt> is optional.
- If you plan to run TensorFlow programs from the shell, omit this option.
- If you plan to run TensorFlow programs as Jupyter notebooks, set both
- <tt><i>hostPort</i></tt> and <tt><i>containerPort</i></tt>
- to <tt>8888</tt>. If you'd like to run TensorBoard inside the container,
- add a second `-p` flag, setting both <i>hostPort</i> and <i>containerPort</i>
- to 6006.
- * <tt><i>TensorFlowCPUImage</i></tt> is required. It identifies the Docker
+* <tt><i>-p hostPort:containerPort</i></tt> is optional. If you plan to run
+ TensorFlow programs from the shell, omit this option. If you plan to run
+ TensorFlow programs as Jupyter notebooks, set both <tt><i>hostPort</i></tt>
+ and <tt><i>containerPort</i></tt> to <tt>8888</tt>. If you'd like to run
+ TensorBoard inside the container, add a second `-p` flag, setting both
+ <i>hostPort</i> and <i>containerPort</i> to 6006.
+* <tt><i>TensorFlowCPUImage</i></tt> is required. It identifies the Docker
container. Specify one of the following values:
- * <tt>tensorflow/tensorflow</tt>, which is the TensorFlow CPU binary image.
- * <tt>tensorflow/tensorflow:latest-devel</tt>, which is the latest
- TensorFlow CPU Binary image plus source code.
- * <tt>tensorflow/tensorflow:<i>version</i></tt>, which is the
- specified version (for example, 1.1.0rc1) of TensorFlow CPU binary image.
- * <tt>tensorflow/tensorflow:<i>version</i>-devel</tt>, which is
- the specified version (for example, 1.1.0rc1) of the TensorFlow GPU
- binary image plus source code.
+
+ * <tt>tensorflow/tensorflow</tt>, which is the TensorFlow CPU binary
+ image.
+ * <tt>tensorflow/tensorflow:latest-devel</tt>, which is the latest
+ TensorFlow CPU Binary image plus source code.
+ * <tt>tensorflow/tensorflow:<i>version</i></tt>, which is the specified
+ version (for example, 1.1.0rc1) of TensorFlow CPU binary image.
+ * <tt>tensorflow/tensorflow:<i>version</i>-devel</tt>, which is the
+ specified version (for example, 1.1.0rc1) of the TensorFlow GPU binary
+ image plus source code.
TensorFlow images are available at
[dockerhub](https://hub.docker.com/r/tensorflow/tensorflow/).
-For example, the following command launches the latest TensorFlow CPU binary image
-in a Docker container from which you can run TensorFlow programs in a shell:
+For example, the following command launches the latest TensorFlow CPU binary
+image in a Docker container from which you can run TensorFlow programs in a
+shell:
<pre>
$ <b>docker run -it tensorflow/tensorflow bash</b>
@@ -336,10 +337,11 @@ $ <b>docker run -it -p 8888:8888 tensorflow/tensorflow</b>
Docker will download the TensorFlow binary image the first time you launch it.
-
#### GPU support
-To launch a Docker container with NVidia GPU support, enter a command of the following format (this [does not require any local CUDA installation](https://github.com/nvidia/nvidia-docker/wiki/CUDA#requirements)):
+To launch a Docker container with NVIDIA GPU support, enter a command of the
+following format (this
+[does not require any local CUDA installation](https://github.com/nvidia/nvidia-docker/wiki/CUDA#requirements)):
<pre>
$ <b>nvidia-docker run -it</b> <i>-p hostPort:containerPort TensorFlowGPUImage</i>
@@ -347,34 +349,34 @@ $ <b>nvidia-docker run -it</b> <i>-p hostPort:containerPort TensorFlowGPUImage</
where:
- * <tt><i>-p hostPort:containerPort</i></tt> is optional. If you plan
- to run TensorFlow programs from the shell, omit this option. If you plan
- to run TensorFlow programs as Jupyter notebooks, set both
- <tt><i>hostPort</i></tt> and <code><em>containerPort</em></code> to `8888`.
- * <i>TensorFlowGPUImage</i> specifies the Docker container. You must
- specify one of the following values:
- * <tt>tensorflow/tensorflow:latest-gpu</tt>, which is the latest
- TensorFlow GPU binary image.
- * <tt>tensorflow/tensorflow:latest-devel-gpu</tt>, which is
- the latest TensorFlow GPU Binary image plus source code.
- * <tt>tensorflow/tensorflow:<i>version</i>-gpu</tt>, which is the
- specified version (for example, 0.12.1) of the TensorFlow GPU
- binary image.
- * <tt>tensorflow/tensorflow:<i>version</i>-devel-gpu</tt>, which is
- the specified version (for example, 0.12.1) of the TensorFlow GPU
- binary image plus source code.
-
-We recommend installing one of the `latest` versions. For example, the
-following command launches the latest TensorFlow GPU binary image in a
-Docker container from which you can run TensorFlow programs in a shell:
+* <tt><i>-p hostPort:containerPort</i></tt> is optional. If you plan to run
+ TensorFlow programs from the shell, omit this option. If you plan to run
+ TensorFlow programs as Jupyter notebooks, set both <tt><i>hostPort</i></tt>
+ and <code><em>containerPort</em></code> to `8888`.
+* <i>TensorFlowGPUImage</i> specifies the Docker container. You must specify
+ one of the following values:
+ * <tt>tensorflow/tensorflow:latest-gpu</tt>, which is the latest
+ TensorFlow GPU binary image.
+ * <tt>tensorflow/tensorflow:latest-devel-gpu</tt>, which is the latest
+ TensorFlow GPU Binary image plus source code.
+ * <tt>tensorflow/tensorflow:<i>version</i>-gpu</tt>, which is the
+ specified version (for example, 0.12.1) of the TensorFlow GPU binary
+ image.
+ * <tt>tensorflow/tensorflow:<i>version</i>-devel-gpu</tt>, which is the
+ specified version (for example, 0.12.1) of the TensorFlow GPU binary
+ image plus source code.
+
+We recommend installing one of the `latest` versions. For example, the following
+command launches the latest TensorFlow GPU binary image in a Docker container
+from which you can run TensorFlow programs in a shell:
<pre>
$ <b>nvidia-docker run -it tensorflow/tensorflow:latest-gpu bash</b>
</pre>
-The following command also launches the latest TensorFlow GPU binary image
-in a Docker container. In this Docker container, you can run TensorFlow
-programs in a Jupyter notebook:
+The following command also launches the latest TensorFlow GPU binary image in a
+Docker container. In this Docker container, you can run TensorFlow programs in a
+Jupyter notebook:
<pre>
$ <b>nvidia-docker run -it -p 8888:8888 tensorflow/tensorflow:latest-gpu</b>
@@ -390,14 +392,12 @@ Docker will download the TensorFlow binary image the first time you launch it.
For more details see the
[TensorFlow docker readme](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/docker).
-
#### Next Steps
-You should now
-[validate your installation](#ValidateYourInstallation).
-
+You should now [validate your installation](#ValidateYourInstallation).
<a name="InstallingAnaconda"></a>
+
### Use `pip` in Anaconda
Anaconda provides the `conda` utility to create a virtual environment. However,
@@ -410,61 +410,59 @@ not tested on new TensorFlow releases.
Take the following steps to install TensorFlow in an Anaconda environment:
- 1. Follow the instructions on the
- [Anaconda download site](https://www.continuum.io/downloads)
- to download and install Anaconda.
+1. Follow the instructions on the
+ [Anaconda download site](https://www.continuum.io/downloads) to download and
+ install Anaconda.
- 2. Create a conda environment named <tt>tensorflow</tt> to run a version
- of Python by invoking the following command:
+2. Create a conda environment named <tt>tensorflow</tt> to run a version of
+ Python by invoking the following command:
<pre>$ <b>conda create -n tensorflow pip python=2.7 # or python=3.3, etc.</b></pre>
- 3. Activate the conda environment by issuing the following command:
+3. Activate the conda environment by issuing the following command:
<pre>$ <b>source activate tensorflow</b>
(tensorflow)$ # Your prompt should change </pre>
- 4. Issue a command of the following format to install
- TensorFlow inside your conda environment:
+4. Issue a command of the following format to install TensorFlow inside your
+ conda environment:
<pre>(tensorflow)$ <b>pip install --ignore-installed --upgrade</b> <i>tfBinaryURL</i></pre>
- where <code><em>tfBinaryURL</em></code> is the
- [URL of the TensorFlow Python package](#the_url_of_the_tensorflow_python_package).
- For example, the following command installs the CPU-only version of
- TensorFlow for Python 3.4:
+ where <code><em>tfBinaryURL</em></code> is the
+ [URL of the TensorFlow Python package](#the_url_of_the_tensorflow_python_package).
+ For example, the following command installs the CPU-only version of
+ TensorFlow for Python 3.4:
<pre>
(tensorflow)$ <b>pip install --ignore-installed --upgrade \
- https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp34-cp34m-linux_x86_64.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0-cp34-cp34m-linux_x86_64.whl</b></pre>
<a name="ValidateYourInstallation"></a>
+
## Validate your installation
To validate your TensorFlow installation, do the following:
- 1. Ensure that your environment is prepared to run TensorFlow programs.
- 2. Run a short TensorFlow program.
-
+1. Ensure that your environment is prepared to run TensorFlow programs.
+2. Run a short TensorFlow program.
### Prepare your environment
-If you installed on native pip, Virtualenv, or Anaconda, then
-do the following:
+If you installed on native pip, Virtualenv, or Anaconda, then do the following:
- 1. Start a terminal.
- 2. If you installed with Virtualenv or Anaconda, activate your container.
- 3. If you installed TensorFlow source code, navigate to any
- directory *except* one containing TensorFlow source code.
+1. Start a terminal.
+2. If you installed with Virtualenv or Anaconda, activate your container.
+3. If you installed TensorFlow source code, navigate to any directory *except*
+ one containing TensorFlow source code.
-If you installed through Docker, start a Docker container
-from which you can run bash. For example:
+If you installed through Docker, start a Docker container from which you can run
+bash. For example:
<pre>
$ <b>docker run -it tensorflow/tensorflow bash</b>
</pre>
-
### Run a short TensorFlow program
Invoke python from your shell as follows:
@@ -486,94 +484,71 @@ TensorFlow programs:
<pre>Hello, TensorFlow!</pre>
-If the system outputs an error message instead of a greeting, see [Common
-installation problems](#common_installation_problems).
+If the system outputs an error message instead of a greeting, see
+[Common installation problems](#common_installation_problems).
To learn more, see the [TensorFlow tutorials](../tutorials/).
<a name="NVIDIARequirements"></a>
-## TensorFlow GPU support
-
-To install TensorFlow with GPU support, configure the following NVIDIA® software
-on your system:
-
-* [CUDA Toolkit 9.0](http://nvidia.com/cuda). For details, see
- [NVIDIA's documentation](http://docs.nvidia.com/cuda/cuda-installation-guide-linux/).
- Append the relevant CUDA pathnames to the `LD_LIBRARY_PATH` environmental
- variable as described in the NVIDIA documentation.
-* [cuDNN SDK v7](http://developer.nvidia.com/cudnn). For details, see
- [NVIDIA's documentation](http://docs.nvidia.com/deeplearning/sdk/cudnn-install/).
- Create the `CUDA_HOME` environment variable as described in the NVIDIA
- documentation.
-* A GPU card with CUDA Compute Capability 3.0 or higher for building TensorFlow
- from source. To use the TensorFlow binaries, version 3.5 or higher is required.
- See the [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus) for a
- list of supported GPU cards.
-* [GPU drivers](http://nvidia.com/drivers) that support your version of the CUDA
- Toolkit.
-* The `libcupti-dev` library is the NVIDIA CUDA Profile Tools Interface. This
- library provides advanced profiling support. To install this library,
- use the following command for CUDA Toolkit >= 8.0:
-
-<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">sudo apt-get install cuda-command-line-tools</code>
-</pre>
-
-Add this path to the `LD_LIBRARY_PATH` environmental variable:
-
-<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">export LD_LIBRARY_PATH=${LD_LIBRARY_PATH:+${LD_LIBRARY_PATH}:}/usr/local/cuda/extras/CUPTI/lib64</code>
-</pre>
-
-* *OPTIONAL*: For optimized performance during inference, install
- *NVIDIA&nbsp;TensorRT&nbsp;3.0*. To install the minimal amount of TensorRT
- runtime components required to use with the pre-built `tensorflow-gpu` package:
-<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">wget https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1404/x86_64/nvinfer-runtime-trt-repo-ubuntu1404-3.0.4-ga-cuda9.0_1.0-1_amd64.deb</code>
- <code class="devsite-terminal">sudo dpkg -i nvinfer-runtime-trt-repo-ubuntu1404-3.0.4-ga-cuda9.0_1.0-1_amd64.deb</code>
- <code class="devsite-terminal">sudo apt-get update</code>
- <code class="devsite-terminal">sudo apt-get install -y --allow-downgrades libnvinfer-dev libcudnn7-dev=7.0.5.15-1+cuda9.0 libcudnn7=7.0.5.15-1+cuda9.0</code>
-</pre>
-
-Note: For compatibility with the pre-built `tensorflow-gpu` package, use the
-Ubuntu *14.04* package of TensorRT (shown above). Use this even when installing
-on an Ubuntu 16.04 system.
-
-To build the TensorFlow-TensorRT integration module from source instead of using
-the pre-built binaries, see the
-[module documentation](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/tensorrt#using-tensorrt-in-tensorflow).
-For detailed TensorRT installation instructions, see
-[NVIDIA's TensorRT documentation](http://docs.nvidia.com/deeplearning/sdk/tensorrt-install-guide/index.html).
-
-To avoid cuDNN version conflicts during later system upgrades, hold the cuDNN
-version at 7.0.5:
-
-<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">sudo apt-mark hold libcudnn7 libcudnn7-dev</code>
-</pre>
-
-To allow upgrades, remove the this hold:
-
-<pre class="prettyprint lang-bsh">
- <code class="devsite-terminal">sudo apt-mark unhold libcudnn7 libcudnn7-dev</code>
-</pre>
-
-If you have an earlier version of the preceding packages, upgrade to the
-specified versions. If upgrading is not possible, you can still run TensorFlow
-with GPU support by @{$install_sources}.
+## TensorFlow GPU support
+Note: Due to the number of libraries required, using [Docker](#InstallingDocker)
+is recommended over installing directly on the host system.
+
+The following NVIDIA® <i>hardware</i> must be installed on your system:
+
+* GPU card with CUDA Compute Capability 3.5 or higher. See
+ [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus) for a list of
+ supported GPU cards.
+
+The following NVIDIA® <i>software</i> must be installed on your system:
+
+* [GPU drivers](http://nvidia.com/driver). CUDA 9.0 requires 384.x or higher.
+* [CUDA Toolkit 9.0](http://nvidia.com/cuda).
+* [cuDNN SDK](http://developer.nvidia.com/cudnn) (>= 7.0). Version 7.1 is
+ recommended.
+* [CUPTI](http://docs.nvidia.com/cuda/cupti/) ships with the CUDA Toolkit, but
+ you also need to append its path to the `LD_LIBRARY_PATH` environment
+ variable: `export
+ LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/extras/CUPTI/lib64`
+* *OPTIONAL*: [NCCL 2.2](https://developer.nvidia.com/nccl) to use TensorFlow
+ with multiple GPUs.
+* *OPTIONAL*:
+ [TensorRT](http://docs.nvidia.com/deeplearning/sdk/tensorrt-install-guide/index.html)
+ which can improve latency and throughput for inference for some models.
+
+To use a GPU with CUDA Compute Capability 3.0, or different versions of the
+preceding NVIDIA libraries, see
+@{$install_sources$installing TensorFlow from Sources}. If using Ubuntu 16.04
+or possibly another Debian-based Linux distribution, `apt-get` can be used with
+the NVIDIA repository to simplify installation.
+
+```bash
+# Adds NVIDIA package repository.
+sudo apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/7fa2af80.pub
+wget http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1604/x86_64/cuda-repo-ubuntu1604_9.1.85-1_amd64.deb
+wget http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
+sudo dpkg -i cuda-repo-ubuntu1604_9.1.85-1_amd64.deb
+sudo dpkg -i nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb
+sudo apt-get update
+# Includes optional NCCL 2.x.
+sudo apt-get install cuda9.0 cuda-cublas-9-0 cuda-cufft-9-0 cuda-curand-9-0 \
+ cuda-cusolver-9-0 cuda-cusparse-9-0 libcudnn7=7.1.4.18-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 cuda-command-line-tools-9-0
+# Optionally install TensorRT runtime, must be done after above cuda install.
+sudo apt-get update
+sudo apt-get install libnvinfer4=4.1.2-1+cuda9.0
+```
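Once the NVIDIA packages above and the `tensorflow-gpu` pip package are installed, a quick sanity check of GPU visibility might look like this (a sketch assuming a working CUDA setup; not part of the patch):

```python
import tensorflow as tf

# True if TensorFlow can see at least one CUDA-enabled GPU.
print(tf.test.is_gpu_available())
# Name of the first GPU device, e.g. '/device:GPU:0', or '' if none is found.
print(tf.test.gpu_device_name())
```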
## Common installation problems
We are relying on Stack Overflow to document TensorFlow installation problems
-and their remedies. The following table contains links to Stack Overflow
-answers for some common installation problems.
-If you encounter an error message or other
-installation problem not listed in the following table, search for it
-on Stack Overflow. If Stack Overflow doesn't show the error message,
-ask a new question about it on Stack Overflow and specify
-the `tensorflow` tag.
+and their remedies. The following table contains links to Stack Overflow answers
+for some common installation problems. If you encounter an error message or
+other installation problem not listed in the following table, search for it on
+Stack Overflow. If Stack Overflow doesn't show the error message, ask a new
+question about it on Stack Overflow and specify the `tensorflow` tag.
<table>
<tr> <th>Link to GitHub or Stack&nbsp;Overflow</th> <th>Error Message</th> </tr>
@@ -657,74 +632,67 @@ the `tensorflow` tag.
</table>
-
<a name="TF_PYTHON_URL"></a>
+
## The URL of the TensorFlow Python package
A few installation mechanisms require the URL of the TensorFlow Python package.
The value you specify depends on three factors:
- * operating system
- * Python version
- * CPU only vs. GPU support
+* operating system
+* Python version
+* CPU only vs. GPU support
This section documents the relevant values for Linux installations.
-
### Python 2.7
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp27-none-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0-cp27-none-linux_x86_64.whl
</pre>
-
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0rc0-cp27-none-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0-cp27-none-linux_x86_64.whl
</pre>
Note that GPU support requires the NVIDIA hardware and software described in
[NVIDIA requirements to run TensorFlow with GPU support](#NVIDIARequirements).
-
### Python 3.4
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp34-cp34m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0-cp34-cp34m-linux_x86_64.whl
</pre>
-
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0rc0-cp34-cp34m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0-cp34-cp34m-linux_x86_64.whl
</pre>
Note that GPU support requires the NVIDIA hardware and software described in
[NVIDIA requirements to run TensorFlow with GPU support](#NVIDIARequirements).
-
### Python 3.5
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp35-cp35m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0-cp35-cp35m-linux_x86_64.whl
</pre>
-
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0rc0-cp35-cp35m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0-cp35-cp35m-linux_x86_64.whl
</pre>
-
Note that GPU support requires the NVIDIA hardware and software described in
[NVIDIA requirements to run TensorFlow with GPU support](#NVIDIARequirements).
@@ -733,16 +701,14 @@ Note that GPU support requires the NVIDIA hardware and software described in
CPU only:
<pre>
-https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0rc0-cp36-cp36m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-1.9.0-cp36-cp36m-linux_x86_64.whl
</pre>
-
GPU support:
<pre>
-https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0rc0-cp36-cp36m-linux_x86_64.whl
+https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.9.0-cp36-cp36m-linux_x86_64.whl
</pre>
-
Note that GPU support requires the NVIDIA hardware and software described in
[NVIDIA requirements to run TensorFlow with GPU support](#NVIDIARequirements).
diff --git a/tensorflow/docs_src/install/install_mac.md b/tensorflow/docs_src/install/install_mac.md
index c6f0c17924..1a7b2b815d 100644
--- a/tensorflow/docs_src/install/install_mac.md
+++ b/tensorflow/docs_src/install/install_mac.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow on macOS
+# Install TensorFlow on macOS
This guide explains how to install TensorFlow on macOS. Although these
instructions might also work on other macOS variants, we have only
@@ -119,7 +119,7 @@ Take the following steps to install TensorFlow with Virtualenv:
TensorFlow in the active Virtualenv is as follows:
<pre> $ <b>pip3 install --upgrade \
- https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0rc0-py3-none-any.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0-py3-none-any.whl</b></pre>
If you encounter installation problems, see
[Common Installation Problems](#common-installation-problems).
@@ -242,7 +242,7 @@ take the following steps:
issue the following command:
<pre> $ <b>sudo pip3 install --upgrade \
- https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0rc0-py3-none-any.whl</b> </pre>
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0-py3-none-any.whl</b> </pre>
If the preceding command fails, see
[installation problems](#common-installation-problems).
@@ -350,7 +350,7 @@ Take the following steps to install TensorFlow in an Anaconda environment:
TensorFlow for Python 2.7:
<pre> (<i>targetDirectory</i>)$ <b>pip install --ignore-installed --upgrade \
- https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0rc0-py2-none-any.whl</b></pre>
+ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0-py2-none-any.whl</b></pre>
<a name="ValidateYourInstallation"></a>
@@ -517,7 +517,7 @@ The value you specify depends on your Python version.
<pre>
-https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0rc0-py2-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0-py2-none-any.whl
</pre>
@@ -525,5 +525,5 @@ https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0rc0-py2-none-a
<pre>
-https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0rc0-py3-none-any.whl
+https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-1.9.0-py3-none-any.whl
</pre>
diff --git a/tensorflow/docs_src/install/install_raspbian.md b/tensorflow/docs_src/install/install_raspbian.md
index 46c4944ca7..58a5285c78 100644
--- a/tensorflow/docs_src/install/install_raspbian.md
+++ b/tensorflow/docs_src/install/install_raspbian.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow on Raspbian
+# Install TensorFlow on Raspbian
This guide explains how to install TensorFlow on a Raspberry Pi running
Raspbian. Although these instructions might also work on other Pi variants, we
diff --git a/tensorflow/docs_src/install/install_sources.md b/tensorflow/docs_src/install/install_sources.md
index fc1f6d05bd..31dcad64d4 100644
--- a/tensorflow/docs_src/install/install_sources.md
+++ b/tensorflow/docs_src/install/install_sources.md
@@ -1,28 +1,27 @@
-# Installing TensorFlow from Sources
+# Install TensorFlow from Sources
-This guide explains how to build TensorFlow sources into a TensorFlow
-binary and how to install that TensorFlow binary. Note that we provide
-well-tested, pre-built TensorFlow binaries for Ubuntu, macOS, and Windows
-systems. In addition, there are pre-built TensorFlow
-[docker images](https://hub.docker.com/r/tensorflow/tensorflow/).
-So, don't build a TensorFlow binary yourself unless you are very
-comfortable building complex packages from source and dealing with
-the inevitable aftermath should things not go exactly as documented.
+This guide explains how to build TensorFlow sources into a TensorFlow binary and
+how to install that TensorFlow binary. Note that we provide well-tested,
+pre-built TensorFlow binaries for Ubuntu, macOS, and Windows systems. In
+addition, there are pre-built TensorFlow
+[docker images](https://hub.docker.com/r/tensorflow/tensorflow/). So, don't
+build a TensorFlow binary yourself unless you are very comfortable building
+complex packages from source and dealing with the inevitable aftermath should
+things not go exactly as documented.
-If the last paragraph didn't scare you off, welcome. This guide explains
-how to build TensorFlow on 64-bit desktops and laptops running either of
-the following operating systems:
+If the last paragraph didn't scare you off, welcome. This guide explains how to
+build TensorFlow on 64-bit desktops and laptops running either of the following
+operating systems:
* Ubuntu
* macOS X
-Note: Some users have successfully built and installed TensorFlow from
-sources on non-supported systems. Please remember that we do not fix
-issues stemming from these attempts.
+Note: Some users have successfully built and installed TensorFlow from sources
+on non-supported systems. Please remember that we do not fix issues stemming
+from these attempts.
-We **do not support** building TensorFlow on Windows. That said, if you'd
-like to try to build TensorFlow on Windows anyway, use either of the
-following:
+We **do not support** building TensorFlow on Windows. That said, if you'd like
+to try to build TensorFlow on Windows anyway, use either of the following:
* [Bazel on Windows](https://bazel.build/versions/master/docs/windows.html)
* [TensorFlow CMake build](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/cmake)
@@ -32,38 +31,33 @@ instructions. Older CPUs may not be able to execute these binaries.
## Determine which TensorFlow to install
-You must choose one of the following types of TensorFlow to build and
-install:
-
-* **TensorFlow with CPU support only**. If your system does not have a
- NVIDIA® GPU, build and install this version. Note that this version of
- TensorFlow is typically easier to build and install, so even if you
- have an NVIDIA GPU, we recommend building and installing this version
- first.
-* **TensorFlow with GPU support**. TensorFlow programs typically run
- significantly faster on a GPU than on a CPU. Therefore, if your system
- has a NVIDIA GPU and you need to run performance-critical applications,
- you should ultimately build and install this version.
- Beyond the NVIDIA GPU itself, your system must also fulfill the NVIDIA
- software requirements described in one of the following documents:
+You must choose one of the following types of TensorFlow to build and install:
- * @{$install_linux#NVIDIARequirements$Installing TensorFlow on Ubuntu}
- * @{$install_mac#NVIDIARequirements$Installing TensorFlow on macOS}
+* **TensorFlow with CPU support only**. If your system does not have an NVIDIA®
+ GPU, build and install this version. Note that this version of TensorFlow is
+ typically easier to build and install, so even if you have an NVIDIA GPU, we
+ recommend building and installing this version first.
+* **TensorFlow with GPU support**. TensorFlow programs typically run
+  significantly faster on a GPU than on a CPU. Therefore, if your system has an
+ NVIDIA GPU and you need to run performance-critical applications, you should
+ ultimately build and install this version. Beyond the NVIDIA GPU itself,
+ your system must also fulfill the NVIDIA software requirements described in
+ one of the following documents:
+  * @{$install_linux#NVIDIARequirements$Installing TensorFlow on Ubuntu}
+  * @{$install_mac#NVIDIARequirements$Installing TensorFlow on macOS}
## Clone the TensorFlow repository
-Start the process of building TensorFlow by cloning a TensorFlow
-repository.
+Start the process of building TensorFlow by cloning a TensorFlow repository.
To clone **the latest** TensorFlow repository, issue the following command:
<pre>$ <b>git clone https://github.com/tensorflow/tensorflow</b> </pre>
-The preceding <code>git clone</code> command creates a subdirectory
-named `tensorflow`. After cloning, you may optionally build a
-**specific branch** (such as a release branch) by invoking the
-following commands:
+The preceding <code>git clone</code> command creates a subdirectory named
+`tensorflow`. After cloning, you may optionally build a **specific branch**
+(such as a release branch) by invoking the following commands:
<pre>
$ <b>cd tensorflow</b>
@@ -75,38 +69,34 @@ issue the following command:
<pre>$ <b>git checkout r1.0</b></pre>
-Next, you must prepare your environment for
-[Linux](#PrepareLinux)
-or
+Next, you must prepare your environment for [Linux](#PrepareLinux) or
[macOS](#PrepareMac)
-
<a name="PrepareLinux"></a>
-## Prepare environment for Linux
-Before building TensorFlow on Linux, install the following build
-tools on your system:
+## Prepare environment for Linux
- * bazel
- * TensorFlow Python dependencies
- * optionally, NVIDIA packages to support TensorFlow for GPU.
+Before building TensorFlow on Linux, install the following build tools on your
+system:
+* bazel
+* TensorFlow Python dependencies
+* optionally, NVIDIA packages to support TensorFlow for GPU.
### Install Bazel
If bazel is not installed on your system, install it now by following
[these directions](https://bazel.build/versions/master/docs/install.html).
-
### Install TensorFlow Python dependencies
To install TensorFlow, you must install the following packages:
- * `numpy`, which is a numerical processing package that TensorFlow requires.
- * `dev`, which enables adding extensions to Python.
- * `pip`, which enables you to install and manage certain Python packages.
- * `wheel`, which enables you to manage Python compressed packages in
- the wheel (.whl) format.
+* `numpy`, which is a numerical processing package that TensorFlow requires.
+* `dev`, which enables adding extensions to Python.
+* `pip`, which enables you to install and manage certain Python packages.
+* `wheel`, which enables you to manage Python compressed packages in the wheel
+ (.whl) format.
To install these packages for Python 2.7, issue the following command:
@@ -120,68 +110,70 @@ To install these packages for Python 3.n, issue the following command:
$ <b>sudo apt-get install python3-numpy python3-dev python3-pip python3-wheel</b>
</pre>
-
### Optional: install TensorFlow for GPU prerequisites
If you are building TensorFlow without GPU support, skip this section.
-The following NVIDIA <i>hardware</i> must be installed on your system:
-
- * GPU card with CUDA Compute Capability 3.0 or higher. See
- [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus)
- for a list of supported GPU cards.
-
-The following NVIDIA <i>software</i> must be installed on your system:
-
- * [CUDA Toolkit](http://nvidia.com/cuda) (>= 8.0). We recommend version 9.0.
- For details, see
- [NVIDIA's documentation](http://docs.nvidia.com/cuda/cuda-installation-guide-linux/).
- Ensure that you append the relevant CUDA pathnames to the
- `LD_LIBRARY_PATH` environment variable as described in the
- NVIDIA documentation.
- * [GPU drivers](http://nvidia.com/driver) supporting your version of the CUDA
- Toolkit.
- * [cuDNN SDK](http://developer.nvidia.com/cudnn) (>= 6.0). We recommend version 7.0. For details, see
- [NVIDIA's documentation](http://docs.nvidia.com/deeplearning/sdk/cudnn-install/).
- * [CUPTI](http://docs.nvidia.com/cuda/cupti/) ships with the CUDA Toolkit, but
- you also need to append its path to the `LD_LIBRARY_PATH` environment
- variable:
+The following NVIDIA® <i>hardware</i> must be installed on your system:
- <pre> $ <b>export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/extras/CUPTI/lib64</b> </pre>
+* GPU card with CUDA Compute Capability 3.5 or higher. See
+ [NVIDIA documentation](https://developer.nvidia.com/cuda-gpus) for a list of
+ supported GPU cards.
+
+The following NVIDIA® <i>software</i> must be installed on your system:
+
+* [GPU drivers](http://nvidia.com/driver). CUDA 9.0 requires 384.x or higher.
+* [CUDA Toolkit](http://nvidia.com/cuda) (>= 8.0). We recommend version 9.0.
+* [cuDNN SDK](http://developer.nvidia.com/cudnn) (>= 6.0). We recommend
+ version 7.1.x.
+* [CUPTI](http://docs.nvidia.com/cuda/cupti/) ships with the CUDA Toolkit, but
+ you also need to append its path to the `LD_LIBRARY_PATH` environment
+  variable:
+  `export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/extras/CUPTI/lib64`
+* *OPTIONAL*: [NCCL 2.2](https://developer.nvidia.com/nccl) to use TensorFlow
+ with multiple GPUs.
+* *OPTIONAL*:
+ [TensorRT](http://docs.nvidia.com/deeplearning/sdk/tensorrt-install-guide/index.html)
+ which can improve latency and throughput for inference for some models.
+
+While it is possible to install the NVIDIA libraries via `apt-get` from the
+NVIDIA repository, the libraries and headers are installed in locations that
+make it difficult to configure and debug build issues. Downloading and
+installing the libraries manually or using docker
+([latest-devel-gpu](https://hub.docker.com/r/tensorflow/tensorflow/tags/)) is
+recommended.
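Once a GPU-enabled package built against these libraries is installed, a rough
way to confirm that CUDA and cuDNN are visible to TensorFlow is a check along
these lines (illustrative only, not part of the build steps):

```python
# Hedged sanity check for a GPU build; run after installing a pip package
# built with --config=cuda.
import tensorflow as tf

print(tf.test.is_built_with_cuda())  # True for a CUDA-enabled build
print(tf.test.is_gpu_available())    # True if a usable CUDA device is found
```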
### Next
After preparing the environment, you must now
[configure the installation](#ConfigureInstallation).
-
<a name="PrepareMac"></a>
+
## Prepare environment for macOS
Before building TensorFlow, you must install the following on your system:
- * bazel
- * TensorFlow Python dependencies.
- * optionally, NVIDIA packages to support TensorFlow for GPU.
-
+* bazel
+* TensorFlow Python dependencies.
+* optionally, NVIDIA packages to support TensorFlow for GPU.
### Install bazel
If bazel is not installed on your system, install it now by following
[these directions](https://bazel.build/versions/master/docs/install.html#mac-os-x).
-
### Install python dependencies
To build TensorFlow, you must install the following packages:
- * six
- * numpy, which is a numerical processing package that TensorFlow requires.
- * wheel, which enables you to manage Python compressed packages
- in the wheel (.whl) format.
+* six
+* numpy, which is a numerical processing package that TensorFlow requires.
+* wheel, which enables you to manage Python compressed packages in the wheel
+ (.whl) format.
-You may install the python dependencies using pip. If you don't have pip
-on your machine, we recommend using homebrew to install Python and pip as
+You may install the python dependencies using pip. If you don't have pip on your
+machine, we recommend using homebrew to install Python and pip as
[documented here](http://docs.python-guide.org/en/latest/starting/install/osx/).
If you follow these instructions, you will not need to disable SIP.
@@ -192,22 +184,23 @@ After installing pip, invoke the following commands:
Note: These are just the minimum requirements to _build_ tensorflow. Installing
the pip package will download additional packages required to _run_ it. If you
plan on executing tasks directly with `bazel` , without the pip installation,
-you may need to install additional python packages. For example, you should
-`pip install mock enum34` before running TensorFlow's tests with bazel.
+you may need to install additional python packages. For example, you should
+`pip install mock enum34` before running TensorFlow's tests with bazel.
<a name="ConfigureInstallation"></a>
+
## Configure the installation
-The root of the source tree contains a bash script named
-<code>configure</code>. This script asks you to identify the pathname of all
-relevant TensorFlow dependencies and specify other build configuration options
-such as compiler flags. You must run this script *prior* to
-creating the pip package and installing TensorFlow.
+The root of the source tree contains a bash script named <code>configure</code>.
+This script asks you to identify the pathname of all relevant TensorFlow
+dependencies and specify other build configuration options such as compiler
+flags. You must run this script *prior* to creating the pip package and
+installing TensorFlow.
-If you wish to build TensorFlow with GPU, `configure` will ask
-you to specify the version numbers of CUDA and cuDNN. If several
-versions of CUDA or cuDNN are installed on your system, explicitly select
-the desired version instead of relying on the default.
+If you wish to build TensorFlow with GPU, `configure` will ask you to specify
+the version numbers of CUDA and cuDNN. If several versions of CUDA or cuDNN are
+installed on your system, explicitly select the desired version instead of
+relying on the default.
One of the questions that `configure` will ask is as follows:
@@ -215,73 +208,117 @@ One of the questions that `configure` will ask is as follows:
Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -march=native]
</pre>
-This question refers to a later phase in which you'll use bazel to [build the
-pip package](#build-the-pip-package) or the [C/Java libraries](#BuildCorJava).
-We recommend accepting the default (`-march=native`), which will optimize the
-generated code for your local machine's CPU type. However, if you are building
-TensorFlow on one CPU type but will run TensorFlow on a different CPU type, then
-consider specifying a more specific optimization
-flag as described in [the gcc
-documentation](https://gcc.gnu.org/onlinedocs/gcc-4.5.3/gcc/i386-and-x86_002d64-Options.html).
+This question refers to a later phase in which you'll use bazel to
+[build the pip package](#build-the-pip-package) or the
+[C/Java libraries](#BuildCorJava). We recommend accepting the default
+(`-march=native`), which will optimize the generated code for your local
+machine's CPU type. However, if you are building TensorFlow on one CPU type but
+will run TensorFlow on a different CPU type, then consider specifying a more
+specific optimization flag as described in
+[the gcc documentation](https://gcc.gnu.org/onlinedocs/gcc-4.5.3/gcc/i386-and-x86_002d64-Options.html).
-Here is an example execution of the `configure` script. Note that your
-own input will likely differ from our sample input:
+Here is an example execution of the `configure` script. Note that your own input
+will likely differ from our sample input:
<pre>
$ <b>cd tensorflow</b> # cd to the top-level directory created
$ <b>./configure</b>
+You have bazel 0.15.0 installed.
Please specify the location of python. [Default is /usr/bin/python]: <b>/usr/bin/python2.7</b>
+
+
Found possible Python library paths:
/usr/local/lib/python2.7/dist-packages
/usr/lib/python2.7/dist-packages
Please input the desired Python library path to use. Default is [/usr/lib/python2.7/dist-packages]
-Using python library path: /usr/local/lib/python2.7/dist-packages
-Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -march=native]:
-Do you wish to use jemalloc as the malloc implementation? [Y/n]
-jemalloc enabled
-Do you wish to build TensorFlow with Google Cloud Platform support? [y/N]
-No Google Cloud Platform support will be enabled for TensorFlow
-Do you wish to build TensorFlow with Hadoop File System support? [y/N]
-No Hadoop File System support will be enabled for TensorFlow
-Do you wish to build TensorFlow with the XLA just-in-time compiler (experimental)? [y/N]
-No XLA support will be enabled for TensorFlow
-Do you wish to build TensorFlow with VERBS support? [y/N]
-No VERBS support will be enabled for TensorFlow
-Do you wish to build TensorFlow with OpenCL support? [y/N]
-No OpenCL support will be enabled for TensorFlow
-Do you wish to build TensorFlow with CUDA support? [y/N] <b>Y</b>
-CUDA support will be enabled for TensorFlow
-Do you want to use clang as CUDA compiler? [y/N]
-nvcc will be used as CUDA compiler
+Do you wish to build TensorFlow with jemalloc as malloc support? [Y/n]:
+jemalloc as malloc support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with Google Cloud Platform support? [Y/n]:
+Google Cloud Platform support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with Hadoop File System support? [Y/n]:
+Hadoop File System support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with Amazon AWS Platform support? [Y/n]:
+Amazon AWS Platform support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with Apache Kafka Platform support? [Y/n]:
+Apache Kafka Platform support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with XLA JIT support? [y/N]:
+No XLA JIT support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with GDR support? [y/N]:
+No GDR support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with VERBS support? [y/N]:
+No VERBS support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with OpenCL SYCL support? [y/N]:
+No OpenCL SYCL support will be enabled for TensorFlow.
+
+Do you wish to build TensorFlow with CUDA support? [y/N]: <b>Y</b>
+CUDA support will be enabled for TensorFlow.
+
Please specify the CUDA SDK version you want to use. [Leave empty to default to CUDA 9.0]: <b>9.0</b>
+
+
Please specify the location where CUDA 9.0 toolkit is installed. Refer to README.md for more details. [Default is /usr/local/cuda]:
-Please specify which gcc should be used by nvcc as the host compiler. [Default is /usr/bin/gcc]:
-Please specify the cuDNN version you want to use. [Leave empty to default to cuDNN 7.0]: <b>7</b>
+
+
+Please specify the cuDNN version you want to use. [Leave empty to default to cuDNN 7.0]: <b>7.0</b>
+
+
Please specify the location where cuDNN 7 library is installed. Refer to README.md for more details. [Default is /usr/local/cuda]:
-Please specify a list of comma-separated CUDA compute capabilities you want to build with.
+
+
+Do you wish to build TensorFlow with TensorRT support? [y/N]:
+No TensorRT support will be enabled for TensorFlow.
+
+Please specify the NCCL version you want to use. If NCCL 2.2 is not installed, then you can use version 1.3 that can be fetched automatically but it may have worse performance with multiple GPUs. [Default is 2.2]: 1.3
+
+
+Please specify a list of comma-separated Cuda compute capabilities you want to build with.
You can find the compute capability of your device at: https://developer.nvidia.com/cuda-gpus.
-Please note that each additional compute capability significantly increases your build time and binary size.
-[Default is: "3.5,5.2"]: <b>3.0</b>
-Do you wish to build TensorFlow with MPI support? [y/N]
-MPI support will not be enabled for TensorFlow
+Please note that each additional compute capability significantly increases your
+build time and binary size. [Default is: 3.5,7.0] <b>6.1</b>
+
+
+Do you want to use clang as CUDA compiler? [y/N]:
+nvcc will be used as CUDA compiler.
+
+Please specify which gcc should be used by nvcc as the host compiler. [Default is /usr/bin/gcc]:
+
+
+Do you wish to build TensorFlow with MPI support? [y/N]:
+No MPI support will be enabled for TensorFlow.
+
+Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -march=native]:
+
+
+Would you like to interactively configure ./WORKSPACE for Android builds? [y/N]:
+Not configuring the WORKSPACE for Android builds.
+
+Preconfigured Bazel build configs. You can use any of the below by adding "--config=<>" to your build command. See tools/bazel.rc for more details.
+ --config=mkl # Build with MKL support.
+ --config=monolithic # Config for mostly static monolithic build.
Configuration finished
</pre>
-If you told `configure` to build for GPU support, then `configure`
-will create a canonical set of symbolic links to the CUDA libraries
-on your system. Therefore, every time you change the CUDA library paths,
-you must rerun the `configure` script before re-invoking
-the <code>bazel build</code> command.
+If you told `configure` to build for GPU support, then `configure` will create a
+canonical set of symbolic links to the CUDA libraries on your system. Therefore,
+every time you change the CUDA library paths, you must rerun the `configure`
+script before re-invoking the <code>bazel build</code> command.
Note the following:
- * Although it is possible to build both CUDA and non-CUDA configs
- under the same source tree, we recommend running `bazel clean` when
- switching between these two configurations in the same source tree.
- * If you don't run the `configure` script *before* running the
- `bazel build` command, the `bazel build` command will fail.
-
+* Although it is possible to build both CUDA and non-CUDA configs under the
+ same source tree, we recommend running `bazel clean` when switching between
+ these two configurations in the same source tree.
+* If you don't run the `configure` script *before* running the `bazel build`
+ command, the `bazel build` command will fail.
## Build the pip package
@@ -297,7 +334,8 @@ To build a pip package for TensorFlow with CPU-only support:
$ bazel build --config=opt //tensorflow/tools/pip_package:build_pip_package
</pre>
-To build a pip package for TensorFlow with CPU-only support for the Intel® MKL-DNN:
+To build a pip package for TensorFlow with CPU-only support for the Intel®
+MKL-DNN:
<pre>
$ bazel build --config=mkl --config=opt //tensorflow/tools/pip_package:build_pip_package
@@ -311,37 +349,35 @@ To build a pip package for TensorFlow with GPU support:
$ bazel build --config=opt --config=cuda //tensorflow/tools/pip_package:build_pip_package
</pre>
-**NOTE on gcc 5 or later:** the binary pip packages available on the
-TensorFlow website are built with gcc 4, which uses the older ABI. To
-make your build compatible with the older ABI, you need to add
-`--cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"` to your `bazel build` command.
-ABI compatibility allows custom ops built against the TensorFlow pip package
-to continue to work against your built package.
+**NOTE on gcc 5 or later:** the binary pip packages available on the TensorFlow
+website are built with gcc 4, which uses the older ABI. To make your build
+compatible with the older ABI, you need to add
+`--cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"` to your `bazel build` command. ABI
+compatibility allows custom ops built against the TensorFlow pip package to
+continue to work against your built package.
-<b>Tip:</b> By default, building TensorFlow from sources consumes
-a lot of RAM. If RAM is an issue on your system, you may limit RAM usage
-by specifying <code>--local_resources 2048,.5,1.0</code> while
-invoking `bazel`.
+<b>Tip:</b> By default, building TensorFlow from sources consumes a lot of RAM.
+If RAM is an issue on your system, you may limit RAM usage by specifying
+<code>--local_resources 2048,.5,1.0</code> while invoking `bazel`.
-The <code>bazel build</code> command builds a script named
-`build_pip_package`. Running this script as follows will build
-a `.whl` file within the `/tmp/tensorflow_pkg` directory:
+The <code>bazel build</code> command builds a script named `build_pip_package`.
+Running this script as follows will build a `.whl` file within the
+`/tmp/tensorflow_pkg` directory:
<pre>
$ <b>bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg</b>
</pre>
-
## Install the pip package
-Invoke `pip install` to install that pip package.
-The filename of the `.whl` file depends on your platform.
-For example, the following command will install the pip package
+Invoke `pip install` to install that pip package. The filename of the `.whl`
+file depends on your platform. For example, the following command will install
+the pip package
-for TensorFlow 1.9.0rc0 on Linux:
+for TensorFlow 1.9.0 on Linux:
<pre>
-$ <b>sudo pip install /tmp/tensorflow_pkg/tensorflow-1.9.0rc0-py2-none-any.whl</b>
+$ <b>sudo pip install /tmp/tensorflow_pkg/tensorflow-1.9.0-py2-none-any.whl</b>
</pre>
## Validate your installation
@@ -374,26 +410,29 @@ TensorFlow programs:
To learn more, see the [TensorFlow tutorials](../tutorials/).
-If the system outputs an error message instead of a greeting, see [Common
-installation problems](#common_installation_problems).
+If the system outputs an error message instead of a greeting, see
+[Common installation problems](#common_installation_problems).
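For reference, a minimal validation script of the kind referred to above (the
conventional greeting example; a sketch, not the only valid check) looks like
this:

```python
# Minimal validation program; prints the greeting if the installed build works.
import tensorflow as tf

hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))
```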
## Common build and installation problems
The build and installation problems you encounter typically depend on the
-operating system. See the "Common installation problems" section
-of one of the following guides:
-
- * @{$install_linux#common_installation_problems$Installing TensorFlow on Linux}
- * @{$install_mac#common_installation_problems$Installing TensorFlow on Mac OS}
- * @{$install_windows#common_installation_problems$Installing TensorFlow on Windows}
-
-Beyond the errors documented in those two guides, the following table
-notes additional errors specific to building TensorFlow. Note that we
-are relying on Stack Overflow as the repository for build and installation
-problems. If you encounter an error message not listed in the preceding
-two guides or in the following table, search for it on Stack Overflow. If
-Stack Overflow doesn't show the error message, ask a new question on
-Stack Overflow and specify the `tensorflow` tag.
+operating system. See the "Common installation problems" section of one of the
+following guides:
+
+* @{$install_linux#common_installation_problems$Installing TensorFlow on Linux}
+* @{$install_mac#common_installation_problems$Installing TensorFlow on Mac OS}
+* @{$install_windows#common_installation_problems$Installing TensorFlow on Windows}
+
+Beyond the errors documented in those guides, the following table notes
+additional errors specific to building TensorFlow. Note that we are relying on
+Stack Overflow as the repository for build and installation problems. If you
+encounter an error message not listed in the preceding guides or in the
+following table, search for it on Stack Overflow. If Stack Overflow doesn't show
+the error message, ask a new question on Stack Overflow and specify the
+`tensorflow` tag.
<table>
<tr> <th>Stack Overflow Link</th> <th>Error Message</th> </tr>
@@ -440,6 +479,7 @@ Stack Overflow and specify the `tensorflow` tag.
</table>
## Tested source configurations
+
**Linux**
<table>
<tr><th>Version:</th><th>CPU/GPU:</th><th>Python Version:</th><th>Compiler:</th><th>Build Tools:</th><th>cuDNN:</th><th>CUDA:</th></tr>
@@ -508,6 +548,7 @@ Stack Overflow and specify the `tensorflow` tag.
</table>
<a name="BuildCorJava"></a>
+
## Build the C or Java libraries
The instructions above are tailored to building the TensorFlow Python packages.
@@ -516,10 +557,12 @@ If you're interested in building the libraries for the TensorFlow C API, do the
following:
1. Follow the steps up to [Configure the installation](#ConfigureInstallation)
-2. Build the C libraries following instructions in the [README](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md).
+2. Build the C libraries following instructions in the
+ [README](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md).
-If you're interested inv building the libraries for the TensorFlow Java API,
-do the following:
+If you're interested in building the libraries for the TensorFlow Java API, do
+the following:
1. Follow the steps up to [Configure the installation](#ConfigureInstallation)
-2. Build the Java library following instructions in the [README](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md).
+2. Build the Java library following instructions in the
+ [README](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/lib_package/README.md).
diff --git a/tensorflow/docs_src/install/install_windows.md b/tensorflow/docs_src/install/install_windows.md
index 7b7b17ce81..e9061bf3c1 100644
--- a/tensorflow/docs_src/install/install_windows.md
+++ b/tensorflow/docs_src/install/install_windows.md
@@ -1,4 +1,4 @@
-# Installing TensorFlow on Windows
+# Install TensorFlow on Windows
This guide explains how to install TensorFlow on Windows. Although these
instructions might also work on other Windows variants, we have only
diff --git a/tensorflow/docs_src/install/migration.md b/tensorflow/docs_src/install/migration.md
index d6c31f96bd..19315ace2d 100644
--- a/tensorflow/docs_src/install/migration.md
+++ b/tensorflow/docs_src/install/migration.md
@@ -1,5 +1,4 @@
-
-# Transitioning to TensorFlow 1.0
+# Transition to TensorFlow 1.0
The APIs in TensorFlow 1.0 have changed in ways that are not all backwards
diff --git a/tensorflow/docs_src/mobile/README.md b/tensorflow/docs_src/mobile/README.md
new file mode 100644
index 0000000000..ecf4267265
--- /dev/null
+++ b/tensorflow/docs_src/mobile/README.md
@@ -0,0 +1,3 @@
+# TF Lite subsite
+
+This subsite directory lives in [tensorflow/contrib/lite/g3doc](../../contrib/lite/g3doc/).
diff --git a/tensorflow/docs_src/mobile/index.md b/tensorflow/docs_src/mobile/index.md
deleted file mode 100644
index 419ae7094a..0000000000
--- a/tensorflow/docs_src/mobile/index.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Overview
-
-TensorFlow was designed to be a good deep learning solution for mobile
-platforms. Currently we have two solutions for deploying machine learning
-applications on mobile and embedded devices:
-@{$mobile/mobile_intro$TensorFlow for Mobile} and @{$mobile/tflite$TensorFlow Lite}.
-
-## TensorFlow Lite versus TensorFlow Mobile
-
-Here are a few of the differences between the two:
-
-- TensorFlow Lite is an evolution of TensorFlow Mobile. In most cases, apps
- developed with TensorFlow Lite will have a smaller binary size, fewer
- dependencies, and better performance.
-
-- TensorFlow Lite is in developer preview, so not all use cases are covered yet.
- We expect you to use TensorFlow Mobile to cover production cases.
-
-- TensorFlow Lite supports only a limited set of operators, so not all models
- will work on it by default. TensorFlow for Mobile has a fuller set of
- supported functionality.
-
-TensorFlow Lite provides better performance and a small binary size on mobile
-platforms as well as the ability to leverage hardware acceleration if available
-on their platforms. In addition, it has many fewer dependencies so it can be
-built and hosted on simpler, more constrained device scenarios. TensorFlow Lite
-also allows targeting accelerators through the [Neural Networks
-API](https://developer.android.com/ndk/guides/neuralnetworks/index.html).
-
-TensorFlow Lite currently has coverage for a limited set of operators. While
-TensorFlow for Mobile supports only a constrained set of ops by default, in
-principle if you use an arbitrary operator in TensorFlow, it can be customized
-to build that kernel. Thus use cases which are not currently supported by
-TensorFlow Lite should continue to use TensorFlow for Mobile. As TensorFlow Lite
-evolves, it will gain additional operators, and the decision will be easier to
-make.
diff --git a/tensorflow/docs_src/mobile/leftnav_files b/tensorflow/docs_src/mobile/leftnav_files
deleted file mode 100644
index 97340ef7e1..0000000000
--- a/tensorflow/docs_src/mobile/leftnav_files
+++ /dev/null
@@ -1,15 +0,0 @@
-index.md
-### TensorFlow Lite
-tflite/index.md
-tflite/devguide.md
-tflite/demo_android.md
-tflite/demo_ios.md
-tflite/performance.md
->>>
-### TensorFlow Mobile
-mobile_intro.md
-android_build.md
-ios_build.md
-linking_libs.md
-prepare_models.md
-optimizing.md
diff --git a/tensorflow/docs_src/performance/performance_guide.md b/tensorflow/docs_src/performance/performance_guide.md
index cb0f5ca924..dafacbe379 100644
--- a/tensorflow/docs_src/performance/performance_guide.md
+++ b/tensorflow/docs_src/performance/performance_guide.md
@@ -464,7 +464,7 @@ equal to the number of physical cores rather than logical cores.
config = tf.ConfigProto()
config.intra_op_parallelism_threads = 44
config.inter_op_parallelism_threads = 44
- tf.session(config=config)
+ tf.Session(config=config)
```
diff --git a/tensorflow/docs_src/performance/xla/developing_new_backend.md b/tensorflow/docs_src/performance/xla/developing_new_backend.md
index 74ea15bb2b..840f6983c2 100644
--- a/tensorflow/docs_src/performance/xla/developing_new_backend.md
+++ b/tensorflow/docs_src/performance/xla/developing_new_backend.md
@@ -44,7 +44,7 @@ It is possible to model a new
implementation on the existing [`xla::CPUCompiler`]
(https://www.tensorflow.org/code/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc)
and [`xla::GPUCompiler`]
-(https://www.tensorflow.org/code/tensorflow/compiler/xla/service/gpu/gpu_compiler.cc)
+(https://www.tensorflow.org/code/tensorflow/compiler/xla/service/gpu/nvptx_compiler.cc)
classes, since these already emit LLVM IR. Depending on the nature of the
hardware, it is possible that many of the LLVM IR generation aspects will have
to be changed, but a lot of code can be shared with the existing backends.
diff --git a/tensorflow/docs_src/performance/xla/operation_semantics.md b/tensorflow/docs_src/performance/xla/operation_semantics.md
index 68c427a316..fe9afc4ecb 100644
--- a/tensorflow/docs_src/performance/xla/operation_semantics.md
+++ b/tensorflow/docs_src/performance/xla/operation_semantics.md
@@ -791,8 +791,6 @@ DynamicSlice extracts a sub-array from the input array at dynamic
`size_indices`, which specify the end point of exclusive slice intervals in each
dimension: [start, start + size). The shape of `start_indices` must be rank ==
1, with dimension size equal to the rank of `operand`.
-Note: handling of out-of-bounds slice indices (generated by incorrect runtime
-calculation of 'start_indices') is currently implementation-defined.
<b> `DynamicSlice(operand, start_indices, size_indices)` </b>
@@ -812,6 +810,17 @@ calculation of 'start_indices') is currently implementation-defined.
: : : dimension to avoid wrapping modulo :
: : : dimension size. :
+The effective slice indices are computed by applying the following
+transformation for each index `i` in `[1, N)` before performing the slice:
+
+```
+start_indices[i] = clamp(start_indices[i], 0, operand.dimension_size[i] - size_indices[i])
+```
+
+This ensures that the extracted slice is always in-bounds with respect to the
+operand array. If the slice is in-bounds before the transformation is applied,
+the transformation has no effect.
+
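As a rough illustration of the clamping rule, here is a NumPy sketch (assuming
the rule is applied independently in each dimension; this is not the XLA
implementation):

```python
import numpy as np

def dynamic_slice(operand, start_indices, size_indices):
    # Clamp each start index so the slice stays in bounds, then slice.
    starts = [min(max(s, 0), dim - size)
              for s, dim, size in zip(start_indices, operand.shape, size_indices)]
    return operand[tuple(slice(s, s + size)
                         for s, size in zip(starts, size_indices))]

a = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
print(dynamic_slice(a, [3], [4]))  # start index 3 clamps to 1 -> [1. 2. 3. 4.]
```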
1-dimensional example:
```
@@ -847,8 +856,6 @@ The shape of `update` determines the shape of the sub-array of the result which
is updated.
The shape of `start_indices` must be rank == 1, with dimension size equal to
the rank of `operand`.
-Note: handling of out-of-bounds slice indices (generated by incorrect runtime
-calculation of 'start_indices') is currently implementation-defined.
<b> `DynamicUpdateSlice(operand, update, start_indices)` </b>
@@ -866,6 +873,17 @@ calculation of 'start_indices') is currently implementation-defined.
: : : dimension. Value must be greater than or equal :
: : : to zero. :
+The effective slice indices are computed by applying the following
+transformation for each index `i` in `[1, N)` before performing the slice:
+
+```
+start_indices[i] = clamp(start_indices[i], 0, operand.dimension_size[i] - update.dimension_size[i])
+```
+
+This ensures that the updated slice is always in-bounds with respect to the
+operand array. If the slice is in-bounds before the transformation is applied,
+the transformation has no effect.
+
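A matching NumPy sketch for the update case (again a hedged illustration,
assuming per-dimension clamping):

```python
import numpy as np

def dynamic_update_slice(operand, update, start_indices):
    # Clamp each start index so the updated region stays in bounds, then write.
    out = operand.copy()
    starts = [min(max(s, 0), dim - usize)
              for s, dim, usize in zip(start_indices, operand.shape, update.shape)]
    out[tuple(slice(s, s + usize)
              for s, usize in zip(starts, update.shape))] = update
    return out

a = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
print(dynamic_update_slice(a, np.array([9.0, 9.0]), [4]))  # start clamps to 3
```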
1-dimensional example:
```
@@ -1293,6 +1311,19 @@ Infeed of the device.
> which case the compiler will provide information about how the Infeed
> operations are serialized in the compiled program.
+## Iota
+
+<b> `Iota(type, size)` </b>
+
+Builds a constant literal on device rather than a potentially large host
+transfer. Creates a rank 1 tensor of values starting at zero and incrementing
+by one.
+
+Arguments | Type | Semantics
+------------------ | --------------- | ---------------------------
+`type`             | `PrimitiveType` | The element type U of the values in the tensor.
+`size` | `int64` | The number of elements in the tensor.
+
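For intuition, the result matches a host-side arange of the requested length; a
hedged NumPy analogy (not the XLA builder API itself):

```python
import numpy as np

def iota(dtype, size):
    # Rank-1 tensor of values 0, 1, ..., size - 1 of the given element type.
    return np.arange(size, dtype=dtype)

print(iota(np.int64, 5))  # [0 1 2 3 4]
```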
## Map
See also
@@ -1303,7 +1334,7 @@ See also
| Arguments | Type | Semantics |
| ----------------- | ---------------------- | ------------------------------ |
| `operands` | sequence of N `XlaOp`s | N arrays of types T_0..T_{N-1} |
-| `computation` | `XlaComputation` | computation of type `T_0, T_1, |
+| `computation` | `XlaComputation` | computation of type `T_0, T_1, |
: : : ..., T_{N + M -1} -> S` with N :
: : : parameters of type T and M of :
: : : arbitrary type :
diff --git a/tensorflow/docs_src/tutorials/_index.yaml b/tensorflow/docs_src/tutorials/_index.yaml
index 07d561b8a2..9534114689 100644
--- a/tensorflow/docs_src/tutorials/_index.yaml
+++ b/tensorflow/docs_src/tutorials/_index.yaml
@@ -2,6 +2,7 @@ project_path: /_project.yaml
book_path: /_book.yaml
description: <!--no description-->
landing_page:
+ custom_css_path: /site-assets/css/style.css
show_side_navs: True
rows:
- description: >
@@ -14,57 +15,6 @@ landing_page:
</p>
items:
- custom_html: >
- <style>
- .tfo-button-primary {
- background-color: #fca851;
- }
- .tfo-button-primary:hover {
- background-color: #ef6c02;
- }
-
- a.colab-button {
- display: inline-block;
- background: rgba(255, 255, 255, 0.75);
- padding: 4px 8px;
- border-radius: 4px;
- font-size: 11px!important;
- text-decoration: none;
- color:#aaa;border: none;
- font-weight: 300;
- border: solid 1px rgba(0, 0, 0, 0.08);
- border-bottom-color: rgba(0, 0, 0, 0.15);
- text-transform: uppercase;
- line-height: 16px
- }
- a.colab-button:hover {
- color: #666;
- background: white;
- border-color: rgba(0, 0, 0, 0.2);
- }
- a.colab-button span {
- background-image: url("/images/colab_logo_button.svg");
- background-repeat:no-repeat;background-size:20px;
- background-position-y:2px;display:inline-block;
- padding-left:24px;border-radius:4px;
- text-decoration:none;
- }
-
- /* adjust code block for smaller screens */
- @media screen and (max-width: 1000px) {
- .tfo-landing-row-item-code-block {
- flex-direction: column !important;
- }
- .tfo-landing-row-item-code-block > .devsite-landing-row-item-code {
- /*display: none;*/
- width: 100%;
- }
- }
- @media screen and (max-width: 720px) {
- .tfo-landing-row-item-code-block {
- display: none;
- }
- }
- </style>
<div class="devsite-landing-row-item-description">
<h3 class="hide-from-toc">Learn and use ML</h3>
<div class="devsite-landing-row-item-description-content">
@@ -175,7 +125,7 @@ landing_page:
<a href="/guide/estimators">Estimators guide</a>.
</p>
<ol style="padding-left: 20px;">
- <li><a href="/guide/premade_estimators">Premade Estimators guide</a></li>
+ <li><a href="/tutorials/estimators/linear">Build a linear model with Estimators</a></li>
<li><a href="https://github.com/tensorflow/models/tree/master/official/wide_deep" class="external">Wide and deep learning with Estimators</a></li>
<li><a href="https://github.com/tensorflow/models/tree/master/official/boosted_trees" class="external">Boosted trees</a></li>
<li><a href="/hub/tutorials/text_classification_with_tf_hub">How to build a simple text classifier with TF-Hub</a></li>
diff --git a/tensorflow/docs_src/tutorials/_toc.yaml b/tensorflow/docs_src/tutorials/_toc.yaml
index 4db97e35fc..d33869af6e 100644
--- a/tensorflow/docs_src/tutorials/_toc.yaml
+++ b/tensorflow/docs_src/tutorials/_toc.yaml
@@ -44,6 +44,8 @@ toc:
- title: ML at production scale
style: accordion
section:
+ - title: Linear model with Estimators
+ path: /tutorials/estimators/linear
- title: Wide and deep learning
path: https://github.com/tensorflow/models/tree/master/official/wide_deep
status: external
diff --git a/tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md b/tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md
index b45fbefac0..b564a27ecf 100644
--- a/tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md
+++ b/tensorflow/docs_src/tutorials/eager/custom_training_walkthrough.md
@@ -1,3 +1,3 @@
# Custom training: walkthrough
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/eager.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/eager/custom_training_walkthrough.ipynb)
diff --git a/tensorflow/docs_src/tutorials/eager/index.md b/tensorflow/docs_src/tutorials/eager/index.md
index 5445e0c343..a13b396094 100644
--- a/tensorflow/docs_src/tutorials/eager/index.md
+++ b/tensorflow/docs_src/tutorials/eager/index.md
@@ -5,7 +5,7 @@ operations. Write custom layers, forward passes, and training loops with
auto&nbsp;differentiation. Start with these notebooks, then read the
[eager execution guide](../../guide/eager).
-1. <span>[Eager execution](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/eager_intro.ipynb){:.external}</span>
+1. <span>[Eager execution](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/eager_basics.ipynb){:.external}</span>
2. <span>[Automatic differentiation and gradient tape](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/automatic_differentiation.ipynb){:.external}</span>
3. <span>[Custom training: basics](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_training.ipynb){:.external}</span>
4. <span>[Custom layers](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/notebooks/custom_layers.ipynb){:.external}</span>
diff --git a/tensorflow/docs_src/tutorials/estimators/linear.md b/tensorflow/docs_src/tutorials/estimators/linear.md
new file mode 100644
index 0000000000..067a33ac03
--- /dev/null
+++ b/tensorflow/docs_src/tutorials/estimators/linear.md
@@ -0,0 +1,3 @@
+# Build a linear model with Estimators
+
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/estimators/linear.ipynb)
diff --git a/tensorflow/docs_src/tutorials/keras/basic_classification.md b/tensorflow/docs_src/tutorials/keras/basic_classification.md
index 91bbd85b24..e028af99b9 100644
--- a/tensorflow/docs_src/tutorials/keras/basic_classification.md
+++ b/tensorflow/docs_src/tutorials/keras/basic_classification.md
@@ -1,3 +1,3 @@
# Basic Classification
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/basic_classification.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/basic_classification.ipynb)
diff --git a/tensorflow/docs_src/tutorials/keras/basic_regression.md b/tensorflow/docs_src/tutorials/keras/basic_regression.md
index a535f22f5a..8721b7aca1 100644
--- a/tensorflow/docs_src/tutorials/keras/basic_regression.md
+++ b/tensorflow/docs_src/tutorials/keras/basic_regression.md
@@ -1,3 +1,3 @@
# Basic Regression
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/basic_regression.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/basic_regression.ipynb)
diff --git a/tensorflow/docs_src/tutorials/keras/basic_text_classification.md b/tensorflow/docs_src/tutorials/keras/basic_text_classification.md
index 7c5d4f7896..c2a16bdd20 100644
--- a/tensorflow/docs_src/tutorials/keras/basic_text_classification.md
+++ b/tensorflow/docs_src/tutorials/keras/basic_text_classification.md
@@ -1,3 +1,3 @@
# Basic Text Classification
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/basic_text_classification.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/basic_text_classification.ipynb)
diff --git a/tensorflow/docs_src/tutorials/keras/overfit_and_underfit.md b/tensorflow/docs_src/tutorials/keras/overfit_and_underfit.md
index e5b5ae7b5a..f07f3addd8 100644
--- a/tensorflow/docs_src/tutorials/keras/overfit_and_underfit.md
+++ b/tensorflow/docs_src/tutorials/keras/overfit_and_underfit.md
@@ -1,3 +1,3 @@
# Overfitting and Underfitting
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/overfit_and_underfit.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/overfit_and_underfit.ipynb)
diff --git a/tensorflow/docs_src/tutorials/keras/save_and_restore_models.md b/tensorflow/docs_src/tutorials/keras/save_and_restore_models.md
index 44b3772945..a799b379a0 100644
--- a/tensorflow/docs_src/tutorials/keras/save_and_restore_models.md
+++ b/tensorflow/docs_src/tutorials/keras/save_and_restore_models.md
@@ -1,3 +1,3 @@
# Save and restore Models
-[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/get_started/save_and_restore_models.ipynb)
+[Colab notebook](https://colab.research.google.com/github/tensorflow/models/blob/master/samples/core/tutorials/keras/save_and_restore_models.ipynb)
diff --git a/tensorflow/examples/android/src/org/tensorflow/demo/TensorFlowObjectDetectionAPIModel.java b/tensorflow/examples/android/src/org/tensorflow/demo/TensorFlowObjectDetectionAPIModel.java
index 614d3c7dd7..9739e58018 100644
--- a/tensorflow/examples/android/src/org/tensorflow/demo/TensorFlowObjectDetectionAPIModel.java
+++ b/tensorflow/examples/android/src/org/tensorflow/demo/TensorFlowObjectDetectionAPIModel.java
@@ -137,7 +137,7 @@ public class TensorFlowObjectDetectionAPIModel implements Classifier {
Trace.beginSection("recognizeImage");
Trace.beginSection("preprocessBitmap");
- // Preprocess the image data from 0-255 int to normalized float based
+ // Preprocess the image data to extract R, G and B bytes from int of form 0x00RRGGBB
// on the provided parameters.
bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
diff --git a/tensorflow/examples/speech_commands/freeze.py b/tensorflow/examples/speech_commands/freeze.py
index 7657b23c60..89e790d4e4 100644
--- a/tensorflow/examples/speech_commands/freeze.py
+++ b/tensorflow/examples/speech_commands/freeze.py
@@ -130,7 +130,7 @@ def main(_):
FLAGS.clip_stride_ms, FLAGS.window_size_ms, FLAGS.window_stride_ms,
FLAGS.feature_bin_count, FLAGS.model_architecture, FLAGS.preprocess)
if FLAGS.quantize:
- tf.contrib.quantize.create_training_graph(quant_delay=0)
+ tf.contrib.quantize.create_eval_graph()
models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
# Turn all the variables into inline constants inside the graph and save it.
diff --git a/tensorflow/examples/speech_commands/models.py b/tensorflow/examples/speech_commands/models.py
index 65ae3b1511..4d1454be0d 100644
--- a/tensorflow/examples/speech_commands/models.py
+++ b/tensorflow/examples/speech_commands/models.py
@@ -302,7 +302,7 @@ def create_conv_model(fingerprint_input, model_settings, is_training):
label_count = model_settings['label_count']
final_fc_weights = tf.get_variable(
name='final_fc_weights',
- initializer=tf.truncated_normal_initializer,
+ initializer=tf.truncated_normal_initializer(stddev=0.01),
shape=[second_conv_element_count, label_count])
final_fc_bias = tf.get_variable(
name='final_fc_bias',
diff --git a/tensorflow/examples/tutorials/mnist/mnist_deep.py b/tensorflow/examples/tutorials/mnist/mnist_deep.py
index 1e0294db27..5d8d8d84fe 100644
--- a/tensorflow/examples/tutorials/mnist/mnist_deep.py
+++ b/tensorflow/examples/tutorials/mnist/mnist_deep.py
@@ -34,6 +34,8 @@ from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
+import numpy
+
FLAGS = None
@@ -164,8 +166,15 @@ def main(_):
print('step %d, training accuracy %g' % (i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
- print('test accuracy %g' % accuracy.eval(feed_dict={
- x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
+ # compute in batches to avoid OOM on GPUs
+ accuracy_l = []
+ for _ in range(20):
+ batch = mnist.test.next_batch(500, shuffle=False)
+ accuracy_l.append(accuracy.eval(feed_dict={x: batch[0],
+ y_: batch[1],
+ keep_prob: 1.0}))
+ print('test accuracy %g' % numpy.mean(accuracy_l))
+
if __name__ == '__main__':
parser = argparse.ArgumentParser()
diff --git a/tensorflow/go/README.md b/tensorflow/go/README.md
index e251356ec8..288a32530a 100644
--- a/tensorflow/go/README.md
+++ b/tensorflow/go/README.md
@@ -46,7 +46,7 @@ from source.
```sh
cd ${GOPATH}/src/github.com/tensorflow/tensorflow
./configure
- bazel build --config opt //tensorflow:libtensorflow.so
+ bazel build -c opt //tensorflow:libtensorflow.so
```
This can take a while (tens of minutes, more if also building for GPU).
diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go
index f49e1cecaf..18d7425323 100644
--- a/tensorflow/go/op/wrappers.go
+++ b/tensorflow/go/op/wrappers.go
@@ -327,12 +327,12 @@ func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQua
return op.Output(0)
}
-// Scatter `updates` into a new (initially zero) tensor according to `indices`.
+// Scatter `updates` into a new tensor according to `indices`.
//
-// Creates a new tensor by applying sparse `updates` to individual
-// values or slices within a zero tensor of the given `shape` according to
-// indices. This operator is the inverse of the @{tf.gather_nd} operator which
-// extracts values or slices from a given tensor.
+// Creates a new tensor by applying sparse `updates` to individual values or
+// slices within a tensor (initially zero for numeric, empty for string) of
+// the given `shape` according to indices. This operator is the inverse of the
+// @{tf.gather_nd} operator which extracts values or slices from a given tensor.
//
// **WARNING**: The order in which updates are applied is nondeterministic, so the
// output will be nondeterministic if `indices` contains duplicates.
@@ -430,7 +430,8 @@ type QuantizeAndDequantizeV2Attr func(optionalAttr)
// QuantizeAndDequantizeV2SignedInput sets the optional signed_input attribute to value.
//
-// value: If the quantization is signed or unsigned.
+// value: Whether the quantization is signed or unsigned. (actually this parameter should
+// have been called <b>`signed_output`</b>)
// If not specified, defaults to true
func QuantizeAndDequantizeV2SignedInput(value bool) QuantizeAndDequantizeV2Attr {
return func(m optionalAttr) {
@@ -450,7 +451,7 @@ func QuantizeAndDequantizeV2NumBits(value int64) QuantizeAndDequantizeV2Attr {
// QuantizeAndDequantizeV2RangeGiven sets the optional range_given attribute to value.
//
-// value: If the range is given or should be computed from the tensor.
+// value: Whether the range is given or should be determined from the `input` tensor.
// If not specified, defaults to false
func QuantizeAndDequantizeV2RangeGiven(value bool) QuantizeAndDequantizeV2Attr {
return func(m optionalAttr) {
@@ -461,61 +462,64 @@ func QuantizeAndDequantizeV2RangeGiven(value bool) QuantizeAndDequantizeV2Attr {
// Quantizes then dequantizes a tensor.
//
// This op simulates the precision loss from the quantized forward pass by:
+//
// 1. Quantizing the tensor to fixed point numbers, which should match the target
// quantization method when it is used in inference.
// 2. Dequantizing it back to floating point numbers for the following ops, most
// likely matmul.
//
-// There are different ways to quantize. This version does not use the full range
-// of the output type, choosing to elide the lowest possible value for symmetry
-// (e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit
-// quantization), so that 0.0 maps to 0.
-//
-// To perform this op, we first find the range of values in our tensor. The range
-// we use is always centered on 0, so we find m such that
-//
-// 1. m = max(abs(input_min), abs(input_max)) if range_given is true,
-// 2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
+// There are different ways to quantize. This version uses only scaling, so 0.0
+// maps to 0.
//
-// Our input tensor range is then [-m, m].
+// From the specified 'num_bits' in the quantized output type, it determines
+// minimum and maximum representable quantized values.
//
-// Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].
-// If signed_input is true, this is
+// e.g.
//
-// [min_fixed, max_fixed ] =
-// [-(1 << (num_bits - 1) - 1), (1 << (num_bits - 1)) - 1].
+// * [-128, 127] for signed, num_bits = 8, or
+// * [0, 255] for unsigned, num_bits = 8.
//
-// Otherwise, if signed_input is false, the fixed-point range is
+// If range_given == False, the initial input_min, input_max will be determined
+// automatically as the minimum and maximum values in the input tensor, otherwise
+// the specified values of input_min, input_max are used.
//
-// [min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
+// Note: If the input_min, input_max are specified, they do not need to equal the
+// actual minimum and maximum values in the tensor. e.g. in some cases it may be
+// beneficial to specify these values such that the low probability extremes of the
+// input distribution are clipped.
//
-// From this we compute our scaling factor, s:
-//
-// s = (max_fixed - min_fixed) / (2 * m).
+// This op determines the maximum scale_factor that would map the initial
+// [input_min, input_max] range to a range that lies within the representable
+// quantized range.
//
-// Now we can quantize and dequantize the elements of our tensor. An element e
-// is transformed into e':
+// It determines the scale from one of input_min and input_max, then updates the
+// other one to maximize the representable range.
//
-// e' = (e * s).round_to_nearest() / s.
+// e.g.
//
-// Note that we have a different number of buckets in the signed vs. unsigned
-// cases. For example, if num_bits == 8, we get 254 buckets in the signed case
-// vs. 255 in the unsigned case.
+// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
+// 5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it
+// would update input_max to be 127 / 12.8 = 9.921875.
+// * if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
+// 10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it
+// would update input_min to be -128.0 / 12.7 = -10.07874.
+// * if the output is unsigned, input_min is forced to be 0, and only the
+// specified input_max is used.
//
-// For example, suppose num_bits = 8 and m = 1. Then
+// After determining the scale_factor and updating the input range, it applies the
+// following to each value in the 'input' tensor.
//
-// [min_fixed, max_fixed] = [-127, 127], and
-// s = (127 + 127) / 2 = 127.
+// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
//
-// Given the vector {-1, -0.5, 0, 0.3}, this is quantized to
-// {-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
//
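+// As a minimal Go usage sketch (the scope, constants, and values below are
+// illustrative assumptions, not part of the op definition):
+//
+// ```
+// s := op.NewScope()
+// in := op.Const(s, []float32{-10.0, -3.0, 0.0, 5.0})
+// out := op.QuantizeAndDequantizeV2(s, in, op.Const(s, float32(-10.0)), op.Const(s, float32(5.0)),
+//     op.QuantizeAndDequantizeV2RangeGiven(true), op.QuantizeAndDequantizeV2NumBits(8))
+// _ = out
+// ```
+//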
// Arguments:
// input: Tensor to quantize and then dequantize.
-// input_min: If range_given, this is the min of the range, otherwise this input
-// will be ignored.
-// input_max: If range_given, this is the max of the range, otherwise this input
-// will be ignored.
+// input_min: If `range_given == True`, this specifies the minimum input value that needs to
+// be represented, otherwise it is determined from the min value of the `input`
+// tensor.
+// input_max: If `range_given == True`, this specifies the maximum input value that needs to
+// be represented, otherwise it is determined from the max value of the `input`
+// tensor.
func QuantizeAndDequantizeV2(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV2Attr) (output tf.Output) {
if scope.Err() != nil {
return
@@ -2249,7 +2253,7 @@ func CheckNumerics(scope *Scope, tensor tf.Output, message string) (output tf.Ou
// (K-1)-dimensional tensor of indices into `params`, where each element defines a
// slice of `params`:
//
-// output[i_0, ..., i_{K-2}] = params[indices[i0, ..., i_{K-2}]]
+// output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
//
// Whereas in @{tf.gather} `indices` defines slices into the first
// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
@@ -3015,6 +3019,45 @@ func Concat(scope *Scope, concat_dim tf.Output, values []tf.Output) (output tf.O
return op.Output(0)
}
+// Broadcast an array for a compatible shape.
+//
+// Broadcasting is the process of making arrays have compatible shapes
+// for arithmetic operations. Two shapes are compatible if, for each
+// dimension pair, they are either equal or one of them is one. When trying
+// to broadcast a Tensor to a shape, it starts with the trailing dimensions,
+// and works its way forward.
+//
+// For example,
+// ```
+// >>> x = tf.constant([1, 2, 3])
+// >>> y = tf.broadcast_to(x, [3, 3])
+// >>> sess.run(y)
+// array([[1, 2, 3],
+// [1, 2, 3],
+// [1, 2, 3]], dtype=int32)
+// ```
+// In the above example, the input Tensor with shape `[3]`
+// is broadcast to an output Tensor with shape `[3, 3]`.
+//
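+// A minimal Go sketch of the same broadcast, built and run through the Go API
+// (variable names are illustrative; assumes the usual `tf` and `op` package
+// imports):
+//
+// ```
+// s := op.NewScope()
+// x := op.Const(s, []int32{1, 2, 3})
+// y := op.BroadcastTo(s, x, op.Const(s, []int32{3, 3}))
+// graph, _ := s.Finalize()
+// sess, _ := tf.NewSession(graph, nil)
+// out, _ := sess.Run(nil, []tf.Output{y}, nil)
+// fmt.Println(out[0].Value()) // [[1 2 3] [1 2 3] [1 2 3]]
+// ```
+//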
+// Arguments:
+// input: A Tensor to broadcast.
+// shape: A 1-D `int` Tensor. The shape of the desired output.
+//
+// Returns A Tensor.
+func BroadcastTo(scope *Scope, input tf.Output, shape tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "BroadcastTo",
+ Input: []tf.Input{
+ input, shape,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Converts a flat index or array of flat indices into a tuple of
//
// coordinate arrays.
@@ -3506,7 +3549,7 @@ func Relu6(scope *Scope, features tf.Output) (activations tf.Output) {
// segments.
//
// Computes a tensor such that
-// `(output[i] = sum_{j...} data[j...]` where the sum is over tuples `j...` such
+// \\(output[i] = sum_{j...} data[j...]\\) where the sum is over tuples `j...` such
// that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids`
// need not be sorted and need not cover all values in the full
// range of valid values.
@@ -3875,11 +3918,13 @@ func Atan2(scope *Scope, y tf.Output, x tf.Output) (z tf.Output) {
//
// window_size: A scalar representing the number of elements in the
// sliding window.
-// stride: A scalar representing the steps moving the sliding window
-// forward in one iteration. It must be in `[1, window_size)`.
+// window_shift: A scalar representing the steps moving the sliding window
+// forward in one iteration. It must be positive.
+// window_stride: A scalar representing the stride of the input elements of the sliding window.
+// It must be positive.
//
//
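+// A minimal Go sketch of the new signature (assumes an op.Scope `s` and an
+// existing dataset variant `ds` of scalar int64 elements; names and values
+// are illustrative):
+//
+// ```
+// slid := op.SlideDataset(s, ds,
+//     op.Const(s, int64(3)), // window_size
+//     op.Const(s, int64(1)), // window_shift
+//     op.Const(s, int64(1)), // window_stride
+//     []tf.DataType{tf.Int64}, []tf.Shape{tf.MakeShape(-1)})
+// _ = slid
+// ```
+//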
-func SlideDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, stride tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+func SlideDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, window_shift tf.Output, window_stride tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {
return
}
@@ -3887,7 +3932,7 @@ func SlideDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output,
opspec := tf.OpSpec{
Type: "SlideDataset",
Input: []tf.Input{
- input_dataset, window_size, stride,
+ input_dataset, window_size, window_shift, window_stride,
},
Attrs: attrs,
}
@@ -4902,6 +4947,21 @@ func Add(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
return op.Output(0)
}
+// Computes the derivative of a Gamma random sample w.r.t. `alpha`.
+func RandomGammaGrad(scope *Scope, alpha tf.Output, sample tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "RandomGammaGrad",
+ Input: []tf.Input{
+ alpha, sample,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Computes square of x element-wise.
//
// I.e., \\(y = x * x = x^2\\).
@@ -5650,7 +5710,7 @@ func LessEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
//
// For each batch `i` and class `j` we have
//
-// softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))
+// $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
//
// Arguments:
// logits: 2-D with shape `[batch_size, num_classes]`.
@@ -6828,8 +6888,9 @@ type CropAndResizeAttr func(optionalAttr)
// CropAndResizeMethod sets the optional method attribute to value.
//
-// value: A string specifying the interpolation method. Only 'bilinear' is
-// supported for now.
+// value: A string specifying the sampling method for resizing. It can be either
+// `"bilinear"` or `"nearest"`, and defaults to `"bilinear"`. Currently two sampling
+// methods are supported: bilinear and nearest neighbor.
// If not specified, defaults to "bilinear"
func CropAndResizeMethod(value string) CropAndResizeAttr {
return func(m optionalAttr) {
@@ -6847,19 +6908,23 @@ func CropAndResizeExtrapolationValue(value float32) CropAndResizeAttr {
}
}
-// Extracts crops from the input image tensor and bilinearly resizes them (possibly
+// Extracts crops from the input image tensor and resizes them.
//
-// with aspect ratio change) to a common output size specified by `crop_size`. This
-// is more general than the `crop_to_bounding_box` op which extracts a fixed size
-// slice from the input image and does not allow resizing or aspect ratio change.
+// Extracts crops from the input image tensor and resizes them using bilinear
+// sampling or nearest neighbor sampling (possibly with aspect ratio change) to a
+// common output size specified by `crop_size`. This is more general than the
+// `crop_to_bounding_box` op which extracts a fixed size slice from the input image
+// and does not allow resizing or aspect ratio change.
//
// Returns a tensor with `crops` from the input `image` at positions defined at the
// bounding box locations in `boxes`. The cropped boxes are all resized (with
-// bilinear interpolation) to a fixed `size = [crop_height, crop_width]`. The
-// result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. The
-// resizing is corner aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the
-// method will give identical results to using `tf.image.resize_bilinear()`
-// with `align_corners=True`.
+// bilinear or nearest neighbor interpolation) to a fixed
+// `size = [crop_height, crop_width]`. The result is a 4-D tensor
+// `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned.
+// In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical
+// results to using `tf.image.resize_bilinear()` or
+// `tf.image.resize_nearest_neighbor()` (depending on the `method` argument) with
+// `align_corners=True`.
//
// Arguments:
// image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
@@ -7242,6 +7307,26 @@ func Min(scope *Scope, input tf.Output, axis tf.Output, optional ...MinAttr) (ou
return op.Output(0)
}
+// Computes the Bessel i1e function of `x` element-wise.
+//
+// Exponentially scaled modified Bessel function of order 1 defined as
+// `bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)`.
+//
+// This function is faster and more numerically stable than `bessel_i1(x)`.
+func BesselI1e(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "BesselI1e",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Transforms a Tensor into a serialized TensorProto proto.
//
// Arguments:
@@ -8437,6 +8522,21 @@ func DataFormatVecPermute(scope *Scope, x tf.Output, optional ...DataFormatVecPe
return op.Output(0)
}
+// Computes the gradient of `igamma(a, x)` wrt `a`.
+func IgammaGradA(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "IgammaGradA",
+ Input: []tf.Input{
+ a, x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Converts each string in the input Tensor to its hash mod by a number of buckets.
//
// The hash function is deterministic on the content of the string within the
@@ -9101,6 +9201,85 @@ func ResourceScatterDiv(scope *Scope, resource tf.Output, indices tf.Output, upd
return scope.AddOperation(opspec)
}
+// ResourceScatterNdAddAttr is an optional argument to ResourceScatterNdAdd.
+type ResourceScatterNdAddAttr func(optionalAttr)
+
+// ResourceScatterNdAddUseLocking sets the optional use_locking attribute to value.
+//
+// value: An optional bool. Defaults to True. If True, the assignment will
+// be protected by a lock; otherwise the behavior is undefined,
+// but may exhibit less contention.
+// If not specified, defaults to true
+func ResourceScatterNdAddUseLocking(value bool) ResourceScatterNdAddAttr {
+ return func(m optionalAttr) {
+ m["use_locking"] = value
+ }
+}
+
+// Adds sparse `updates` to individual values or slices within a given
+//
+// variable according to `indices`.
+//
+// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
+//
+// `indices` must be integer tensor, containing indices into `ref`.
+// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+//
+// The innermost dimension of `indices` (with length `K`) corresponds to
+// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
+// dimension of `ref`.
+//
+// `updates` is `Tensor` of rank `Q-1+P-K` with shape:
+//
+// ```
+// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
+// ```
+//
+// For example, say we want to add 4 scattered elements to a rank-1 tensor
+// with 8 elements. In Python, that update would look like this:
+//
+// ```python
+// ref = tfe.Variable([1, 2, 3, 4, 5, 6, 7, 8])
+// indices = tf.constant([[4], [3], [1], [7]])
+// updates = tf.constant([9, 10, 11, 12])
+// update = tf.scatter_nd_add(ref, indices, updates)
+// with tf.Session() as sess:
+//   print(sess.run(update))
+// ```
+//
+// The resulting update to ref would look like this:
+//
+// [1, 13, 3, 14, 14, 6, 7, 20]
+//
+// See @{tf.scatter_nd} for more details about how to make updates to
+// slices.
+//
+// Arguments:
+// ref: A resource handle. Must be from a VarHandleOp.
+// indices: A Tensor. Must be one of the following types: int32, int64.
+// A tensor of indices into ref.
+// updates: A Tensor. Must have the same type as ref. A tensor of
+// values to add to ref.
+//
+// Returns the created operation.
+func ResourceScatterNdAdd(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdAddAttr) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "ResourceScatterNdAdd",
+ Input: []tf.Input{
+ ref, indices, updates,
+ },
+ Attrs: attrs,
+ }
+ return scope.AddOperation(opspec)
+}
+
// Mutually reduces multiple tensors of identical type and shape.
func CollectiveReduce(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, merge_op string, final_op string, subdiv_offsets []int64) (data tf.Output) {
if scope.Err() != nil {
@@ -9161,6 +9340,68 @@ func StatelessRandomNormal(scope *Scope, shape tf.Output, seed tf.Output, option
return op.Output(0)
}
+// StringSplitV2Attr is an optional argument to StringSplitV2.
+type StringSplitV2Attr func(optionalAttr)
+
+// StringSplitV2Maxsplit sets the optional maxsplit attribute to value.
+//
+// value: An `int`. If `maxsplit > 0`, limit of the split of the result.
+// If not specified, defaults to -1
+func StringSplitV2Maxsplit(value int64) StringSplitV2Attr {
+ return func(m optionalAttr) {
+ m["maxsplit"] = value
+ }
+}
+
+// Split elements of `source` based on `sep` into a `SparseTensor`.
+//
+// Let N be the size of source (typically N will be the batch size). Split each
+// element of `source` based on `sep` and return a `SparseTensor`
+// containing the split tokens. Empty tokens are ignored.
+//
+// For example, if N = 2, source[0] is 'hello world' and source[1] is 'a b c',
+// then the output will be
+// ```
+// st.indices = [0, 0;
+// 0, 1;
+// 1, 0;
+// 1, 1;
+// 1, 2]
+// st.shape = [2, 3]
+// st.values = ['hello', 'world', 'a', 'b', 'c']
+// ```
+//
+// If `sep` is given, consecutive delimiters are not grouped together and are
+// deemed to delimit empty strings. For example, a source of `"1<>2<><>3"` and
+// a sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
+// string, consecutive whitespace is regarded as a single separator, and the
+// result will contain no empty strings at the start or end if the string has
+// leading or trailing whitespace.
+//
+// Note that the above-mentioned behavior matches Python's str.split.
+//
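+// A minimal Go sketch of the example above (assumes an op.Scope `s`; names
+// are illustrative):
+//
+// ```
+// src := op.Const(s, []string{"hello world", "a b c"})
+// indices, values, shape := op.StringSplitV2(s, src, op.Const(s, ""))
+// _, _, _ = indices, values, shape
+// ```
+//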
+// Arguments:
+// input: `1-D` string `Tensor`, the strings to split.
+// sep: `0-D` string `Tensor`, the delimiter character.
+func StringSplitV2(scope *Scope, input tf.Output, sep tf.Output, optional ...StringSplitV2Attr) (indices tf.Output, values tf.Output, shape tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "StringSplitV2",
+ Input: []tf.Input{
+ input, sep,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2)
+}
+
// MaxPoolAttr is an optional argument to MaxPool.
type MaxPoolAttr func(optionalAttr)
@@ -9245,9 +9486,11 @@ func SparseMatMulBIsSparse(value bool) SparseMatMulAttr {
// Multiply matrix "a" by matrix "b".
//
// The inputs must be two-dimensional matrices and the inner dimension of "a" must
-// match the outer dimension of "b". This op is optimized for the case where at
-// least one of "a" or "b" is sparse. The breakeven for using this versus a dense
-// matrix multiply on one platform was 30% zero values in the sparse matrix.
+// match the outer dimension of "b". Both "a" and "b" must be `Tensor`s, not
+// `SparseTensor`s. This op is optimized for the case where at least one of "a" or
+// "b" is sparse, in the sense that they have a large proportion of zero values.
+// The breakeven for using this versus a dense matrix multiply on one platform was
+// 30% zero values in the sparse matrix.
//
// The gradient computation of this operation will only take advantage of sparsity
// in the input gradient when that gradient comes from a Relu.
@@ -9878,6 +10121,51 @@ func AvgPoolGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize
return op.Output(0)
}
+// Greedily selects a subset of bounding boxes in descending order of score,
+//
+// pruning away boxes that have high overlaps
+// with previously selected boxes. Bounding boxes with score less than
+// `score_threshold` are removed. The n-by-n overlap values are supplied as a square matrix,
+// which allows for defining a custom overlap criterion (e.g. intersection over union,
+// intersection over area, etc.).
+//
+// The output of this operation is a set of integers indexing into the input
+// collection of bounding boxes representing the selected boxes. The bounding
+// box coordinates corresponding to the selected indices can then be obtained
+// using the `tf.gather` operation. For example:
+//
+// selected_indices = tf.image.non_max_suppression_with_overlaps(
+// overlaps, scores, max_output_size, overlap_threshold, score_threshold)
+// selected_boxes = tf.gather(boxes, selected_indices)
+//
+// Arguments:
+// overlaps: A 2-D float tensor of shape `[num_boxes, num_boxes]` representing
+// the n-by-n box overlap values.
+// scores: A 1-D float tensor of shape `[num_boxes]` representing a single
+// score corresponding to each box (each row of boxes).
+// max_output_size: A scalar integer tensor representing the maximum number of
+// boxes to be selected by non max suppression.
+// overlap_threshold: A 0-D float tensor representing the threshold for deciding whether
+// boxes overlap too much.
+// score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
+// boxes based on score.
+//
+// Returns A 1-D integer tensor of shape `[M]` representing the selected
+// indices from the boxes tensor, where `M <= max_output_size`.
+func NonMaxSuppressionWithOverlaps(scope *Scope, overlaps tf.Output, scores tf.Output, max_output_size tf.Output, overlap_threshold tf.Output, score_threshold tf.Output) (selected_indices tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "NonMaxSuppressionWithOverlaps",
+ Input: []tf.Input{
+ overlaps, scores, max_output_size, overlap_threshold, score_threshold,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// StageClearAttr is an optional argument to StageClear.
type StageClearAttr func(optionalAttr)
@@ -10170,6 +10458,57 @@ func Atan(scope *Scope, x tf.Output) (y tf.Output) {
return op.Output(0)
}
+// ResourceApplyAdaMaxAttr is an optional argument to ResourceApplyAdaMax.
+type ResourceApplyAdaMaxAttr func(optionalAttr)
+
+// ResourceApplyAdaMaxUseLocking sets the optional use_locking attribute to value.
+//
+// value: If `True`, updating of the var, m, and v tensors will be protected
+// by a lock; otherwise the behavior is undefined, but may exhibit less
+// contention.
+// If not specified, defaults to false
+func ResourceApplyAdaMaxUseLocking(value bool) ResourceApplyAdaMaxAttr {
+ return func(m optionalAttr) {
+ m["use_locking"] = value
+ }
+}
+
+// Update '*var' according to the AdaMax algorithm.
+//
+// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
+// v_t <- max(beta2 * v_{t-1}, abs(g))
+// variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
+//
+// Arguments:
+// var_: Should be from a Variable().
+// m: Should be from a Variable().
+// v: Should be from a Variable().
+// beta1_power: Must be a scalar.
+// lr: Scaling factor. Must be a scalar.
+// beta1: Momentum factor. Must be a scalar.
+// beta2: Momentum factor. Must be a scalar.
+// epsilon: Ridge term. Must be a scalar.
+// grad: The gradient.
+//
+// Returns the created operation.
+func ResourceApplyAdaMax(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdaMaxAttr) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "ResourceApplyAdaMax",
+ Input: []tf.Input{
+ var_, m, v, beta1_power, lr, beta1, beta2, epsilon, grad,
+ },
+ Attrs: attrs,
+ }
+ return scope.AddOperation(opspec)
+}
+
// Encode audio data using the WAV file format.
//
// This operation will generate a string suitable to be saved out to create a .wav
@@ -10778,6 +11117,120 @@ func ResourceApplyPowerSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Out
return scope.AddOperation(opspec)
}
+// CudnnRNNBackpropV2Attr is an optional argument to CudnnRNNBackpropV2.
+type CudnnRNNBackpropV2Attr func(optionalAttr)
+
+// CudnnRNNBackpropV2RnnMode sets the optional rnn_mode attribute to value.
+// If not specified, defaults to "lstm"
+func CudnnRNNBackpropV2RnnMode(value string) CudnnRNNBackpropV2Attr {
+ return func(m optionalAttr) {
+ m["rnn_mode"] = value
+ }
+}
+
+// CudnnRNNBackpropV2InputMode sets the optional input_mode attribute to value.
+// If not specified, defaults to "linear_input"
+func CudnnRNNBackpropV2InputMode(value string) CudnnRNNBackpropV2Attr {
+ return func(m optionalAttr) {
+ m["input_mode"] = value
+ }
+}
+
+// CudnnRNNBackpropV2Direction sets the optional direction attribute to value.
+// If not specified, defaults to "unidirectional"
+func CudnnRNNBackpropV2Direction(value string) CudnnRNNBackpropV2Attr {
+ return func(m optionalAttr) {
+ m["direction"] = value
+ }
+}
+
+// CudnnRNNBackpropV2Dropout sets the optional dropout attribute to value.
+// If not specified, defaults to 0
+func CudnnRNNBackpropV2Dropout(value float32) CudnnRNNBackpropV2Attr {
+ return func(m optionalAttr) {
+ m["dropout"] = value
+ }
+}
+
+// CudnnRNNBackpropV2Seed sets the optional seed attribute to value.
+// If not specified, defaults to 0
+func CudnnRNNBackpropV2Seed(value int64) CudnnRNNBackpropV2Attr {
+ return func(m optionalAttr) {
+ m["seed"] = value
+ }
+}
+
+// CudnnRNNBackpropV2Seed2 sets the optional seed2 attribute to value.
+// If not specified, defaults to 0
+func CudnnRNNBackpropV2Seed2(value int64) CudnnRNNBackpropV2Attr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// Backprop step of CudnnRNN.
+//
+// Compute the backprop of both data and weights in an RNN. Takes an extra
+// "host_reserved" input over CudnnRNNBackprop, which is used to determine RNN
+// cudnnRNNAlgo_t and cudnnMathType_t.
+//
+// rnn_mode: Indicates the type of the RNN model.
+// input_mode: Indicates whether there is a linear projection between the input and
+// the actual computation before the first layer. 'skip_input' is only allowed
+// when input_size == num_units; 'auto_select' implies 'skip_input' when
+// input_size == num_units; otherwise, it implies 'linear_input'.
+// direction: Indicates whether a bidirectional model will be used. Should be
+// "unidirectional" or "bidirectional".
+// dropout: Dropout probability. When set to 0., dropout is disabled.
+// seed: The 1st part of a seed to initialize dropout.
+// seed2: The 2nd part of a seed to initialize dropout.
+// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
+// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
+// num_units].
+// input_c: For LSTM, a 3-D tensor with the shape of
+// [num_layer * dir, batch, num_units]. For other models, it is ignored.
+// params: A 1-D tensor that contains the weights and biases in an opaque layout.
+// The size must be created through CudnnRNNParamsSize, and initialized
+// separately. Note that they might not be compatible across different
+// generations. So it is a good idea to save and restore
+// output: A 3-D tensor with the shape of [seq_length, batch_size,
+// dir * num_units].
+// output_h: The same shape as input_h.
+// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
+// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
+// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
+// pass.
+// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
+// pass.
+// reserve_space: The same reserve_space produced in the forward operation.
+// host_reserved: The same host_reserved produced in the forward operation.
+// input_backprop: The backprop to input in the forward pass. Has the same shape
+// as input.
+// input_h_backprop: The backprop to input_h in the forward pass. Has the same
+// shape as input_h.
+// input_c_backprop: The backprop to input_c in the forward pass. Has the same
+// shape as input_c.
+// params_backprop: The backprop to the params buffer in the forward pass. Has the
+// same shape as params.
+func CudnnRNNBackpropV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, host_reserved tf.Output, optional ...CudnnRNNBackpropV2Attr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "CudnnRNNBackpropV2",
+ Input: []tf.Input{
+ input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
+}
+
// Locks a mutex resource. The output is the lock. So long as the lock tensor
//
// is alive, any other request to use `MutexLock` with this mutex will wait.
@@ -10965,6 +11418,34 @@ func BatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, o
return op.Output(0)
}
+// Check if the input matches the regex pattern.
+//
+// The input is a string tensor of any shape. The pattern is a scalar
+// string tensor which is applied to every element of the input tensor.
+// The boolean values (True or False) of the output tensor indicate
+// if the input matches the regex pattern provided.
+//
+// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
+//
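+// A minimal Go sketch (assumes an op.Scope `s`; the pattern and inputs are
+// illustrative):
+//
+// ```
+// txt := op.Const(s, []string{"abc123", "abc"})
+// ok := op.RegexFullMatch(s, txt, op.Const(s, "[a-z]+[0-9]+"))
+// _ = ok // evaluates to [true false]
+// ```
+//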
+// Arguments:
+// input: A string tensor of the text to be processed.
+// pattern: A scalar string tensor of the regular expression to match the input.
+//
+// Returns A bool tensor with the same shape as `input`.
+func RegexFullMatch(scope *Scope, input tf.Output, pattern tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "RegexFullMatch",
+ Input: []tf.Input{
+ input, pattern,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Says whether the targets are in the top `K` predictions.
//
// This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
@@ -11457,7 +11938,7 @@ func SampleDistortedBoundingBoxAspectRatioRange(value []float32) SampleDistorted
// SampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
//
// value: The cropped area of the image must contain a fraction of the
-// supplied image within in this range.
+// supplied image within this range.
// If not specified, defaults to <f:0.05 f:1 >
func SampleDistortedBoundingBoxAreaRange(value []float32) SampleDistortedBoundingBoxAttr {
return func(m optionalAttr) {
@@ -12229,6 +12710,7 @@ func RFFT2D(scope *Scope, input tf.Output, fft_length tf.Output) (output tf.Outp
// [0, 0, 2, 2, 0, 0]
// [0, 0, 0, 0, 0, 0]]
// ```
+//
func Pad(scope *Scope, input tf.Output, paddings tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
@@ -13547,9 +14029,11 @@ func ReduceJoinSeparator(value string) ReduceJoinAttr {
// Joins a string Tensor across the given dimensions.
//
// Computes the string join across dimensions in the given string Tensor of shape
-// `[d_0, d_1, ..., d_n-1]`. Returns a new Tensor created by joining the input
+// `[\\(d_0, d_1, ..., d_{n-1}\\)]`. Returns a new Tensor created by joining the input
// strings with the given separator (default: empty string). Negative indices are
-// counted backwards from the end, with `-1` being equivalent to `n - 1`.
+// counted backwards from the end, with `-1` being equivalent to `n - 1`. If
+// indices are not specified, joins across all dimensions beginning from `n - 1`
+// through `0`.
//
// For example:
//
@@ -13562,9 +14046,10 @@ func ReduceJoinSeparator(value string) ReduceJoinAttr {
// tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
// tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
// tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
-// tf.reduce_join(a, [0, 1]) ==> ["acbd"]
-// tf.reduce_join(a, [1, 0]) ==> ["abcd"]
-// tf.reduce_join(a, []) ==> ["abcd"]
+// tf.reduce_join(a, [0, 1]) ==> "acbd"
+// tf.reduce_join(a, [1, 0]) ==> "abcd"
+// tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]]
+// tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd"
// ```
//
// Arguments:
@@ -14654,27 +15139,27 @@ func CudnnRNNBackpropSeed2(value int64) CudnnRNNBackpropAttr {
//
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicate whether there is a linear projection between the input and
-// The actual computation before the first layer. 'skip_input' is only allowed
+// the actual computation before the first layer. 'skip_input' is only allowed
// when input_size == num_units; 'auto_select' implies 'skip_input' when
// input_size == num_units; otherwise, it implies 'linear_input'.
-// direction: Indicates whether a bidirectional model will be used.
-// dir = (direction == bidirectional) ? 2 : 1
-// dropout: dropout probability. When set to 0., dropout is disabled.
-// seed: the 1st part of a seed to initialize dropout.
-// seed2: the 2nd part of a seed to initialize dropout.
-// input: a 3-D tensor with the shape of [seq_length, batch_size, input_size].
-// input_h: a 3-D tensor with the shape of [num_layer * dir, batch_size,
+// direction: Indicates whether a bidirectional model will be used. Should be
+// "unidirectional" or "bidirectional".
+// dropout: Dropout probability. When set to 0., dropout is disabled.
+// seed: The 1st part of a seed to initialize dropout.
+// seed2: The 2nd part of a seed to initialize dropout.
+// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
+// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
// num_units].
// input_c: For LSTM, a 3-D tensor with the shape of
// [num_layer * dir, batch, num_units]. For other models, it is ignored.
-// params: a 1-D tensor that contains the weights and biases in an opaque layout.
+// params: A 1-D tensor that contains the weights and biases in an opaque layout.
// The size must be created through CudnnRNNParamsSize, and initialized
// separately. Note that they might not be compatible across different
// generations. So it is a good idea to save and restore
-// output: a 3-D tensor with the shape of [seq_length, batch_size,
+// output: A 3-D tensor with the shape of [seq_length, batch_size,
// dir * num_units].
-// output_h: the same shape has input_h.
-// output_c: the same shape as input_c for LSTM. An empty tensor for other models.
+// output_h: The same shape as input_h.
+// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
// pass.
@@ -15635,6 +16120,30 @@ func OrderedMapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataTyp
return key, values
}
+// Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering.
+//
+// Arguments:
+// tree_ensemble_handle: Handle to the tree ensemble.
+// mean_gradients: A tensor with shape=[logits_dimension] with the mean of gradients for the first node.
+// mean_hessians: A tensor with shape=[logits_dimension] with the mean of hessians for the first node.
+// l1: l1 regularization factor on leaf weights, per instance based.
+// l2: l2 regularization factor on leaf weights, per instance based.
+//
+// Returns Bool, whether to continue bias centering.
+func BoostedTreesCenterBias(scope *Scope, tree_ensemble_handle tf.Output, mean_gradients tf.Output, mean_hessians tf.Output, l1 tf.Output, l2 tf.Output) (continue_centering tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "BoostedTreesCenterBias",
+ Input: []tf.Input{
+ tree_ensemble_handle, mean_gradients, mean_hessians, l1, l2,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// SerializeManySparseAttr is an optional argument to SerializeManySparse.
type SerializeManySparseAttr func(optionalAttr)
@@ -17203,6 +17712,7 @@ func QuantizeV2RoundMode(value string) QuantizeV2Attr {
// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
// if T == qint8, out[i] -= (range(T) + 1) / 2.0
// ```
+//
// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
//
// *MIN_COMBINED Mode Example*
@@ -17246,6 +17756,7 @@ func QuantizeV2RoundMode(value string) QuantizeV2Attr {
//
// We first find the range of values in our tensor. The
// range we use is always centered on 0, so we find m such that
+//
// ```c++
// m = max(abs(input_min), abs(input_max))
// ```
@@ -17254,6 +17765,7 @@ func QuantizeV2RoundMode(value string) QuantizeV2Attr {
//
// Next, we choose our fixed-point quantization buckets, `[min_fixed, max_fixed]`.
// If T is signed, this is
+//
// ```
// num_bits = sizeof(T) * 8
// [min_fixed, max_fixed] =
@@ -17261,16 +17773,19 @@ func QuantizeV2RoundMode(value string) QuantizeV2Attr {
// ```
//
// Otherwise, if T is unsigned, the fixed-point range is
+//
// ```
// [min_fixed, max_fixed] = [0, (1 << num_bits) - 1]
// ```
//
// From this we compute our scaling factor, s:
+//
// ```c++
// s = (max_fixed - min_fixed) / (2 * m)
// ```
//
// Now we can quantize the elements of our tensor:
+//
// ```c++
// result = round(input * s)
// ```
@@ -17367,6 +17882,31 @@ func QuantizedReluX(scope *Scope, features tf.Output, max_value tf.Output, min_f
return op.Output(0), op.Output(1), op.Output(2)
}
+// Creates a dataset that batches `batch_size` elements from `input_dataset`.
+//
+// Arguments:
+//
+// batch_size: A scalar representing the number of elements to accumulate in a batch.
+// drop_remainder: A scalar representing whether the last batch should be dropped in case its size
+// is smaller than desired.
+//
+//
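+// A minimal Go sketch (assumes an op.Scope `s` and an existing dataset
+// variant `ds` of scalar float32 elements; names and values are illustrative):
+//
+// ```
+// batched := op.BatchDatasetV2(s, ds,
+//     op.Const(s, int64(32)), // batch_size
+//     op.Const(s, true),      // drop_remainder
+//     []tf.DataType{tf.Float}, []tf.Shape{tf.MakeShape(-1)})
+// _ = batched
+// ```
+//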
+func BatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ opspec := tf.OpSpec{
+ Type: "BatchDatasetV2",
+ Input: []tf.Input{
+ input_dataset, batch_size, drop_remainder,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// QuantizedConv2DAttr is an optional argument to QuantizedConv2D.
type QuantizedConv2DAttr func(optionalAttr)
@@ -18006,6 +18546,34 @@ func MutableHashTableOfTensorsV2(scope *Scope, key_dtype tf.DataType, value_dtyp
return op.Output(0)
}
+// The gradient operator for the SparseSlice op.
+//
+// This op takes in the upstream gradient w.r.t. non-empty values of
+// the sliced `SparseTensor`, and outputs the gradients w.r.t.
+// the non-empty values of input `SparseTensor`.
+//
+// Arguments:
+// backprop_val_grad: 1-D. The gradient with respect to
+// the non-empty values of the sliced `SparseTensor`.
+// input_indices: 2-D. The `indices` of the input `SparseTensor`.
+// input_start: 1-D. Tensor representing the start of the slice.
+// output_indices: 2-D. The `indices` of the sliced `SparseTensor`.
+//
+// Returns 1-D. The gradient with respect to the non-empty values of input `SparseTensor`.
+func SparseSliceGrad(scope *Scope, backprop_val_grad tf.Output, input_indices tf.Output, input_start tf.Output, output_indices tf.Output) (val_grad tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SparseSliceGrad",
+ Input: []tf.Input{
+ backprop_val_grad, input_indices, input_start, output_indices,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Computes the gradient of the sigmoid of `x` wrt its input.
//
// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
@@ -18050,6 +18618,31 @@ func HSVToRGB(scope *Scope, images tf.Output) (output tf.Output) {
return op.Output(0)
}
+// Creates a dataset by applying optimizations to `input_dataset`.
+//
+// Arguments:
+// input_dataset: A variant tensor representing the input dataset.
+// optimizations: A `tf.string` vector `tf.Tensor` identifying optimizations to use.
+//
+//
+func OptimizeDataset(scope *Scope, input_dataset tf.Output, optimizations tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ opspec := tf.OpSpec{
+ Type: "OptimizeDataset",
+ Input: []tf.Input{
+ input_dataset, optimizations,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.
//
// Arguments:
@@ -18224,6 +18817,26 @@ func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.
return scope.AddOperation(opspec)
}
+// Strip leading and trailing whitespaces from the Tensor.
+//
+// Arguments:
+// input: A string `Tensor` of any shape.
+//
+// Returns A string `Tensor` of the same shape as the input.
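+//
+// A minimal Go sketch (assumes an op.Scope `s`; the values are illustrative):
+//
+// ```
+// out := op.StringStrip(s, op.Const(s, []string{"  hello  ", "world "}))
+// _ = out // evaluates to ["hello" "world"]
+// ```
+//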
+func StringStrip(scope *Scope, input tf.Output) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "StringStrip",
+ Input: []tf.Input{
+ input,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Returns a tensor of ones with the same shape and type as x.
//
// Arguments:
@@ -18278,6 +18891,10 @@ func SparseFillEmptyRowsGrad(scope *Scope, reverse_index_map tf.Output, grad_val
//
// if < 0, `scale * features` otherwise.
//
+// To be used together with
+// `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.
+// For correct dropout, use `tf.contrib.nn.alpha_dropout`.
+//
// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
func Selu(scope *Scope, features tf.Output) (activations tf.Output) {
if scope.Err() != nil {
@@ -18960,7 +19577,7 @@ func MatrixTriangularSolveLower(value bool) MatrixTriangularSolveAttr {
// adjoint.
//
// @compatibility(numpy)
-// Equivalent to np.linalg.triangular_solve
+// Equivalent to scipy.linalg.solve_triangular
// @end_compatibility
// If not specified, defaults to false
func MatrixTriangularSolveAdjoint(value bool) MatrixTriangularSolveAttr {
@@ -19736,9 +20353,9 @@ func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyReso
// ```
//
// Arguments:
-// start: First entry in the range.
-// stop: Last entry in the range.
-// num: Number of values to generate.
+// start: 0-D tensor. First entry in the range.
+// stop: 0-D tensor. Last entry in the range.
+// num: 0-D tensor. Number of values to generate.
//
// Returns 1-D. The generated values.
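+//
+// As a minimal Go sketch (assumes an op.Scope `s`; the values are illustrative):
+//
+// ```
+// vals := op.LinSpace(s, op.Const(s, float32(10)), op.Const(s, float32(12)), op.Const(s, int32(3)))
+// _ = vals // evaluates to [10 11 12]
+// ```
+//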
func LinSpace(scope *Scope, start tf.Output, stop tf.Output, num tf.Output) (output tf.Output) {
@@ -20919,6 +21536,37 @@ func LookupTableInsertV2(scope *Scope, table_handle tf.Output, keys tf.Output, v
return scope.AddOperation(opspec)
}
+// Creates a dataset that batches and pads `batch_size` elements from the input.
+//
+// Arguments:
+//
+// batch_size: A scalar representing the number of elements to accumulate in a
+// batch.
+// padded_shapes: A list of int64 tensors representing the desired padded shapes
+// of the corresponding output components. These shapes may be partially
+// specified, using `-1` to indicate that a particular dimension should be
+// padded to the maximum size of all batch elements.
+// padding_values: A list of scalars containing the padding value to use for
+// each of the outputs.
+// drop_remainder: A scalar representing whether the last batch should be dropped in case its size
+// is smaller than desired.
+//
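+// A minimal Go sketch (assumes an op.Scope `s` and an existing dataset
+// variant `ds` whose elements are 1-D string tensors; names and values are
+// illustrative):
+//
+// ```
+// padded := op.PaddedBatchDatasetV2(s, ds,
+//     op.Const(s, int64(4)),                 // batch_size
+//     []tf.Output{op.Const(s, []int64{-1})}, // pad to the longest element in each batch
+//     []tf.Output{op.Const(s, "")},          // padding_values
+//     op.Const(s, false),                    // drop_remainder
+//     []tf.Shape{tf.MakeShape(-1, -1)})
+// _ = padded
+// ```
+//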
+func PaddedBatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, drop_remainder tf.Output, output_shapes []tf.Shape) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"output_shapes": output_shapes}
+ opspec := tf.OpSpec{
+ Type: "PaddedBatchDatasetV2",
+ Input: []tf.Input{
+ input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values), drop_remainder,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Returns element-wise smallest integer in not less than x.
func Ceil(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
@@ -21790,7 +22438,7 @@ func ImageSummaryBadColor(value tf.Tensor) ImageSummaryAttr {
// generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
//
// The `bad_color` argument is the color to use in the generated images for
-// non-finite input values. It is a `unit8` 1-D tensor of length `channels`.
+// non-finite input values. It is a `uint8` 1-D tensor of length `channels`.
// Each element must be in the range `[0, 255]` (It represents the value of a
// pixel in the output image). Non-finite values in the input tensor are
// replaced by this tensor in the output image. The default value is the color
@@ -22248,7 +22896,7 @@ func TensorListSetItem(scope *Scope, input_handle tf.Output, index tf.Output, it
// Computes the matrix exponential of one or more square matrices:
//
-// exp(A) = \sum_{n=0}^\infty A^n/n!
+// \\(exp(A) = \sum_{n=0}^\infty A^n/n!\\)
//
// The exponential is computed using a combination of the scaling and squaring
// method and the Pade approximation. Details can be founds in:
@@ -22628,6 +23276,28 @@ func MatrixSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...Matr
return op.Output(0)
}
+// Returns a serialized GraphDef representing `input_dataset`.
+//
+// Returns a graph representation for `input_dataset`.
+//
+// Arguments:
+// input_dataset: A variant tensor representing the dataset to return the graph representation for.
+//
+// Returns The graph representation of the dataset (as serialized GraphDef).
+func DatasetToGraph(scope *Scope, input_dataset tf.Output) (graph tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "DatasetToGraph",
+ Input: []tf.Input{
+ input_dataset,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// SvdAttr is an optional argument to Svd.
type SvdAttr func(optionalAttr)
@@ -23651,10 +24321,10 @@ func ResourceApplyAdamUseNesterov(value bool) ResourceApplyAdamAttr {
// Update '*var' according to the Adam algorithm.
//
-// lr_t <- learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t)
-// m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
-// v_t <- beta2 * v_{t-1} + (1 - beta2) * g_t * g_t
-// variable <- variable - lr_t * m_t / (sqrt(v_t) + epsilon)
+// $$lr_t := \text{learning_rate} * \sqrt{(1 - beta_2^t) / (1 - beta_1^t)}$$
+// $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$
+// $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$
+// $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$
//
// Arguments:
// var_: Should be from a Variable().
@@ -24118,7 +24788,7 @@ func SampleDistortedBoundingBoxV2AspectRatioRange(value []float32) SampleDistort
// SampleDistortedBoundingBoxV2AreaRange sets the optional area_range attribute to value.
//
// value: The cropped area of the image must contain a fraction of the
-// supplied image within in this range.
+// supplied image within this range.
// If not specified, defaults to <f:0.05 f:1 >
func SampleDistortedBoundingBoxV2AreaRange(value []float32) SampleDistortedBoundingBoxV2Attr {
return func(m optionalAttr) {
@@ -24627,10 +25297,57 @@ func NonMaxSuppressionV2(scope *Scope, boxes tf.Output, scores tf.Output, max_ou
return op.Output(0)
}
+// Greedily selects a subset of bounding boxes in descending order of score,
+//
+// pruning away boxes that have high intersection-over-union (IOU) overlap
+// with previously selected boxes. Bounding boxes with score less than
+// `score_threshold` are removed. Bounding boxes are supplied as
+// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
+// diagonal pair of box corners and the coordinates can be provided as normalized
+// (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm
+// is agnostic to where the origin is in the coordinate system and more
+// generally is invariant to orthogonal transformations and translations
+// of the coordinate system; thus translating or reflecting the coordinate
+// system results in the same boxes being selected by the algorithm.
+// The output of this operation is a set of integers indexing into the input
+// collection of bounding boxes representing the selected boxes. The bounding
+// box coordinates corresponding to the selected indices can then be obtained
+// using the `tf.gather` operation. For example:
+// selected_indices = tf.image.non_max_suppression_v2(
+// boxes, scores, max_output_size, iou_threshold, score_threshold)
+// selected_boxes = tf.gather(boxes, selected_indices)
+//
+// Arguments:
+// boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
+// scores: A 1-D float tensor of shape `[num_boxes]` representing a single
+// score corresponding to each box (each row of boxes).
+// max_output_size: A scalar integer tensor representing the maximum number of
+// boxes to be selected by non max suppression.
+// iou_threshold: A 0-D float tensor representing the threshold for deciding whether
+// boxes overlap too much with respect to IOU.
+// score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
+// boxes based on score.
+//
+// Returns A 1-D integer tensor of shape `[M]` representing the selected
+// indices from the boxes tensor, where `M <= max_output_size`.
+func NonMaxSuppressionV3(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output) (selected_indices tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "NonMaxSuppressionV3",
+ Input: []tf.Input{
+ boxes, scores, max_output_size, iou_threshold, score_threshold,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Computes the matrix logarithm of one or more square matrices:
//
//
-// log(exp(A)) = A
+// \\(log(exp(A)) = A\\)
//
// This op is only defined for complex matrices. If A is positive-definite and
// real, then casting to a complex matrix, taking the logarithm and casting back
@@ -24667,6 +25384,31 @@ func MatrixLogarithm(scope *Scope, input tf.Output) (output tf.Output) {
return op.Output(0)
}
+// This op is used as a placeholder in If branch functions. It doesn't provide a
+// valid output when run, so must either be removed (e.g. replaced with a
+// function input) or guaranteed not to be used (e.g. if mirroring an
+// intermediate output needed for the gradient computation of the other branch).
+//
+// Arguments:
+// dtype: The type of the output.
+// shape: The purported shape of the output. This is only used for shape inference;
+// the output will not necessarily have this shape. Can be a partial shape.
+//
+// Returns \"Fake\" output value. This should not be consumed by another op.
+func FakeParam(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
+ opspec := tf.OpSpec{
+ Type: "FakeParam",
+
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// EncodeProtoAttr is an optional argument to EncodeProto.
type EncodeProtoAttr func(optionalAttr)
@@ -25008,6 +25750,23 @@ func ReaderResetV2(scope *Scope, reader_handle tf.Output) (o *tf.Operation) {
return scope.AddOperation(opspec)
}
+// A dataset that splits the elements of its input into multiple elements.
+func UnbatchDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ opspec := tf.OpSpec{
+ Type: "UnbatchDataset",
+ Input: []tf.Input{
+ input_dataset,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// RpcAttr is an optional argument to Rpc.
type RpcAttr func(optionalAttr)
@@ -25260,6 +26019,36 @@ func ConcatenateDataset(scope *Scope, input_dataset tf.Output, another_dataset t
return op.Output(0)
}
+// Debugging/model interpretability outputs for each example.
+//
+// It traverses all the trees and computes debug metrics for individual examples,
+// such as getting split feature ids and logits after each split along the decision
+// path used to compute directional feature contributions.
+//
+// Arguments:
+//
+// bucketized_features: A list of rank 1 Tensors containing bucket id for each
+// feature.
+// logits_dimension: scalar, dimension of the logits, to be used for constructing the protos in
+// examples_debug_outputs_serialized.
+//
+// Returns Output rank 1 Tensor containing a proto serialized as a string for each example.
+func BoostedTreesExampleDebugOutputs(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, logits_dimension int64) (examples_debug_outputs_serialized tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"logits_dimension": logits_dimension}
+ opspec := tf.OpSpec{
+ Type: "BoostedTreesExampleDebugOutputs",
+ Input: []tf.Input{
+ tree_ensemble_handle, tf.OutputList(bucketized_features),
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Adds a value to the current value of a variable.
//
// Any ReadVariableOp with a control dependency on this op is guaranteed to
@@ -25959,6 +26748,26 @@ func TFRecordDataset(scope *Scope, filenames tf.Output, compression_type tf.Outp
return op.Output(0)
}
+// A container for an iterator resource.
+//
+// Returns A handle to the iterator that can be passed to a "MakeIterator" or
+// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
+// resource sharing by name, and does not keep a reference to the resource
+// container.
+func AnonymousIterator(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ opspec := tf.OpSpec{
+ Type: "AnonymousIterator",
+
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// BatchToSpace for 4-D tensors of type T.
//
// This is a legacy version of the more general BatchToSpaceND.
@@ -26462,6 +27271,28 @@ func Cross(scope *Scope, a tf.Output, b tf.Output) (product tf.Output) {
return op.Output(0)
}
+// Writes the given dataset to the given file using the TFRecord format.
+//
+// Arguments:
+// input_dataset: A variant tensor representing the dataset to write.
+// filename: A scalar string tensor representing the filename to use.
+// compression_type: A scalar string tensor containing either (i) the empty string (no
+// compression), (ii) "ZLIB", or (iii) "GZIP".
+//
+// Returns the created operation.
+func DatasetToTFRecord(scope *Scope, input_dataset tf.Output, filename tf.Output, compression_type tf.Output) (o *tf.Operation) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "DatasetToTFRecord",
+ Input: []tf.Input{
+ input_dataset, filename, compression_type,
+ },
+ }
+ return scope.AddOperation(opspec)
+}
+
// AvgPool3DAttr is an optional argument to AvgPool3D.
type AvgPool3DAttr func(optionalAttr)
@@ -26509,6 +27340,26 @@ func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, pa
return op.Output(0)
}
+// A placeholder for input pipeline graph optimizations.
+//
+// Arguments:
+// input_dataset: A variant tensor representing the input dataset.
+func SinkDataset(scope *Scope, input_dataset tf.Output) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "SinkDataset",
+ Input: []tf.Input{
+ input_dataset,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Performs a padding as a preprocess during a convolution.
//
// Similar to FusedResizeAndPadConv2d, this op allows for an optimized
@@ -27064,6 +27915,26 @@ func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, opti
return scope.AddOperation(opspec)
}
+// Computes the Bessel i0e function of `x` element-wise.
+//
+// Exponentially scaled modified Bessel function of order 0 defined as
+// `bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)`.
+//
+// This function is faster and more numerically stable than `bessel_i0(x)`.
+func BesselI0e(scope *Scope, x tf.Output) (y tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ opspec := tf.OpSpec{
+ Type: "BesselI0e",
+ Input: []tf.Input{
+ x,
+ },
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// QueueDequeueManyV2Attr is an optional argument to QueueDequeueManyV2.
type QueueDequeueManyV2Attr func(optionalAttr)
@@ -27174,6 +28045,29 @@ func EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (
return op.Output(0)
}
+// A dataset that creates window datasets from the input dataset.
+//
+// Arguments:
+//
+// window_size: A scalar representing the number of elements to accumulate in a window.
+//
+//
+func WindowDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
+ opspec := tf.OpSpec{
+ Type: "WindowDataset",
+ Input: []tf.Input{
+ input_dataset, window_size,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0)
+}
+
// Deprecated. Use TensorArrayCloseV3
//
// DEPRECATED at GraphDef version 26: Use TensorArrayCloseV3
@@ -27546,30 +28440,30 @@ func CudnnRNNIsTraining(value bool) CudnnRNNAttr {
//
// rnn_mode: Indicates the type of the RNN model.
// input_mode: Indicate whether there is a linear projection between the input and
-// The actual computation before the first layer. 'skip_input' is only allowed
+// the actual computation before the first layer. 'skip_input' is only allowed
// when input_size == num_units; 'auto_select' implies 'skip_input' when
// input_size == num_units; otherwise, it implies 'linear_input'.
-// direction: Indicates whether a bidirectional model will be used.
-// dir = (direction == bidirectional) ? 2 : 1
-// dropout: dropout probability. When set to 0., dropout is disabled.
-// seed: the 1st part of a seed to initialize dropout.
-// seed2: the 2nd part of a seed to initialize dropout.
-// input: a 3-D tensor with the shape of [seq_length, batch_size, input_size].
-// input_h: a 3-D tensor with the shape of [num_layer * dir, batch_size,
+// direction: Indicates whether a bidirectional model will be used. Should be
+// "unidirectional" or "bidirectional".
+// dropout: Dropout probability. When set to 0., dropout is disabled.
+// seed: The 1st part of a seed to initialize dropout.
+// seed2: The 2nd part of a seed to initialize dropout.
+// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
+// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
// num_units].
// input_c: For LSTM, a 3-D tensor with the shape of
// [num_layer * dir, batch, num_units]. For other models, it is ignored.
-// params: a 1-D tensor that contains the weights and biases in an opaque layout.
+// params: A 1-D tensor that contains the weights and biases in an opaque layout.
// The size must be created through CudnnRNNParamsSize, and initialized
// separately. Note that they might not be compatible across different
// generations. So it is a good idea to save and restore
-// output: a 3-D tensor with the shape of [seq_length, batch_size,
+// output: A 3-D tensor with the shape of [seq_length, batch_size,
// dir * num_units].
-// output_h: the same shape has input_h.
-// output_c: the same shape as input_c for LSTM. An empty tensor for other models.
+// output_h: The same shape as input_h.
+// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
// is_training: Indicates whether this operation is used for inference or
// training.
-// reserve_space: an opaque tensor that can be used in backprop calculation. It
+// reserve_space: An opaque tensor that can be used in backprop calculation. It
// is only produced if is_training is true.
func CudnnRNN(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, optional ...CudnnRNNAttr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output) {
if scope.Err() != nil {
@@ -27590,6 +28484,37 @@ func CudnnRNN(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Outpu
return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}
+// Creates a TensorArray for storing multiple gradients of values in the given handle.
+//
+// Similar to TensorArrayGradV3. However, it creates an accumulator with an
+// expanded shape compared to the input TensorArray whose gradient is being
+// computed. This enables multiple gradients for the same TensorArray to be
+// calculated using the same accumulator.
+//
+// Arguments:
+// handle: The handle to the forward TensorArray.
+// flow_in: A float scalar that enforces proper chaining of operations.
+// shape_to_prepend: An int32 vector representing a shape. Elements in the gradient accumulator
+// will have a shape equal to this shape_to_prepend value concatenated with the shape of the
+// elements in the TensorArray corresponding to the input handle.
+// source: The gradient source string, used to decide which gradient TensorArray
+// to return.
+func TensorArrayGradWithShape(scope *Scope, handle tf.Output, flow_in tf.Output, shape_to_prepend tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{"source": source}
+ opspec := tf.OpSpec{
+ Type: "TensorArrayGradWithShape",
+ Input: []tf.Input{
+ handle, flow_in, shape_to_prepend,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1)
+}
+
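An illustrative sketch only: requesting an accumulator whose elements carry an extra leading dimension, using the Go bindings. TensorArrayV3 is assumed from the existing generated wrappers, and the source string "grad_a" is an arbitrary example value:

s := op.NewScope()
size := op.Const(s, int32(4))
handle, flow := op.TensorArrayV3(s, size, tf.Float)
// Prepend [2], so each accumulator element has shape [2, <forward element shape>],
// allowing two gradients per forward element to be accumulated side by side.
shapeToPrepend := op.Const(s, []int32{2})
gradHandle, gradFlow := op.TensorArrayGradWithShape(s, handle, flow, shapeToPrepend, "grad_a")
_, _ = gradHandle, gradFlow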
// Compare values of `input` to `threshold` and pack resulting bits into a `uint8`.
//
// Each comparison returns a boolean `true` (if `input_value > threshold`)
@@ -27980,7 +28905,7 @@ func RandomShuffleQueueV2(scope *Scope, component_types []tf.DataType, optional
//
// For example, if an image is 100 x 200 pixels (height x width) and the bounding
// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
-// the bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates).
+// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
//
// Parts of the bounding box may fall outside the image.
//
@@ -28321,7 +29246,7 @@ func BoostedTreesCreateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, st
// `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
//
// `indices` must be integer tensor, containing indices into `input`.
-// It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
+// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`.
//
// The innermost dimension of `indices` (with length `K`) corresponds to
// indices into elements (if `K = P`) or `(P-K)`-dimensional slices
@@ -28329,9 +29254,7 @@ func BoostedTreesCreateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, st
//
// `updates` is `Tensor` of rank `Q-1+P-K` with shape:
//
-// ```
-// [d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].
-// ```
+// $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$
//
// For example, say we want to add 4 scattered elements to a rank-1 tensor to 8
// elements. In Python, that addition would look like this:
@@ -29092,6 +30015,119 @@ func OrderedMapSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapSi
return op.Output(0)
}
+// CudnnRNNV2Attr is an optional argument to CudnnRNNV2.
+type CudnnRNNV2Attr func(optionalAttr)
+
+// CudnnRNNV2RnnMode sets the optional rnn_mode attribute to value.
+// If not specified, defaults to "lstm"
+func CudnnRNNV2RnnMode(value string) CudnnRNNV2Attr {
+ return func(m optionalAttr) {
+ m["rnn_mode"] = value
+ }
+}
+
+// CudnnRNNV2InputMode sets the optional input_mode attribute to value.
+// If not specified, defaults to "linear_input"
+func CudnnRNNV2InputMode(value string) CudnnRNNV2Attr {
+ return func(m optionalAttr) {
+ m["input_mode"] = value
+ }
+}
+
+// CudnnRNNV2Direction sets the optional direction attribute to value.
+// If not specified, defaults to "unidirectional"
+func CudnnRNNV2Direction(value string) CudnnRNNV2Attr {
+ return func(m optionalAttr) {
+ m["direction"] = value
+ }
+}
+
+// CudnnRNNV2Dropout sets the optional dropout attribute to value.
+// If not specified, defaults to 0
+func CudnnRNNV2Dropout(value float32) CudnnRNNV2Attr {
+ return func(m optionalAttr) {
+ m["dropout"] = value
+ }
+}
+
+// CudnnRNNV2Seed sets the optional seed attribute to value.
+// If not specified, defaults to 0
+func CudnnRNNV2Seed(value int64) CudnnRNNV2Attr {
+ return func(m optionalAttr) {
+ m["seed"] = value
+ }
+}
+
+// CudnnRNNV2Seed2 sets the optional seed2 attribute to value.
+// If not specified, defaults to 0
+func CudnnRNNV2Seed2(value int64) CudnnRNNV2Attr {
+ return func(m optionalAttr) {
+ m["seed2"] = value
+ }
+}
+
+// CudnnRNNV2IsTraining sets the optional is_training attribute to value.
+// If not specified, defaults to true
+func CudnnRNNV2IsTraining(value bool) CudnnRNNV2Attr {
+ return func(m optionalAttr) {
+ m["is_training"] = value
+ }
+}
+
+// An RNN backed by cuDNN.
+//
+// Computes the RNN from the input and initial states, with respect to the params
+// buffer. Produces one extra output, "host_reserved", compared to CudnnRNN.
+//
+// rnn_mode: Indicates the type of the RNN model.
+// input_mode: Indicates whether there is a linear projection between the input and
+// the actual computation before the first layer. 'skip_input' is only allowed
+// when input_size == num_units; 'auto_select' implies 'skip_input' when
+// input_size == num_units; otherwise, it implies 'linear_input'.
+// direction: Indicates whether a bidirectional model will be used. Should be
+// "unidirectional" or "bidirectional".
+// dropout: Dropout probability. When set to 0., dropout is disabled.
+// seed: The 1st part of a seed to initialize dropout.
+// seed2: The 2nd part of a seed to initialize dropout.
+// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
+// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
+// num_units].
+// input_c: For LSTM, a 3-D tensor with the shape of
+// [num_layer * dir, batch, num_units]. For other models, it is ignored.
+// params: A 1-D tensor that contains the weights and biases in an opaque layout.
+// The size must be created through CudnnRNNParamsSize, and initialized
+// separately. Note that they might not be compatible across different
+// generations. So it is a good idea to save and restore
+// output: A 3-D tensor with the shape of [seq_length, batch_size,
+// dir * num_units].
+// output_h: The same shape as input_h.
+// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
+// is_training: Indicates whether this operation is used for inference or
+// training.
+// reserve_space: An opaque tensor that can be used in backprop calculation. It
+// is only produced if is_training is true.
+// host_reserved: An opaque tensor that can be used in backprop calculation. It is
+// only produced if is_training is true. It is output on host memory rather than
+// device memory.
+func CudnnRNNV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, optional ...CudnnRNNV2Attr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output, host_reserved tf.Output) {
+ if scope.Err() != nil {
+ return
+ }
+ attrs := map[string]interface{}{}
+ for _, a := range optional {
+ a(attrs)
+ }
+ opspec := tf.OpSpec{
+ Type: "CudnnRNNV2",
+ Input: []tf.Input{
+ input, input_h, input_c, params,
+ },
+ Attrs: attrs,
+ }
+ op := scope.AddOperation(opspec)
+ return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
+}
+
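The CudnnRNNV2*Attr helpers above follow the package's functional-options pattern. A minimal, hedged sketch of threading them through a call; the Placeholder inputs merely stand in for tensors of the shapes documented above and are not part of this change:

s := op.NewScope()
input := op.Placeholder(s, tf.Float)  // [seq_length, batch_size, input_size]
inputH := op.Placeholder(s, tf.Float) // [num_layer * dir, batch_size, num_units]
inputC := op.Placeholder(s, tf.Float) // LSTM cell state; ignored for other rnn_modes
params := op.Placeholder(s, tf.Float) // opaque weight/bias buffer sized via CudnnRNNParamsSize
output, outputH, outputC, reserve, hostReserved := op.CudnnRNNV2(
	s, input, inputH, inputC, params,
	op.CudnnRNNV2RnnMode("lstm"),
	op.CudnnRNNV2Direction("bidirectional"),
	op.CudnnRNNV2Dropout(0.1),
	op.CudnnRNNV2IsTraining(true),
)
_, _, _, _, _ = output, outputH, outputC, reserve, hostReserved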
// ShapeNAttr is an optional argument to ShapeN.
type ShapeNAttr func(optionalAttr)
diff --git a/tensorflow/java/maven/hadoop/pom.xml b/tensorflow/java/maven/hadoop/pom.xml
index 7391dfb965..2c2c4106cb 100644
--- a/tensorflow/java/maven/hadoop/pom.xml
+++ b/tensorflow/java/maven/hadoop/pom.xml
@@ -5,7 +5,7 @@
<groupId>org.tensorflow</groupId>
<artifactId>hadoop</artifactId>
<packaging>jar</packaging>
- <version>1.9.0</version>
+ <version>1.10.0-rc0</version>
<name>tensorflow-hadoop</name>
<url>https://www.tensorflow.org</url>
<description>TensorFlow TFRecord InputFormat/OutputFormat for Apache Hadoop</description>
diff --git a/tensorflow/java/maven/libtensorflow/pom.xml b/tensorflow/java/maven/libtensorflow/pom.xml
index d44bdf8f81..5d4e04ecd3 100644
--- a/tensorflow/java/maven/libtensorflow/pom.xml
+++ b/tensorflow/java/maven/libtensorflow/pom.xml
@@ -6,7 +6,7 @@
<parent>
<groupId>org.tensorflow</groupId>
<artifactId>parentpom</artifactId>
- <version>1.9.0</version>
+ <version>1.10.0-rc0</version>
<relativePath>../</relativePath>
</parent>
<artifactId>libtensorflow</artifactId>
diff --git a/tensorflow/java/maven/libtensorflow_jni/pom.xml b/tensorflow/java/maven/libtensorflow_jni/pom.xml
index e8925c6fb1..e107904f7d 100644
--- a/tensorflow/java/maven/libtensorflow_jni/pom.xml
+++ b/tensorflow/java/maven/libtensorflow_jni/pom.xml
@@ -6,7 +6,7 @@
<parent>
<groupId>org.tensorflow</groupId>
<artifactId>parentpom</artifactId>
- <version>1.9.0</version>
+ <version>1.10.0-rc0</version>
<relativePath>../</relativePath>
</parent>
<artifactId>libtensorflow_jni</artifactId>
diff --git a/tensorflow/java/maven/libtensorflow_jni_gpu/pom.xml b/tensorflow/java/maven/libtensorflow_jni_gpu/pom.xml
index 3bf4a2590c..b3c525233f 100644
--- a/tensorflow/java/maven/libtensorflow_jni_gpu/pom.xml
+++ b/tensorflow/java/maven/libtensorflow_jni_gpu/pom.xml
@@ -6,7 +6,7 @@
<parent>
<groupId>org.tensorflow</groupId>
<artifactId>parentpom</artifactId>
- <version>1.9.0</version>
+ <version>1.10.0-rc0</version>
<relativePath>../</relativePath>
</parent>
<artifactId>libtensorflow_jni_gpu</artifactId>
diff --git a/tensorflow/java/maven/pom.xml b/tensorflow/java/maven/pom.xml
index b96dcf2888..a2943a3172 100644
--- a/tensorflow/java/maven/pom.xml
+++ b/tensorflow/java/maven/pom.xml
@@ -6,7 +6,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.tensorflow</groupId>
<artifactId>parentpom</artifactId>
- <version>1.9.0</version>
+ <version>1.10.0-rc0</version>
<packaging>pom</packaging>
<url>https://www.tensorflow.org</url>
diff --git a/tensorflow/java/maven/proto/pom.xml b/tensorflow/java/maven/proto/pom.xml
index 5581d864d7..7080d81b7d 100644
--- a/tensorflow/java/maven/proto/pom.xml
+++ b/tensorflow/java/maven/proto/pom.xml
@@ -6,7 +6,7 @@
<parent>
<groupId>org.tensorflow</groupId>
<artifactId>parentpom</artifactId>
- <version>1.9.0</version>
+ <version>1.10.0-rc0</version>
<relativePath>../</relativePath>
</parent>
<artifactId>proto</artifactId>
diff --git a/tensorflow/java/maven/spark-connector/pom.xml b/tensorflow/java/maven/spark-connector/pom.xml
index 64956be02c..003d09a0b7 100644
--- a/tensorflow/java/maven/spark-connector/pom.xml
+++ b/tensorflow/java/maven/spark-connector/pom.xml
@@ -6,7 +6,7 @@
<groupId>org.tensorflow</groupId>
<artifactId>spark-connector_2.11</artifactId>
<packaging>jar</packaging>
- <version>1.9.0</version>
+ <version>1.10.0-rc0</version>
<name>spark-tensorflow-connector</name>
<url>https://www.tensorflow.org</url>
<description>TensorFlow TFRecord connector for Apache Spark DataFrames</description>
diff --git a/tensorflow/java/maven/tensorflow/pom.xml b/tensorflow/java/maven/tensorflow/pom.xml
index 92e15aa2c7..b9affbf699 100644
--- a/tensorflow/java/maven/tensorflow/pom.xml
+++ b/tensorflow/java/maven/tensorflow/pom.xml
@@ -6,7 +6,7 @@
<parent>
<groupId>org.tensorflow</groupId>
<artifactId>parentpom</artifactId>
- <version>1.9.0</version>
+ <version>1.10.0-rc0</version>
<relativePath>../</relativePath>
</parent>
<artifactId>tensorflow</artifactId>
diff --git a/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java b/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java
index c8b9126f03..49594e6b47 100644
--- a/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java
+++ b/tensorflow/java/src/main/java/org/tensorflow/SavedModelBundle.java
@@ -25,18 +25,86 @@ package org.tensorflow;
* protocol buffer</a>).
*/
public class SavedModelBundle implements AutoCloseable {
+ /** Options for loading a SavedModel. */
+ public static final class Loader {
+ /** Load a <code>SavedModelBundle</code> with the configured options. */
+ public SavedModelBundle load() {
+ return SavedModelBundle.load(exportDir, tags, configProto, runOptions);
+ }
+
+ /**
+ * Sets options to use when executing model initialization operations.
+ *
+ * @param options Serialized <a
+ * href="https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto">RunOptions
+ * protocol buffer</a>.
+ */
+ public Loader withRunOptions(byte[] options) {
+ this.runOptions = options;
+ return this;
+ }
+
+ /**
+ * Sets the configuration of the <code>Session</code> object created when loading the model.
+ *
+ * @param configProto Serialized <a
+ * href="https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto">ConfigProto
+ * protocol buffer</a>.
+ */
+ public Loader withConfigProto(byte[] configProto) {
+ this.configProto = configProto;
+ return this;
+ }
+
+ /**
+ * Sets the set of tags that identify the specific graph in the saved model to load.
+ *
+ * @param tags the tags identifying the specific MetaGraphDef to load.
+ */
+ public Loader withTags(String... tags) {
+ this.tags = tags;
+ return this;
+ }
+
+ private Loader(String exportDir) {
+ this.exportDir = exportDir;
+ }
+
+ private String exportDir = null;
+ private String[] tags = null;
+ private byte[] configProto = null;
+ private byte[] runOptions = null;
+ }
/**
* Load a saved model from an export directory. The model that is being loaded should be created
* using the <a href="https://www.tensorflow.org/api_docs/python/tf/saved_model">Saved Model
* API</a>.
*
+ * <p>This method is a shorthand for:
+ *
+ * <pre>{@code
+ * SavedModelBundle.loader(exportDir).withTags(tags).load();
+ * }</pre>
+ *
* @param exportDir the directory path containing a saved model.
* @param tags the tags identifying the specific metagraphdef to load.
* @return a bundle containing the graph and associated session.
*/
public static SavedModelBundle load(String exportDir, String... tags) {
- return load(exportDir, tags, null);
+ return loader(exportDir).withTags(tags).load();
+ }
+
+ /**
+ * Load a saved model.
+ *
+ * <p>Returns a <code>Loader</code> object that can set configuration options before actually
+ * loading the model.
+ *
+ * @param exportDir the directory path containing a saved model.
+ */
+ public static Loader loader(String exportDir) {
+ return new Loader(exportDir);
}
/**
@@ -95,7 +163,8 @@ public class SavedModelBundle implements AutoCloseable {
return new SavedModelBundle(graph, session, metaGraphDef);
}
- private static native SavedModelBundle load(String exportDir, String[] tags, byte[] runOptions);
+ private static native SavedModelBundle load(
+ String exportDir, String[] tags, byte[] config, byte[] runOptions);
static {
TensorFlow.init();
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFDouble.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFDouble.java
deleted file mode 100644
index 49e5d9f2f3..0000000000
--- a/tensorflow/java/src/main/java/org/tensorflow/types/TFDouble.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-// GENERATED FILE. To update, edit tftypes.pl instead.
-
-package org.tensorflow.types;
-
-import org.tensorflow.DataType;
-
-/** Represents a 64-bit double precision floating point number. */
-public class TFDouble implements TFType {
- private TFDouble() {}
- static {
- Types.typeCodes.put(TFDouble.class, DataType.DOUBLE);
- }
- static {
- Types.scalars.put(TFDouble.class, 0.0);
- }
-}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFFloat.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFFloat.java
deleted file mode 100644
index 8426ee41f0..0000000000
--- a/tensorflow/java/src/main/java/org/tensorflow/types/TFFloat.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-// GENERATED FILE. To update, edit tftypes.pl instead.
-
-package org.tensorflow.types;
-
-import org.tensorflow.DataType;
-
-/** Represents a 32-bit single precision floating point number. */
-public class TFFloat implements TFType {
- private TFFloat() {}
- static {
- Types.typeCodes.put(TFFloat.class, DataType.FLOAT);
- }
- static {
- Types.scalars.put(TFFloat.class, 0f);
- }
-}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFInt32.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFInt32.java
deleted file mode 100644
index 3947b6ad09..0000000000
--- a/tensorflow/java/src/main/java/org/tensorflow/types/TFInt32.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-// GENERATED FILE. To update, edit tftypes.pl instead.
-
-package org.tensorflow.types;
-
-import org.tensorflow.DataType;
-
-/** Represents a 32-bit signed integer. */
-public class TFInt32 implements TFType {
- private TFInt32() {}
- static {
- Types.typeCodes.put(TFInt32.class, DataType.INT32);
- }
- static {
- Types.scalars.put(TFInt32.class, 0);
- }
-}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFInt64.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFInt64.java
deleted file mode 100644
index ccdded8693..0000000000
--- a/tensorflow/java/src/main/java/org/tensorflow/types/TFInt64.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-// GENERATED FILE. To update, edit tftypes.pl instead.
-
-package org.tensorflow.types;
-
-import org.tensorflow.DataType;
-
-/** Represents a 64-bit signed integer. */
-public class TFInt64 implements TFType {
- private TFInt64() {}
- static {
- Types.typeCodes.put(TFInt64.class, DataType.INT64);
- }
- static {
- Types.scalars.put(TFInt64.class, 0L);
- }
-}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFString.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFString.java
deleted file mode 100644
index e7327e8c57..0000000000
--- a/tensorflow/java/src/main/java/org/tensorflow/types/TFString.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-// GENERATED FILE. To update, edit tftypes.pl instead.
-
-package org.tensorflow.types;
-
-import org.tensorflow.DataType;
-
-/** Represents an arbitrary sequence of bytes. */
-public class TFString implements TFType {
- private TFString() {}
- static {
- Types.typeCodes.put(TFString.class, DataType.STRING);
- }
-}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFType.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFType.java
deleted file mode 100644
index 562953ac9d..0000000000
--- a/tensorflow/java/src/main/java/org/tensorflow/types/TFType.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-package org.tensorflow.types;
-
-/**
- * A marker interface for classes representing TensorFlow types.
- */
-public interface TFType {}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/TFUInt8.java b/tensorflow/java/src/main/java/org/tensorflow/types/TFUInt8.java
deleted file mode 100644
index d7305ca5a8..0000000000
--- a/tensorflow/java/src/main/java/org/tensorflow/types/TFUInt8.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-// GENERATED FILE. To update, edit tftypes.pl instead.
-
-package org.tensorflow.types;
-
-import org.tensorflow.DataType;
-
-/** Represents an 8-bit unsigned integer. */
-public class TFUInt8 implements TFType {
- private TFUInt8() {}
- static {
- Types.typeCodes.put(TFUInt8.class, DataType.UINT8);
- }
- static {
- Types.scalars.put(TFUInt8.class, (byte)0);
- }
-}
diff --git a/tensorflow/java/src/main/java/org/tensorflow/types/Types.java b/tensorflow/java/src/main/java/org/tensorflow/types/Types.java
deleted file mode 100644
index 976cd9fd34..0000000000
--- a/tensorflow/java/src/main/java/org/tensorflow/types/Types.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-package org.tensorflow.types;
-
-import java.util.HashMap;
-import java.util.Map;
-import org.tensorflow.DataType;
-
-/**
- * Utility class for managing the representation of TensorFlow types as Java
- * types. For each TensorFlow type (e.g., int32), there is a corresponding Java
- * type (e.g., TFInt32) that represents it at compile time and a corresponding
- * class object (e.g., TFInt32.class) that represents it at run time. There is
- * also an enumeration value in DataType that can be used to represent the
- * type, though that should rarely be required.
- */
-public class Types {
-
- private Types() {} // not instantiable
-
- static final Map<Class<?>, DataType> typeCodes = new HashMap<>();
-
- /** Returns the DataType value corresponding to a TensorFlow type class. */
- public static DataType dataType(Class<? extends TFType> c) {
- DataType dtype = typeCodes.get(c);
- if (dtype == null) {
- throw new IllegalArgumentException("" + c + " is not a TensorFlow type.");
- }
- return dtype;
- }
-
- static final Map<Class<?>, Object> scalars = new HashMap<>();
-
- /** Returns the zero value of type described by {@code c}, or null if
- * the type (e.g., string) is not numeric and therefore has no zero value.
- */
- public static Object zeroValue(Class<? extends TFType> c) {
- return scalars.get(c);
- }
-}
diff --git a/tensorflow/java/src/main/native/saved_model_bundle_jni.cc b/tensorflow/java/src/main/native/saved_model_bundle_jni.cc
index de6382a79c..68999fb2da 100644
--- a/tensorflow/java/src/main/native/saved_model_bundle_jni.cc
+++ b/tensorflow/java/src/main/native/saved_model_bundle_jni.cc
@@ -22,12 +22,25 @@ limitations under the License.
JNIEXPORT jobject JNICALL Java_org_tensorflow_SavedModelBundle_load(
JNIEnv* env, jclass clazz, jstring export_dir, jobjectArray tags,
- jbyteArray run_options) {
+ jbyteArray config, jbyteArray run_options) {
TF_Status* status = TF_NewStatus();
jobject bundle = nullptr;
// allocate parameters for TF_LoadSessionFromSavedModel
TF_SessionOptions* opts = TF_NewSessionOptions();
+ if (config != nullptr) {
+ size_t sz = env->GetArrayLength(config);
+ if (sz > 0) {
+ jbyte* config_data = env->GetByteArrayElements(config, nullptr);
+ TF_SetConfig(opts, static_cast<void*>(config_data), sz, status);
+ env->ReleaseByteArrayElements(config, config_data, JNI_ABORT);
+ if (!throwExceptionIfNotOK(env, status)) {
+ TF_DeleteSessionOptions(opts);
+ TF_DeleteStatus(status);
+ return nullptr;
+ }
+ }
+ }
TF_Buffer* crun_options = nullptr;
if (run_options != nullptr) {
size_t sz = env->GetArrayLength(run_options);
diff --git a/tensorflow/java/src/main/native/saved_model_bundle_jni.h b/tensorflow/java/src/main/native/saved_model_bundle_jni.h
index 6cce6a81bd..a4b05d0409 100644
--- a/tensorflow/java/src/main/native/saved_model_bundle_jni.h
+++ b/tensorflow/java/src/main/native/saved_model_bundle_jni.h
@@ -26,10 +26,10 @@ extern "C" {
* Class: org_tensorflow_SavedModelBundle
* Method: load
* Signature:
- * (Ljava/lang/String;[Ljava/lang/String;[B)Lorg/tensorflow/SavedModelBundle;
+ * (Ljava/lang/String;[Ljava/lang/String;[B[B)Lorg/tensorflow/SavedModelBundle;
*/
JNIEXPORT jobject JNICALL Java_org_tensorflow_SavedModelBundle_load(
- JNIEnv *, jclass, jstring, jobjectArray, jbyteArray);
+ JNIEnv *, jclass, jstring, jobjectArray, jbyteArray, jbyteArray);
#ifdef __cplusplus
} // extern "C"
diff --git a/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java b/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java
index 7922f3329c..7d936867a7 100644
--- a/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java
+++ b/tensorflow/java/src/test/java/org/tensorflow/SavedModelBundleTest.java
@@ -47,7 +47,61 @@ public class SavedModelBundleTest {
fail("not expected");
} catch (org.tensorflow.TensorFlowException e) {
// expected exception
- assertTrue(e.getMessage().contains("SavedModel not found"));
+ assertTrue(e.getMessage().contains("Could not find SavedModel"));
}
}
+
+ @Test
+ public void loader() {
+ try (SavedModelBundle bundle = SavedModelBundle.loader(SAVED_MODEL_PATH)
+ .withTags("serve")
+ .withConfigProto(sillyConfigProto())
+ .withRunOptions(sillyRunOptions())
+ .load()) {
+ assertNotNull(bundle.session());
+ assertNotNull(bundle.graph());
+ assertNotNull(bundle.metaGraphDef());
+ }
+ }
+
+ private static byte[] sillyRunOptions() {
+ // Ideally this would use the generated Java sources for protocol buffers
+ // and end up with something like the snippet below. However, generating
+ // the Java files for the .proto files in tensorflow/core:protos_all is
+ // a bit cumbersome in bazel until the proto_library rule is setup.
+ //
+ // See https://github.com/bazelbuild/bazel/issues/52#issuecomment-194341866
+ // https://github.com/bazelbuild/rules_go/pull/121#issuecomment-251515362
+ // https://github.com/bazelbuild/rules_go/pull/121#issuecomment-251692558
+ //
+ // For this test, for now, the use of specific bytes suffices.
+ return new byte[] {0x08, 0x03};
+ /*
+ return org.tensorflow.framework.RunOptions.newBuilder()
+ .setTraceLevel(RunOptions.TraceLevel.FULL_TRACE)
+ .build()
+ .toByteArray();
+ */
+ }
+
+ public static byte[] sillyConfigProto() {
+ // Ideally this would use the generated Java sources for protocol buffers
+ // and end up with something like the snippet below. However, generating
+ // the Java files for the .proto files in tensorflow/core:protos_all is
+ // a bit cumbersome in bazel until the proto_library rule is setup.
+ //
+ // See https://github.com/bazelbuild/bazel/issues/52#issuecomment-194341866
+ // https://github.com/bazelbuild/rules_go/pull/121#issuecomment-251515362
+ // https://github.com/bazelbuild/rules_go/pull/121#issuecomment-251692558
+ //
+ // For this test, for now, the use of specific bytes suffices.
+ return new byte[] {0x10, 0x01, 0x28, 0x01};
+ /*
+ return org.tensorflow.framework.ConfigProto.newBuilder()
+ .setInterOpParallelismThreads(1)
+ .setIntraOpParallelismThreads(1)
+ .build()
+ .toByteArray();
+ */
+ }
}
diff --git a/tensorflow/python/BUILD b/tensorflow/python/BUILD
index f2ab2f80e6..62020d3de2 100644
--- a/tensorflow/python/BUILD
+++ b/tensorflow/python/BUILD
@@ -74,7 +74,7 @@ py_library(
visibility = [
"//tensorflow:__pkg__",
"//tensorflow/python/tools:__pkg__",
- "//tensorflow/tools/api/generator:__pkg__",
+ "//tensorflow/python/tools/api/generator:__pkg__",
],
deps = [
":array_ops",
@@ -97,6 +97,7 @@ py_library(
":image_ops",
":initializers_ns",
":io_ops",
+ ":kernels",
":layers",
":lib",
":list_ops",
@@ -706,7 +707,9 @@ py_library(
"framework/error_interpolation.py",
],
srcs_version = "PY2AND3",
- deps = [],
+ deps = [
+ ":util",
+ ],
)
py_library(
@@ -744,8 +747,8 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":framework",
+ ":framework_ops",
":function",
- ":op_def_registry",
":tensor_shape",
":versions",
"//tensorflow/core:protos_all_py",
@@ -761,8 +764,10 @@ py_test(
deps = [
":array_ops",
":client_testlib",
+ ":constant_op",
":dtypes",
":framework_ops",
+ ":function",
":function_def_to_graph",
":graph_to_function_def",
":math_ops",
@@ -787,6 +792,19 @@ py_library(
)
py_library(
+ name = "kernels",
+ srcs = [
+ "framework/kernels.py",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":pywrap_tensorflow",
+ ":util",
+ "//tensorflow/core:protos_all_py",
+ ],
+)
+
+py_library(
name = "op_def_library",
srcs = ["framework/op_def_library.py"],
srcs_version = "PY2AND3",
@@ -823,6 +841,7 @@ py_library(
":platform",
":registry",
":tensor_shape",
+ ":traceable_stack",
":util",
":versions",
"//tensorflow/core:protos_all_py",
@@ -888,6 +907,17 @@ py_library(
],
)
+# This target is maintained separately from :util to provide separate visibility
+# for legacy users who were granted visibility when the functions were private
+# members of ops.Graph.
+py_library(
+ name = "tf_stack",
+ srcs = ["util/tf_stack.py"],
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+ deps = [],
+)
+
py_library(
name = "tensor_shape",
srcs = ["framework/tensor_shape.py"],
@@ -923,6 +953,16 @@ py_library(
)
py_library(
+ name = "traceable_stack",
+ srcs = ["framework/traceable_stack.py"],
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+ deps = [
+ ":util",
+ ],
+)
+
+py_library(
name = "versions",
srcs = ["framework/versions.py"],
srcs_version = "PY2AND3",
@@ -1019,7 +1059,9 @@ py_test(
srcs_version = "PY2AND3",
deps = [
":client_testlib",
+ ":constant_op",
":error_interpolation",
+ ":traceable_stack",
],
)
@@ -1208,6 +1250,21 @@ py_test(
],
)
+py_test(
+ name = "framework_traceable_stack_test",
+ size = "small",
+ srcs = ["framework/traceable_stack_test.py"],
+ main = "framework/traceable_stack_test.py",
+ srcs_version = "PY2AND3",
+ deps = [
+ ":framework_test_lib",
+ ":platform_test",
+ ":test_ops",
+ ":traceable_stack",
+ ":util",
+ ],
+)
+
tf_gen_op_wrapper_py(
name = "test_ops",
out = "framework/test_ops.py",
@@ -1440,6 +1497,20 @@ py_test(
],
)
+py_test(
+ name = "framework_kernels_test",
+ size = "small",
+ srcs = ["framework/kernels_test.py"],
+ main = "framework/kernels_test.py",
+ srcs_version = "PY2AND3",
+ deps = [
+ ":framework_test_lib",
+ ":kernels",
+ ":platform_test",
+ ":test_ops",
+ ],
+)
+
tf_gen_op_wrapper_private_py(
name = "array_ops_gen",
visibility = [
@@ -2097,8 +2168,8 @@ py_library(
":linalg_ops_gen",
":linalg_ops_impl",
":math_ops",
- ":nn_ops",
":random_ops",
+ ":util",
"//third_party/py/numpy",
],
)
@@ -3007,6 +3078,20 @@ cuda_py_test(
)
cuda_py_test(
+ name = "init_ops_test",
+ size = "small",
+ srcs = ["ops/init_ops_test.py"],
+ additional_deps = [
+ ":client_testlib",
+ ":init_ops",
+ ":framework_ops",
+ ":resource_variable_ops",
+ "//third_party/py/numpy",
+ "//tensorflow/python/eager:context",
+ ],
+)
+
+cuda_py_test(
name = "math_grad_test",
size = "small",
srcs = ["ops/math_grad_test.py"],
@@ -3298,6 +3383,9 @@ py_library(
],
),
srcs_version = "PY2AND3",
+ visibility = visibility + [
+ "//tensorflow:__pkg__",
+ ],
deps = [
"//third_party/py/numpy",
"@org_python_pypi_backports_weakref",
@@ -3320,6 +3408,7 @@ py_test(
":math_ops",
":util",
"//third_party/py/numpy",
+ "@absl_py//absl/testing:parameterized",
],
)
diff --git a/tensorflow/python/client/session.py b/tensorflow/python/client/session.py
index e037925961..180bb74d00 100644
--- a/tensorflow/python/client/session.py
+++ b/tensorflow/python/client/session.py
@@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import collections
import functools
import re
import threading
@@ -243,7 +244,7 @@ class _FetchMapper(object):
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
- elif isinstance(fetch, dict):
+ elif isinstance(fetch, collections.Mapping):
return _DictFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
@@ -540,10 +541,11 @@ class _DeviceAttributes(object):
(in bytes).
"""
- def __init__(self, name, device_type, memory_limit_bytes):
+ def __init__(self, name, device_type, memory_limit_bytes, incarnation):
self._name = device.canonical_name(name)
self._device_type = device_type
self._memory_limit_bytes = memory_limit_bytes
+ self._incarnation = incarnation
@property
def name(self):
@@ -557,11 +559,16 @@ class _DeviceAttributes(object):
def memory_limit_bytes(self):
return self._memory_limit_bytes
+ @property
+ def incarnation(self):
+ return self._incarnation
+
def __repr__(self):
- return '_DeviceAttributes(%s, %s, %d)' % (
+ return '_DeviceAttributes(%s, %s, %d, %d)' % (
self.name,
self.device_type,
self.memory_limit_bytes,
+ self.incarnation,
)
@@ -658,7 +665,9 @@ class BaseSession(SessionInterface):
name = tf_session.TF_DeviceListName(raw_device_list, i)
device_type = tf_session.TF_DeviceListType(raw_device_list, i)
memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)
- device_list.append(_DeviceAttributes(name, device_type, memory))
+ incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)
+ device_list.append(
+ _DeviceAttributes(name, device_type, memory, incarnation))
tf_session.TF_DeleteDeviceList(raw_device_list)
return device_list
diff --git a/tensorflow/python/client/session_list_devices_test.py b/tensorflow/python/client/session_list_devices_test.py
index c5d82c213a..dd381c689f 100644
--- a/tensorflow/python/client/session_list_devices_test.py
+++ b/tensorflow/python/client/session_list_devices_test.py
@@ -37,6 +37,8 @@ class SessionListDevicesTest(test_util.TensorFlowTestCase):
devices = sess.list_devices()
self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in set(
[d.name for d in devices]), devices)
+ # All valid device incarnations must be non-zero.
+ self.assertTrue(all(d.incarnation != 0 for d in devices))
def testInvalidDeviceNumber(self):
opts = tf_session.TF_NewSessionOptions()
@@ -54,6 +56,8 @@ class SessionListDevicesTest(test_util.TensorFlowTestCase):
devices = sess.list_devices()
self.assertTrue('/job:local/replica:0/task:0/device:CPU:0' in set(
[d.name for d in devices]), devices)
+ # All valid device incarnations must be non-zero.
+ self.assertTrue(all(d.incarnation != 0 for d in devices))
def testListDevicesClusterSpecPropagation(self):
server1 = server_lib.Server.create_local_server()
@@ -67,11 +71,13 @@ class SessionListDevicesTest(test_util.TensorFlowTestCase):
config = config_pb2.ConfigProto(cluster_def=cluster_def)
with session.Session(server1.target, config=config) as sess:
devices = sess.list_devices()
- device_names = set([d.name for d in devices])
+ device_names = set(d.name for d in devices)
self.assertTrue(
'/job:worker/replica:0/task:0/device:CPU:0' in device_names)
self.assertTrue(
'/job:worker/replica:0/task:1/device:CPU:0' in device_names)
+ # All valid device incarnations must be non-zero.
+ self.assertTrue(all(d.incarnation != 0 for d in devices))
if __name__ == '__main__':
diff --git a/tensorflow/python/client/session_test.py b/tensorflow/python/client/session_test.py
index b72e029d1c..052be68385 100644
--- a/tensorflow/python/client/session_test.py
+++ b/tensorflow/python/client/session_test.py
@@ -35,6 +35,7 @@ from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
+from tensorflow.python.framework import device as framework_device_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
@@ -104,18 +105,20 @@ class SessionTest(test_util.TensorFlowTestCase):
copy_val)
def testManyCPUs(self):
- # TODO(keveman): Implement ListDevices and test for the number of
- # devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(device_count={
- 'CPU': 2
- })):
+ 'CPU': 2, 'GPU': 0
+ })) as sess:
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
+ devices = sess.list_devices()
+ self.assertEqual(2, len(devices))
+ for device in devices:
+ self.assertEqual('CPU', framework_device_lib.DeviceSpec.from_string(
+ device.name).device_type)
+
def testPerSessionThreads(self):
- # TODO(keveman): Implement ListDevices and test for the number of
- # devices returned by ListDevices.
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
@@ -1868,19 +1871,21 @@ class SessionTest(test_util.TensorFlowTestCase):
def testDeviceAttributes(self):
attrs = session._DeviceAttributes(
- '/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337)
+ '/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
+ self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def testDeviceAttributesCanonicalization(self):
attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
- 'TYPE', 1337)
+ 'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
+ self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
diff --git a/tensorflow/python/client/tf_session.i b/tensorflow/python/client/tf_session.i
index 985cb90436..1cdd8e0b6a 100644
--- a/tensorflow/python/client/tf_session.i
+++ b/tensorflow/python/client/tf_session.i
@@ -138,6 +138,11 @@ tensorflow::ImportNumpy();
$result = PyLong_FromLongLong($1);
}
+// Convert TF_DeviceListIncarnation uint64_t output to Python integer
+%typemap(out) uint64_t {
+ $result = PyLong_FromUnsignedLongLong($1);
+}
+
// We use TF_OperationGetControlInputs_wrapper instead of
// TF_OperationGetControlInputs
%ignore TF_OperationGetControlInputs;
diff --git a/tensorflow/python/compat/BUILD b/tensorflow/python/compat/BUILD
index 58ceafca06..e0a1c8e057 100644
--- a/tensorflow/python/compat/BUILD
+++ b/tensorflow/python/compat/BUILD
@@ -9,6 +9,7 @@ py_library(
srcs = ["compat.py"],
srcs_version = "PY2AND3",
visibility = ["//tensorflow:internal"],
+ deps = ["//tensorflow/python:util"],
)
tf_py_test(
diff --git a/tensorflow/python/compat/compat.py b/tensorflow/python/compat/compat.py
index 68a6421c2c..247ea7349d 100644
--- a/tensorflow/python/compat/compat.py
+++ b/tensorflow/python/compat/compat.py
@@ -24,13 +24,17 @@ from __future__ import print_function
import datetime
from tensorflow.python.util import tf_contextlib
+from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2018, 8, 1)
+@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
+ See @{$guide/version_compat#backward_and_partial_forward_compatibility}.
+
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
@@ -82,10 +86,13 @@ def forward_compatible(year, month, day):
return _FORWARD_COMPATIBILITY_HORIZON > datetime.date(year, month, day)
+@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
+ See @{$guide/version_compat#backward_and_partial_forward_compatibility}.
+
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
diff --git a/tensorflow/python/data/ops/iterator_ops.py b/tensorflow/python/data/ops/iterator_ops.py
index 35de2f2841..3ef22cf981 100644
--- a/tensorflow/python/data/ops/iterator_ops.py
+++ b/tensorflow/python/data/ops/iterator_ops.py
@@ -499,7 +499,8 @@ class EagerIterator(object):
"tf.data.Dataset.make_initializable_iterator or "
"tf.data.Dataset.make_one_shot_iterator for graph construction".
format(type(self)))
- with ops.device("/device:CPU:0"):
+ self._device = context.context().device_name
+ with ops.device("/cpu:0"):
ds_variant = dataset._as_variant_tensor() # pylint: disable=protected-access
self._output_classes = dataset.output_classes
self._output_types = dataset.output_types
@@ -508,14 +509,14 @@ class EagerIterator(object):
sparse.as_dense_types(self._output_types, self._output_classes))
self._flat_output_shapes = nest.flatten(
sparse.as_dense_shapes(self._output_shapes, self._output_classes))
- self._resource = gen_dataset_ops.anonymous_iterator(
- output_types=self._flat_output_types,
- output_shapes=self._flat_output_shapes)
- gen_dataset_ops.make_iterator(ds_variant, self._resource)
- # Delete the resource when this object is deleted
- self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
- handle=self._resource, handle_device="/device:CPU:0")
- self._device = context.context().device_name
+ with ops.colocate_with(ds_variant):
+ self._resource = gen_dataset_ops.anonymous_iterator(
+ output_types=self._flat_output_types,
+ output_shapes=self._flat_output_shapes)
+ gen_dataset_ops.make_iterator(ds_variant, self._resource)
+ # Delete the resource when this object is deleted
+ self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
+ handle=self._resource, handle_device=self._device)
def __iter__(self):
return self
diff --git a/tensorflow/python/data/util/nest.py b/tensorflow/python/data/util/nest.py
index 32e08021dc..1b596bdfc0 100644
--- a/tensorflow/python/data/util/nest.py
+++ b/tensorflow/python/data/util/nest.py
@@ -13,7 +13,6 @@
# limitations under the License.
# ==============================================================================
-# TODO(shivaniagrawal): Merge with core nest
"""## Functions for working with arbitrarily nested sequences of elements.
NOTE(mrry): This fork of the `tensorflow.python.util.nest` module
diff --git a/tensorflow/python/debug/examples/examples_test.sh b/tensorflow/python/debug/examples/examples_test.sh
index 2d35b2d8bb..f7d597c8c0 100755
--- a/tensorflow/python/debug/examples/examples_test.sh
+++ b/tensorflow/python/debug/examples/examples_test.sh
@@ -99,7 +99,7 @@ if [[ -d "${CUSTOM_DUMP_ROOT}" ]]; then
fi
# Test debugging of tf.keras.
-cat << EOF | "${DEBUG_KERAS_BIN}" --debug --ui_type=readline
+cat << EOF | ${DEBUG_KERAS_BIN} --debug --ui_type=readline
run -f has_inf_or_nan
EOF
diff --git a/tensorflow/python/eager/BUILD b/tensorflow/python/eager/BUILD
index 6ede8e4f4d..32a8452f62 100644
--- a/tensorflow/python/eager/BUILD
+++ b/tensorflow/python/eager/BUILD
@@ -322,6 +322,7 @@ cuda_py_test(
"//tensorflow/python:math_ops",
"//tensorflow/python:pywrap_tensorflow",
"//tensorflow/python:random_ops",
+ "//tensorflow/python/keras",
],
)
@@ -404,6 +405,7 @@ cuda_py_test(
"//tensorflow/python:array_ops",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework_test_lib",
+ "@six_archive//:six",
],
tags = [
"optonly", # The test is too slow in non-opt mode
diff --git a/tensorflow/python/eager/backprop.py b/tensorflow/python/eager/backprop.py
index 9e0bbce4a1..c59ad09bf1 100644
--- a/tensorflow/python/eager/backprop.py
+++ b/tensorflow/python/eager/backprop.py
@@ -599,15 +599,18 @@ def _fast_fill(value, shape, dtype):
def _zeros(shape, dtype):
- """Wraps array_ops.zeros to cache last zero for a given shape and dtype."""
- device = context.context().device_name
+ """Helper to return (possibly cached) zero tensors in eager mode."""
if dtype == dtypes.variant:
# TODO(apassos): need to save enough information about variant tensors to do
# a zeros
return None
- # pylint: disable=protected-access
- cache_key = shape, dtype, device, context.context()._eager_context.mode
- # pylint: enable=protected-access
+
+ ctx = context.context()
+ if not ctx.executing_eagerly():
+ return array_ops.zeros(shape, dtype)
+
+ device = ctx.device_name
+ cache_key = shape, dtype, device
cached = _zeros_cache.get(cache_key)
if cached is None:
cached = _fast_fill(0, shape, dtype)
@@ -616,6 +619,9 @@ def _zeros(shape, dtype):
def _ones(shape, dtype):
+ if not context.context().executing_eagerly():
+ return array_ops.ones(shape, dtype)
+
if shape == (): # pylint: disable=g-explicit-bool-comparison
return constant_op.constant(1, dtype=dtype)
return _fast_fill(1, shape, dtype)
@@ -643,10 +649,10 @@ class GradientTape(object):
Operations are recorded if they are executed within this context manager and
at least one of their inputs is being "watched".
- Trainable variables (created by `tf.contrib.eager.Variable` or
- @{tf.get_variable}, trainable=True is default in both cases) are automatically
- watched. Tensors can be manually watched by invoking the `watch` method on
- this context manager.
+ Trainable variables (created by `tf.Variable` or @{tf.get_variable},
+ trainable=True is default in both cases) are automatically watched. Tensors
+ can be manually watched by invoking the `watch` method on this context
+ manager.
For example, consider the function `y = x * x`. The gradient at `x = 3.0` can
be computed as:
diff --git a/tensorflow/python/eager/backprop_test.py b/tensorflow/python/eager/backprop_test.py
index bdda200ff6..3d3f54b9c4 100644
--- a/tensorflow/python/eager/backprop_test.py
+++ b/tensorflow/python/eager/backprop_test.py
@@ -96,6 +96,19 @@ class BackpropTest(test.TestCase):
self.assertAllEqual(grads_and_vars[0][0], 1.0)
self.assertAllEqual(id(grads_and_vars[0][1]), id(x))
+ def testGradientInsideLoop(self):
+ with ops.Graph().as_default():
+ v = resource_variable_ops.ResourceVariable(1.0)
+
+ def body(_):
+ _ = v + 1.0 # This reads the variable inside the loop context
+ with backprop.GradientTape() as t:
+ result = v * 2
+ self.assertTrue(t.gradient(result, v) is not None)
+ return 1.0
+
+ control_flow_ops.while_loop(lambda i: False, body, [1.0])
+
def testWhereGradient(self):
# Note: where is special because only some of its arguments are of
# differentiable dtypes.
@@ -912,32 +925,23 @@ class BackpropTest(test.TestCase):
'did you forget to return a value from fn?'):
val_and_grads_fn(x, y)
- def testZerosCacheDoesntLeakAcrossModes(self):
- with ops.Graph().as_default():
- t = random_ops.random_normal(shape=[100, 2])
- x = random_ops.random_normal(shape=[100, 4])
- dy = random_ops.random_normal(shape=[100, 4])
- with backprop.GradientTape() as gradient_tape:
- gradient_tape.watch(x)
- x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
- y1 = x1 ** 2.
- y = array_ops.concat([y1, t], axis=1)
-
- dx = gradient_tape.gradient(y, x, output_gradients=dy)
- with self.test_session() as sess:
- sess.run(variables.global_variables_initializer())
- sess.run(dx)
-
- t = random_ops.random_normal(shape=[100, 2])
- x = random_ops.random_normal(shape=[100, 4])
- dy = random_ops.random_normal(shape=[100, 4])
- with backprop.GradientTape() as gradient_tape:
- gradient_tape.watch(x)
- x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
- y1 = x1 ** 2.
- y = array_ops.concat([y1, t], axis=1)
-
- dx = gradient_tape.gradient(y, x, output_gradients=dy)
+ def testZerosCacheDoesntLeakAcrossGraphs(self):
+ with context.graph_mode():
+ def get_grad():
+ with ops.Graph().as_default(), self.test_session():
+ t = constant_op.constant(1, dtype=dtypes.float32, shape=(10, 4))
+ x = constant_op.constant(2, dtype=dtypes.float32, shape=(10, 4))
+ with backprop.GradientTape() as gt:
+ tape.watch(x)
+ x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
+ y1 = x1**2
+ y = array_ops.concat([y1, t], axis=1)
+ return self.evaluate(gt.gradient(y, x))
+
+ grad1 = get_grad()
+ grad2 = get_grad()
+
+ self.assertAllEqual(grad1, grad2)
if __name__ == '__main__':
diff --git a/tensorflow/python/eager/benchmarks_test.py b/tensorflow/python/eager/benchmarks_test.py
index 3aad4a114a..afc4bf0066 100644
--- a/tensorflow/python/eager/benchmarks_test.py
+++ b/tensorflow/python/eager/benchmarks_test.py
@@ -31,6 +31,7 @@ import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
+from tensorflow.python import keras
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import backprop # pylint: disable=unused-import
from tensorflow.python.eager import context
@@ -70,6 +71,25 @@ def c_tfe_py_fastpath_execute(a,
six.raise_from(core._status_to_exception(e.code, message), None)
+class SubclassedKerasModel(keras.Model):
+
+ def __init__(self):
+ super(SubclassedKerasModel, self).__init__()
+ self.layer = keras.layers.Dense(
+ 10, kernel_initializer="ones", bias_initializer="zeros")
+
+ def call(self, x):
+ return self.layer(x)
+
+
+def make_keras_model():
+ x = keras.Input(shape=(10,))
+ y = keras.layers.Dense(
+ 10, kernel_initializer="ones", bias_initializer="zeros")(
+ x)
+ return keras.Model(inputs=x, outputs=y)
+
+
class MicroBenchmarks(test.Benchmark):
def __init__(self):
@@ -115,6 +135,7 @@ class MicroBenchmarks(test.Benchmark):
def func():
ops.EagerTensor(value, context=handle, device=device, dtype=dtype)
+
self._run(func, 30000)
def benchmark_create_float_tensor_from_list_CPU(self):
@@ -211,8 +232,8 @@ class MicroBenchmarks(test.Benchmark):
inputs = [m]
def f():
- pywrap_tensorflow.TFE_Py_Execute(
- ctx_handle, None, "Identity", inputs, attrs, 1)
+ pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "Identity", inputs,
+ attrs, 1)
self._run(f, 30000)
@@ -234,14 +255,13 @@ class MicroBenchmarks(test.Benchmark):
def f():
with backprop.GradientTape():
pass
+
self._run(f, 30000)
def benchmark_tf_gradient_function_no_op(self):
with context.device(CPU):
m = gen_array_ops.identity(self._m_2)
- self._run(
- lambda: backprop.gradients_function(lambda x: x, [0])(m),
- 30000)
+ self._run(lambda: backprop.gradients_function(lambda x: x, [0])(m), 30000)
def _benchmark_np_matmul(self, m, transpose_b, num_iters):
a = m.cpu().numpy()
@@ -255,6 +275,7 @@ class MicroBenchmarks(test.Benchmark):
self._run(func, num_iters, execution_mode=execution_mode)
def _benchmark_gen_math_ops_matmul(self, m, transpose_b, num_iters):
+
def func():
gen_math_ops.mat_mul(m, m, transpose_b=transpose_b)
@@ -276,9 +297,10 @@ class MicroBenchmarks(test.Benchmark):
device = context.context().device_name
attrs = ("transpose_a", False, "transpose_b", transpose_b, "T",
m.dtype.as_datatype_enum)
+
def func():
- pywrap_tensorflow.TFE_Py_Execute(ctx_handle, device, "MatMul",
- inputs, attrs, 1)
+ pywrap_tensorflow.TFE_Py_Execute(ctx_handle, device, "MatMul", inputs,
+ attrs, 1)
self._run(func, num_iters)
@@ -542,6 +564,30 @@ class MicroBenchmarks(test.Benchmark):
self._benchmark_read_variable_with_tape(
m, num_iters=self._num_iters_2_by_2)
+ def benchmark_keras_model_subclassed(self):
+ model = SubclassedKerasModel()
+ data = random_ops.random_uniform((10, 10))
+
+ func = lambda: model(data)
+ # First call is more expensive (creates variables etc.), discount that.
+ func()
+
+ # The whole point of this test is to contrast subclassing with
+ # the functional style of keras model building, so validate that
+ # the models are equivalent.
+ assert np.equal(func(), make_keras_model()(data)).all()
+
+ self._run(func, 30000)
+
+ def benchmark_keras_model_functional(self):
+ model = make_keras_model()
+ data = random_ops.random_uniform((10, 10))
+ func = lambda: model(data)
+ # Symmetry with benchmark_keras_model_subclassed
+ func()
+ assert np.equal(func(), SubclassedKerasModel()(data)).all()
+ self._run(func, 30000)
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/eager/context.py b/tensorflow/python/eager/context.py
index 85b9491903..495a674526 100644
--- a/tensorflow/python/eager/context.py
+++ b/tensorflow/python/eager/context.py
@@ -177,6 +177,11 @@ class Context(object):
- tf.contrib.eager.SYNC: executes each operation synchronously.
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
+ server_def: (Optional.) A tensorflow::ServerDef proto.
+ Enables execution on remote devices. gRPC servers need to be started
+ with a server_def identical to this one, each with its own task_index
+ set, so that the servers can communicate. It will then be possible to
+ execute operations on remote devices.
Raises:
ValueError: If execution_mode is not valid.
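For reference, a minimal sketch of the cluster setup the new `server_def` argument presupposes: every task starts a gRPC server from an identical `ServerDef` that differs only in `task_index`. The two-task "worker" job and addresses below are hypothetical, and how the resulting proto is handed to the eager `Context` is only described by the docstring above, not shown here.

```
# Hypothetical two-task cluster; each task runs this with its own task_index
# so the gRPC servers can reach each other.
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.training import server_lib

cluster = server_lib.ClusterSpec(
    {"worker": ["localhost:2222", "localhost:2223"]})

server_def = tensorflow_server_pb2.ServerDef(
    cluster=cluster.as_cluster_def(),
    job_name="worker",
    task_index=0,        # 1 on the second task
    protocol="grpc")

# Starts the gRPC server for this task; the same proto (with the local
# task_index) is what the Context's new server_def argument expects.
server = server_lib.Server(server_def, start=True)
```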
diff --git a/tensorflow/python/eager/core_test.py b/tensorflow/python/eager/core_test.py
index 3fabe7060e..cc765725a4 100644
--- a/tensorflow/python/eager/core_test.py
+++ b/tensorflow/python/eager/core_test.py
@@ -610,6 +610,14 @@ class TFETest(test_util.TensorFlowTestCase):
self.assertEquals(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
+ def testConvertMixedEagerTensorsWithVariables(self):
+ var = resource_variable_ops.ResourceVariable(1.0)
+ types, tensors = execute_lib.convert_to_mixed_eager_tensors(
+ ['foo', var], context.context())
+ self.assertAllEqual([dtypes.string, dtypes.float32], types)
+ for t in tensors:
+ self.assertIsInstance(t, ops.EagerTensor)
+
class SendRecvTest(test_util.TensorFlowTestCase):
diff --git a/tensorflow/python/eager/execute.py b/tensorflow/python/eager/execute.py
index 2ff5b8d8f4..f9b8d2cb5d 100644
--- a/tensorflow/python/eager/execute.py
+++ b/tensorflow/python/eager/execute.py
@@ -198,11 +198,7 @@ def args_to_matching_eager(l, ctx, default_dtype=None):
def convert_to_mixed_eager_tensors(values, ctx):
- v = [
- t if isinstance(t, ops.EagerTensor) else ops.EagerTensor(
- t, context=ctx._handle, device=ctx.device_name) # pylint: disable=protected-access
- for t in values
- ]
+ v = [ops.internal_convert_to_tensor(t, ctx=ctx) for t in values]
types = [t._datatype_enum() for t in v] # pylint: disable=protected-access
return types, v
diff --git a/tensorflow/python/eager/function.py b/tensorflow/python/eager/function.py
index df83d673ad..5e4f9e29da 100644
--- a/tensorflow/python/eager/function.py
+++ b/tensorflow/python/eager/function.py
@@ -21,6 +21,7 @@ from __future__ import print_function
import collections
import functools
+import threading
import numpy as np
@@ -137,7 +138,7 @@ class CapturingGraph(ops.Graph):
inputs[i] = self.capture(inp)
return super(CapturingGraph, self).create_op(
op_type, inputs, dtypes, input_types, name, attrs, op_def,
- compute_shapes, compute_device)
+ compute_device=compute_device)
# pylint: disable=invalid-name
@@ -469,37 +470,39 @@ class GraphModeFunction(object):
def _construct_backprop_function(self):
"""Constructs the backprop function object for this function."""
- with self._graph.as_default(), context.graph_mode():
- c_known_ops = set()
- c_captured_tensors = set()
-
- existing_op_len = len(self._graph.get_operations())
- filtered_outputs = [x for x in self._python_returns if x is not None]
+ filtered_outputs = [x for x in self._python_returns if x is not None]
+ captures = {}
+ backwards_graph = CapturingGraph(captures)
+ backwards_graph._graph_key = self._graph._graph_key # pylint: disable=protected-access
+ for collection in self._graph.collections:
+ backwards_graph.get_collection_ref(
+ collection)[:] = self._graph.get_collection(collection)
+ backwards_graph.seed = self._graph.seed
+ with backwards_graph.as_default():
self._out_grad_placeholders = [
graph_placeholder(x.dtype, x.shape) for x in filtered_outputs]
- in_gradients = gradients_impl.gradients(
+ in_gradients = gradients_impl._GradientsHelper( # pylint: disable=protected-access
filtered_outputs,
self._input_placeholders,
- grad_ys=self._out_grad_placeholders)
- for op in self._graph.get_operations()[existing_op_len:]:
- if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
- raise ValueError("defun cannot capture variables created without "
- "using tf.get_variable. Op: %s" % op)
- c_known_ops.add(op)
- for i in op.inputs:
- if i.op not in c_known_ops:
- c_captured_tensors.add(i)
+ grad_ys=self._out_grad_placeholders,
+ src_graph=self._graph)
backward_outputs = tuple(
grad for grad in _flatten(in_gradients) if grad is not None)
output_shapes = tuple(grad.shape for grad in backward_outputs)
- captures = list(sorted(c_captured_tensors, key=lambda x: x.name))
+ ids = list(sorted(captures.keys()))
+ if ids:
+ extra_inputs, extra_placeholders = zip(*[captures[x] for x in ids])
+ else:
+ extra_inputs = []
+ extra_placeholders = []
+
forward_name = _forward_name(self._func_name)
self._forward_fdef = _EagerDefinedFunction(
forward_name, self._graph, self._ops, self._input_placeholders,
- filtered_outputs + captures, self._attrs)
- all_inputs = self._out_grad_placeholders + captures
+ filtered_outputs + list(extra_inputs), self._attrs)
+ all_inputs = self._out_grad_placeholders + list(extra_placeholders)
# Excluding input ops from the body as we do not intend to execute these
# operations when the function is executed.
all_ignored_ops = frozenset(x.op for x in all_inputs)
@@ -507,11 +510,12 @@ class GraphModeFunction(object):
# means rerunning the function-defining code will always define the same
# function, which is useful if we serialize this etc.
function_def_ops = tuple(x
- for x in sorted(c_known_ops, key=lambda x: x.name)
+ for x in sorted(backwards_graph.get_operations(),
+ key=lambda x: x.name)
if x not in all_ignored_ops)
bname = _backward_name(self._func_name)
self._backward_function = GraphModeFunction(
- bname, all_inputs, [], self._graph, function_def_ops,
+ bname, all_inputs, [], backwards_graph, function_def_ops,
backward_outputs, in_gradients, output_shapes, attrs=self._attrs)
def _backprop_call(self, args):
@@ -656,55 +660,58 @@ def _deterministic_dict_values(kwds):
def _trace_and_define_function(name, func, compiled, args, kwds):
"""Defines and returns graph-mode version of func."""
graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
- with context.graph_mode():
- captures = {}
- tmp_graph = CapturingGraph(captures)
- # Inherit the graph key, since this is used for matching variables in
- # optimizers.
- tmp_graph._graph_key = graph_key # pylint: disable=protected-access
- # Copy the graph collections to ensure summaries and other things work. This
- # lets the function access (but not mutate) collections of the containing
- # graph, such as the global step and the summary writer collections.
- curr_graph = ops.get_default_graph()
- for collection in curr_graph.collections:
- tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
- collection)
- with tmp_graph.as_default(), AutomaticControlDependencies() as a:
- func_args = _get_defun_inputs(args)
- func_kwds = _get_defun_inputs(kwds)
-
- def convert(x):
- if x is None:
- return None
- x = ops.convert_to_tensor_or_indexed_slices(x)
- x = a.mark_as_return(x)
- return x
-
- this_tape = tape.push_new_tape()
- try:
- func_outputs = func(*func_args, **func_kwds)
- func_outputs = nest.map_structure(convert, func_outputs)
- finally:
- tape.pop_tape(this_tape)
- variables = this_tape.watched_variables()
-
- # Returning a closed-over tensor as an output does not trigger a
- # call to convert_to_tensor, so we manually capture all such tensors.
- outputs_list = _flatten(func_outputs)
- func_def_outputs = [
- tmp_graph.capture(x) for x in outputs_list
- if x is not None
- ]
-
- ids = list(sorted(captures.keys()))
- if ids:
- extra_inputs, extra_placeholders = zip(* [captures[x] for x in ids])
- else:
- extra_inputs = []
- extra_placeholders = []
- output_shapes = tuple(
- x.shape if isinstance(x, ops.Tensor) else None
- for x in func_def_outputs)
+ captures = {}
+ tmp_graph = CapturingGraph(captures)
+ # Inherit the graph key, since this is used for matching variables in
+ # optimizers.
+ tmp_graph._graph_key = graph_key # pylint: disable=protected-access
+ # Copy the graph collections to ensure summaries and other things work. This
+ # lets the function access (but not mutate) collections of the containing
+ # graph, such as the global step and the summary writer collections.
+ curr_graph = ops.get_default_graph()
+ for collection in curr_graph.collections:
+ tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
+ collection)
+ if context.executing_eagerly():
+ tmp_graph.seed = context.global_seed()
+ else:
+ tmp_graph.seed = curr_graph.seed
+ with tmp_graph.as_default(), AutomaticControlDependencies() as a:
+ func_args = _get_defun_inputs(args)
+ func_kwds = _get_defun_inputs(kwds)
+
+ def convert(x):
+ if x is None:
+ return None
+ x = ops.convert_to_tensor_or_indexed_slices(x)
+ x = a.mark_as_return(x)
+ return x
+
+ this_tape = tape.push_new_tape()
+ try:
+ func_outputs = func(*func_args, **func_kwds)
+ func_outputs = nest.map_structure(convert, func_outputs)
+ finally:
+ tape.pop_tape(this_tape)
+ variables = this_tape.watched_variables()
+
+ # Returning a closed-over tensor as an output does not trigger a
+ # call to convert_to_tensor, so we manually capture all such tensors.
+ outputs_list = _flatten(func_outputs)
+ func_def_outputs = [
+ tmp_graph.capture(x) for x in outputs_list
+ if x is not None
+ ]
+
+ ids = list(sorted(captures.keys()))
+ if ids:
+ extra_inputs, extra_placeholders = zip(*[captures[x] for x in ids])
+ else:
+ extra_inputs = []
+ extra_placeholders = []
+ output_shapes = tuple(
+ x.shape if isinstance(x, ops.Tensor) else None
+ for x in func_def_outputs)
func_kwds_values = _deterministic_dict_values(func_kwds)
flat_inputs = [
@@ -770,6 +777,11 @@ class _PolymorphicFunction(object):
See the documentation for `defun` for more information on the semantics of
defined functions.
+
+ The `_PolymorphicFunction` class is thread-compatible, meaning that minimal
+ usage of defuns (defining and calling) is thread-safe, but if users call other
+ methods or invoke the base `python_function` themselves, external
+ synchronization is necessary.
"""
def __init__(self, python_function, name, compiled=False):
@@ -787,6 +799,8 @@ class _PolymorphicFunction(object):
self._arguments_to_functions = {}
self._variables = []
+ self._lock = threading.Lock()
+
def __get__(self, instance, owner):
"""Makes it possible to defun instance methods."""
del owner
@@ -825,15 +839,16 @@ class _PolymorphicFunction(object):
# signature so we don't improperly capture tensors such as variables.
signature += tuple([context.executing_eagerly() or ops.get_default_graph()])
- if signature not in self._arguments_to_functions:
- graph_function = _trace_and_define_function(
- self._name, self._python_function, self._compiled, args, kwds)
- self._arguments_to_functions[signature] = graph_function
- self._variables.extend(
- [v for v in graph_function.variables if v not in self._variables])
- return graph_function, inputs
- else:
- return self._arguments_to_functions[signature], inputs
+ with self._lock:
+ if signature not in self._arguments_to_functions:
+ graph_function = _trace_and_define_function(
+ self._name, self._python_function, self._compiled, args, kwds)
+ self._arguments_to_functions[signature] = graph_function
+ self._variables.extend(
+ [v for v in graph_function.variables if v not in self._variables])
+ return graph_function, inputs
+ else:
+ return self._arguments_to_functions[signature], inputs
def __call__(self, *args, **kwds):
"""Calls a graph function specialized for this input signature."""
@@ -1065,7 +1080,7 @@ def defun(func=None, compiled=False):
tf.enable_eager_execution()
def fn():
- x = tf.contrib.eager.Variable(0.0)
+ x = tf.Variable(0.0)
x.assign_add(1.0)
return x.read_value()
@@ -1082,19 +1097,18 @@ def defun(func=None, compiled=False):
```
Finally, because each input signature is bound to a unique graph, if your
- Python function constructs `tf.contrib.eager.Variable` objects, then each
- graph constructed for that Python function will reference a unique set of
- variables. To circumvent this problem, we recommend against compiling Python
- functions that create `tf.contrib.eager.Variable` objects. Instead, Python
- functions should either lexically close over `tf.contrib.eager.Variable`
- objects or accept them as arguments, preferably encapsulated in an
- object-oriented container. If you must create variables inside your Python
- function and you want each graph generated for it to reference the same set of
- variables, add logic to your Python function that ensures that variables are
- only created the first time it is called and are reused for every subsequent
- invocation; note that this is precisely what @{tf.keras.layers.Layer} objects
- do, so we recommend using them to represent variable-bearing computations
- whenever possible.
+ Python function constructs `tf.Variable` objects, then each graph constructed
+ for that Python function will reference a unique set of variables. To
+ circumvent this problem, we recommend against compiling Python functions that
+ create `tf.Variable` objects. Instead, Python functions should either
+ lexically close over `tf.Variable` objects or accept them as arguments,
+ preferably encapsulated in an object-oriented container. If you must create
+ variables inside your Python function and you want each graph generated for it
+ to reference the same set of variables, add logic to your Python function that
+ ensures that variables are only created the first time it is called and are
+ reused for every subsequent invocation; note that this is precisely what
+ @{tf.keras.layers.Layer} objects do, so we recommend using them to represent
+ variable-bearing computations whenever possible.
Args:
func: function to be compiled. If `func` is None, returns a
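The recommendation above (lexically close over variables rather than creating them inside the compiled function) as a minimal sketch; it uses `tf.contrib.eager.Variable` for eager construction, with plain `tf.Variable` applying on builds where it is eager-safe, as the updated docstring examples imply:

```
import tensorflow as tf

tf.enable_eager_execution()

# Created once, outside the compiled function.
v = tf.contrib.eager.Variable(1.0)

@tf.contrib.eager.defun
def scale(x):
  # `v` is captured lexically, so every graph traced for `scale` references
  # the same variable instead of creating a fresh one per trace.
  return v * x

print(scale(tf.constant(3.0)))  # => 3.0
v.assign(2.0)
print(scale(tf.constant(3.0)))  # => 6.0
```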
diff --git a/tensorflow/python/eager/function_test.py b/tensorflow/python/eager/function_test.py
index a3e63c3153..2e86563a7d 100644
--- a/tensorflow/python/eager/function_test.py
+++ b/tensorflow/python/eager/function_test.py
@@ -30,6 +30,7 @@ from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function as tf_function
from tensorflow.python.framework import ops
+from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.layers import convolutional
@@ -39,10 +40,12 @@ from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
+from tensorflow.python.training import adam
from tensorflow.python.training import momentum
from tensorflow.python.training import training_ops
from tensorflow.python.util import compat
@@ -136,6 +139,18 @@ class FunctionTest(test.TestCase):
out = sq_op(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
+ def disabled_testRandomSeed(self):
+
+ @function.defun
+ def f():
+ return random_ops.random_normal(())
+
+ random_seed.set_random_seed(1)
+ x = f()
+ self.assertNotEqual(x, f())
+ random_seed.set_random_seed(1)
+ self.assertAllEqual(f(), x)
+
def testNestedInputsDefunOpGraphMode(self):
matmul = function.defun(math_ops.matmul)
@@ -198,6 +213,19 @@ class FunctionTest(test.TestCase):
self.assertEqual(fn_op.output_shapes, None)
self.assertAllEqual(fn_op(x, x), None)
+ @test_util.run_in_graph_and_eager_modes()
+ def testDefunCondGradient(self):
+
+ @function.defun
+ def f(x):
+ return control_flow_ops.cond(x > 0.5, lambda: 2 * x, lambda: 3 * x)
+
+ with backprop.GradientTape() as t:
+ x = constant_op.constant(1.0)
+ t.watch(x)
+ y = f(x)
+ self.assertAllEqual(self.evaluate(t.gradient(y, x)), 2.0)
+
def testDefunCapturedInt32(self):
x = constant_op.constant(1, dtype=dtypes.int32)
@@ -1152,6 +1180,23 @@ class AutomaticControlDependenciesTest(test.TestCase):
value = train()
self.assertEqual(value.numpy(), -1.0)
+ # TODO(b/111663004): This should work when the outer context is graph
+ # building.
+ def testOptimizerNonSlotVarsInDefunNoError(self):
+ def loss(v):
+ return v**2
+
+ optimizer = adam.AdamOptimizer(learning_rate=1.0)
+
+ @function.defun
+ def train():
+ v = resource_variable_ops.ResourceVariable(1.0)
+ grad = backprop.implicit_grad(loss)(v)
+ optimizer.apply_gradients(grad)
+ return v.read_value()
+
+ train()
+
def testOptimizerInDefunWithCapturedVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
def loss():
diff --git a/tensorflow/python/eager/memory_test.py b/tensorflow/python/eager/memory_test.py
index 74c6cbdd31..a1a59d511f 100644
--- a/tensorflow/python/eager/memory_test.py
+++ b/tensorflow/python/eager/memory_test.py
@@ -24,6 +24,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import six
+
from tensorflow.python import keras
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
@@ -63,7 +65,7 @@ class MemoryTest(test.TestCase):
initial = memory_profiler.memory_usage(-1)[0]
- for _ in xrange(num_iters):
+ for _ in six.moves.range(num_iters):
f()
increase = memory_profiler.memory_usage(-1)[0] - initial
diff --git a/tensorflow/python/eager/pywrap_tensor.cc b/tensorflow/python/eager/pywrap_tensor.cc
index ea604647fa..cefd5b1206 100644
--- a/tensorflow/python/eager/pywrap_tensor.cc
+++ b/tensorflow/python/eager/pywrap_tensor.cc
@@ -620,10 +620,6 @@ static PyType_Slot EagerTensor_Type_slots[] = {
{Py_tp_init, reinterpret_cast<void*>(EagerTensor_init)},
{0, nullptr},
};
-
-PyType_Spec EagerTensor_Type_spec = {"EagerTensor", sizeof(EagerTensor), 0,
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE,
- EagerTensor_Type_slots};
#else
// TODO(agarwal): support active_trace.
static PyTypeObject _EagerTensorType = {
@@ -754,6 +750,34 @@ PyObject* TFE_Py_InitEagerTensor(PyObject* base_class) {
#if PY_MAJOR_VERSION >= 3
PyObject* bases = PyTuple_New(1);
PyTuple_SET_ITEM(bases, 0, base_class);
+
+ tensorflow::Safe_PyObjectPtr base_class_module(
+ PyObject_GetAttrString(base_class, "__module__"));
+ const char* module = nullptr;
+ if (PyErr_Occurred()) {
+ PyErr_Clear();
+ module = "__builtin__";
+ } else {
+ module = PyBytes_AsString(base_class_module.get());
+ if (module == nullptr) {
+ PyErr_Clear();
+ module = PyUnicode_AsUTF8(base_class_module.get());
+ if (module == nullptr) {
+ PyErr_Clear();
+ module = "__builtin__";
+ }
+ }
+ }
+
+ // NOTE: The c_str() from this string needs to outlive this function, hence
+ // it is static.
+ static tensorflow::string fully_qualified_name =
+ tensorflow::strings::StrCat(module, ".EagerTensor");
+
+ static PyType_Spec EagerTensor_Type_spec = {
+ fully_qualified_name.c_str(), sizeof(EagerTensor), 0,
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE, EagerTensor_Type_slots};
+
EagerTensorType = reinterpret_cast<PyTypeObject*>(
PyType_FromSpecWithBases(&EagerTensor_Type_spec, bases));
if (PyErr_Occurred()) {
diff --git a/tensorflow/python/eager/pywrap_tfe_src.cc b/tensorflow/python/eager/pywrap_tfe_src.cc
index ec7e2371e9..4d28e98961 100644
--- a/tensorflow/python/eager/pywrap_tfe_src.cc
+++ b/tensorflow/python/eager/pywrap_tfe_src.cc
@@ -1173,14 +1173,14 @@ static tensorflow::eager::TapeTensor TapeTensorFromTensor(PyObject* tensor) {
if (EagerTensor_CheckExact(tensor)) {
TFE_TensorHandle* t = EagerTensor_Handle(tensor);
tensorflow::int64 id = EagerTensor_id(tensor);
- const tensorflow::Tensor* tensor = nullptr;
- const tensorflow::Status status = t->handle->Tensor(&tensor);
+ tensorflow::TensorShape tensor_shape;
+ const tensorflow::Status status = t->handle->Shape(&tensor_shape);
+
if (MaybeRaiseExceptionFromStatus(status, nullptr)) {
return tensorflow::eager::TapeTensor{id, t->handle->dtype,
tensorflow::TensorShape({})};
} else {
- return tensorflow::eager::TapeTensor{id, t->handle->dtype,
- tensor->shape()};
+ return tensorflow::eager::TapeTensor{id, t->handle->dtype, tensor_shape};
}
}
tensorflow::int64 id = FastTensorId(tensor);
diff --git a/tensorflow/python/eager/tensor_test.py b/tensorflow/python/eager/tensor_test.py
index 626a4eb1ee..871136e2c8 100644
--- a/tensorflow/python/eager/tensor_test.py
+++ b/tensorflow/python/eager/tensor_test.py
@@ -278,7 +278,7 @@ class TFETensorUtilTest(test_util.TensorFlowTestCase):
with self.assertRaisesRegexp(
TypeError,
- r"tensors argument must be a list or a tuple. Got \"EagerTensor\""):
+ r"tensors argument must be a list or a tuple. Got.*EagerTensor"):
pywrap_tensorflow.TFE_Py_TensorShapeSlice(t1, -2)
def testNegativeSliceDim(self):
diff --git a/tensorflow/python/estimator/BUILD b/tensorflow/python/estimator/BUILD
index 8ee38d35cc..fd46163050 100644
--- a/tensorflow/python/estimator/BUILD
+++ b/tensorflow/python/estimator/BUILD
@@ -40,9 +40,9 @@ py_library(
srcs_version = "PY2AND3",
deps = [
":gc",
+ ":metric_keys",
+ ":util",
"//tensorflow:tensorflow_py_no_contrib",
- "//tensorflow/python/estimator:metric_keys",
- "//tensorflow/python/estimator:util",
],
)
@@ -683,9 +683,9 @@ py_test(
],
deps = [
":keras",
+ ":numpy_io",
+ ":run_config",
"//tensorflow:tensorflow_py_no_contrib",
- "//tensorflow/python/estimator:numpy_io",
- "//tensorflow/python/estimator:run_config",
"//third_party/py/numpy",
],
)
@@ -707,6 +707,14 @@ py_library(
)
py_library(
+ name = "expect_h5py_installed",
+ # This is a dummy rule used as an h5py dependency in open-source.
+ # We expect h5py to already be installed on the system, e.g. via
+ # `pip install h5py'
+ visibility = ["//visibility:public"],
+)
+
+py_library(
name = "expect_six_installed",
# This is a dummy rule used as a numpy dependency in open-source.
# We expect six to already be installed on the system, e.g. via
diff --git a/tensorflow/python/estimator/api/BUILD b/tensorflow/python/estimator/api/BUILD
index ceb9baef4d..a75fa7d0ae 100644
--- a/tensorflow/python/estimator/api/BUILD
+++ b/tensorflow/python/estimator/api/BUILD
@@ -6,8 +6,8 @@ package(
licenses(["notice"]) # Apache 2.0
-load("//tensorflow/tools/api/generator:api_gen.bzl", "gen_api_init_files")
-load("//tensorflow/tools/api/generator:api_gen.bzl", "ESTIMATOR_API_INIT_FILES")
+load("//tensorflow/python/tools/api/generator:api_gen.bzl", "gen_api_init_files")
+load("//tensorflow/python/tools/api/generator:api_gen.bzl", "ESTIMATOR_API_INIT_FILES")
gen_api_init_files(
name = "estimator_python_api_gen",
diff --git a/tensorflow/python/estimator/canned/boosted_trees.py b/tensorflow/python/estimator/canned/boosted_trees.py
index 3c832c7569..3292e2724d 100644
--- a/tensorflow/python/estimator/canned/boosted_trees.py
+++ b/tensorflow/python/estimator/canned/boosted_trees.py
@@ -17,6 +17,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import abc
import collections
import functools
@@ -384,6 +385,249 @@ class _StopAtAttemptsHook(session_run_hook.SessionRunHook):
run_context.request_stop()
+def _get_max_splits(tree_hparams):
+ """Calculates the max possible number of splits based on tree params."""
+ # Maximum number of splits possible in the whole tree = 2^max_depth - 1.
+ max_splits = (1 << tree_hparams.max_depth) - 1
+ return max_splits
+
+
+class _EnsembleGrower(object):
+ """Abstract base class for different types of ensemble growers.
+
+ Use it to receive training ops for growing and centering bias, depending
+ on the implementation (for example, in memory or accumulator-based
+ on the implementation (for example, in-memory or accumulator-based
+ distributed training):
+ grow_op = grower.grow_tree(stats_summaries_list, feature_ids_list,
+ last_layer_nodes_range)
+ training_ops.append(grow_op)
+ """
+
+ def __init__(self, tree_ensemble, tree_hparams):
+ """Initializes a grower object.
+
+ Args:
+ tree_ensemble: A TreeEnsemble variable.
+ tree_hparams: A `collections.namedtuple` of tree hyperparameters (TODO: document the fields).
+ """
+ self._tree_ensemble = tree_ensemble
+ self._tree_hparams = tree_hparams
+
+ @abc.abstractmethod
+ def center_bias(self, center_bias_var, gradients, hessians):
+ """Centers bias, if ready, based on statistics.
+
+ Args:
+ center_bias_var: A variable that will be updated when bias centering
+ finished.
+ gradients: A rank 2 tensor of gradients.
+ hessians: A rank 2 tensor of hessians.
+
+ Returns:
+ An operation for centering bias.
+ """
+
+ @abc.abstractmethod
+ def grow_tree(self, stats_summaries_list, feature_ids_list,
+ last_layer_nodes_range):
+ """Grows a tree, if ready, based on provided statistics.
+
+ Args:
+ stats_summaries_list: List of stats summary tensors, representing sums of
+ gradients and hessians for each feature bucket.
+ feature_ids_list: a list of lists of feature ids for each bucket size.
+ last_layer_nodes_range: A tensor representing ids of the nodes in the
+ current layer, to be split.
+
+ Returns:
+ An op for growing a tree.
+ """
+
+ # ============= Helper methods ===========
+
+ def _center_bias_fn(self, center_bias_var, mean_gradients, mean_hessians):
+ """Updates the ensembles and cache (if needed) with logits prior."""
+ continue_centering = boosted_trees_ops.center_bias(
+ self._tree_ensemble.resource_handle,
+ mean_gradients=mean_gradients,
+ mean_hessians=mean_hessians,
+ l1=self._tree_hparams.l1,
+ l2=self._tree_hparams.l2)
+ return center_bias_var.assign(continue_centering)
+
+ def _grow_tree_from_stats_summaries(self, stats_summaries_list,
+ feature_ids_list, last_layer_nodes_range):
+ """Updates ensemble based on the best gains from stats summaries."""
+ node_ids_per_feature = []
+ gains_list = []
+ thresholds_list = []
+ left_node_contribs_list = []
+ right_node_contribs_list = []
+ all_feature_ids = []
+ assert len(stats_summaries_list) == len(feature_ids_list)
+
+ max_splits = _get_max_splits(self._tree_hparams)
+
+ for i, feature_ids in enumerate(feature_ids_list):
+ (numeric_node_ids_per_feature, numeric_gains_list,
+ numeric_thresholds_list, numeric_left_node_contribs_list,
+ numeric_right_node_contribs_list) = (
+ boosted_trees_ops.calculate_best_gains_per_feature(
+ node_id_range=last_layer_nodes_range,
+ stats_summary_list=stats_summaries_list[i],
+ l1=self._tree_hparams.l1,
+ l2=self._tree_hparams.l2,
+ tree_complexity=self._tree_hparams.tree_complexity,
+ min_node_weight=self._tree_hparams.min_node_weight,
+ max_splits=max_splits))
+
+ all_feature_ids += feature_ids
+ node_ids_per_feature += numeric_node_ids_per_feature
+ gains_list += numeric_gains_list
+ thresholds_list += numeric_thresholds_list
+ left_node_contribs_list += numeric_left_node_contribs_list
+ right_node_contribs_list += numeric_right_node_contribs_list
+
+ grow_op = boosted_trees_ops.update_ensemble(
+ # Confirm if local_tree_ensemble or tree_ensemble should be used.
+ self._tree_ensemble.resource_handle,
+ feature_ids=all_feature_ids,
+ node_ids=node_ids_per_feature,
+ gains=gains_list,
+ thresholds=thresholds_list,
+ left_node_contribs=left_node_contribs_list,
+ right_node_contribs=right_node_contribs_list,
+ learning_rate=self._tree_hparams.learning_rate,
+ max_depth=self._tree_hparams.max_depth,
+ pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING)
+ return grow_op
+
+
+class _InMemoryEnsembleGrower(_EnsembleGrower):
+ """An in-memory ensemble grower."""
+
+ def __init__(self, tree_ensemble, tree_hparams):
+
+ super(_InMemoryEnsembleGrower, self).__init__(
+ tree_ensemble=tree_ensemble, tree_hparams=tree_hparams)
+
+ def center_bias(self, center_bias_var, gradients, hessians):
+ # For in memory, we already have a full batch of gradients and hessians,
+ # so just take a mean and proceed with centering.
+ mean_gradients = array_ops.expand_dims(
+ math_ops.reduce_mean(gradients, 0), 0)
+ mean_hessians = array_ops.expand_dims(math_ops.reduce_mean(hessians, 0), 0)
+ return self._center_bias_fn(center_bias_var, mean_gradients, mean_hessians)
+
+ def grow_tree(self, stats_summaries_list, feature_ids_list,
+ last_layer_nodes_range):
+ # For in memory, we already have full data in one batch, so we can grow the
+ # tree immediately.
+ return self._grow_tree_from_stats_summaries(
+ stats_summaries_list, feature_ids_list, last_layer_nodes_range)
+
+
+class _AccumulatorEnsembleGrower(_EnsembleGrower):
+ """An accumulator-based (distributed) ensemble grower."""
+
+ def __init__(self, tree_ensemble, tree_hparams, stamp_token,
+ n_batches_per_layer, bucket_size_list, is_chief):
+ super(_AccumulatorEnsembleGrower, self).__init__(
+ tree_ensemble=tree_ensemble, tree_hparams=tree_hparams)
+ self._stamp_token = stamp_token
+ self._n_batches_per_layer = n_batches_per_layer
+ self._bucket_size_list = bucket_size_list
+ self._is_chief = is_chief
+
+ def center_bias(self, center_bias_var, gradients, hessians):
+ # For the not-in-memory case, we need to accumulate enough batches first
+ # before proceeding with centering the bias.
+
+ # Create an accumulator.
+ bias_dependencies = []
+ bias_accumulator = data_flow_ops.ConditionalAccumulator(
+ dtype=dtypes.float32,
+ # The stats consist of the means of grads and hessians only.
+ # TODO(nponomareva): this will change for the multiclass case.
+ shape=[2, 1],
+ shared_name='bias_accumulator')
+
+ grads_and_hess = array_ops.stack([gradients, hessians], axis=0)
+ grads_and_hess = math_ops.reduce_mean(grads_and_hess, axis=1)
+
+ apply_grad = bias_accumulator.apply_grad(grads_and_hess, self._stamp_token)
+ bias_dependencies.append(apply_grad)
+
+ # Center bias if enough batches were processed.
+ with ops.control_dependencies(bias_dependencies):
+ if not self._is_chief:
+ return control_flow_ops.no_op()
+
+ def center_bias_from_accumulator():
+ accumulated = array_ops.unstack(bias_accumulator.take_grad(1), axis=0)
+ return self._center_bias_fn(center_bias_var,
+ array_ops.expand_dims(accumulated[0], 0),
+ array_ops.expand_dims(accumulated[1], 0))
+
+ center_bias_op = control_flow_ops.cond(
+ math_ops.greater_equal(bias_accumulator.num_accumulated(),
+ self._n_batches_per_layer),
+ center_bias_from_accumulator,
+ control_flow_ops.no_op,
+ name='wait_until_n_batches_for_bias_accumulated')
+ return center_bias_op
+
+ def grow_tree(self, stats_summaries_list, feature_ids_list,
+ last_layer_nodes_range):
+ # For the not-in-memory case, we need to accumulate enough batches first
+ # before proceeding with building a tree layer.
+ max_splits = _get_max_splits(self._tree_hparams)
+
+ # Prepare accumulators.
+ accumulators = []
+ dependencies = []
+ for i, feature_ids in enumerate(feature_ids_list):
+ stats_summaries = stats_summaries_list[i]
+ accumulator = data_flow_ops.ConditionalAccumulator(
+ dtype=dtypes.float32,
+ # The stats consist of grads and hessians (the last dimension).
+ shape=[len(feature_ids), max_splits, self._bucket_size_list[i], 2],
+ shared_name='numeric_stats_summary_accumulator_' + str(i))
+ accumulators.append(accumulator)
+
+ apply_grad = accumulator.apply_grad(
+ array_ops.stack(stats_summaries, axis=0), self._stamp_token)
+ dependencies.append(apply_grad)
+
+ # Grow the tree if enough batches have been accumulated.
+ with ops.control_dependencies(dependencies):
+ if not self._is_chief:
+ return control_flow_ops.no_op()
+
+ min_accumulated = math_ops.reduce_min(
+ array_ops.stack([acc.num_accumulated() for acc in accumulators]))
+
+ def grow_tree_from_accumulated_summaries_fn():
+ """Updates tree with the best layer from accumulated summaries."""
+ # Take out the accumulated summaries from the accumulator and grow.
+ stats_summaries_list = [
+ array_ops.unstack(accumulator.take_grad(1), axis=0)
+ for accumulator in accumulators
+ ]
+ grow_op = self._grow_tree_from_stats_summaries(
+ stats_summaries_list, feature_ids_list, last_layer_nodes_range)
+ return grow_op
+
+ grow_model = control_flow_ops.cond(
+ math_ops.greater_equal(min_accumulated, self._n_batches_per_layer),
+ grow_tree_from_accumulated_summaries_fn,
+ control_flow_ops.no_op,
+ name='wait_until_n_batches_accumulated')
+ return grow_model
+
+
def _bt_model_fn(
features,
labels,
@@ -441,11 +685,6 @@ def _bt_model_fn(
raise ValueError('train_in_memory is supported only for '
'non-distributed training.')
worker_device = control_flow_ops.no_op().device
- # maximum number of splits possible in the whole tree =2^(D-1)-1
- # TODO(youngheek): perhaps storage could be optimized by storing stats with
- # the dimension max_splits_per_layer, instead of max_splits (for the entire
- # tree).
- max_splits = (1 << tree_hparams.max_depth) - 1
train_op = []
with ops.name_scope(name) as name:
# Prepare.
@@ -543,6 +782,11 @@ def _bt_model_fn(
hessians = gradients_impl.gradients(
gradients, logits, name='Hessians')[0]
+ # TODO(youngheek): perhaps storage could be optimized by storing stats
+ # with the dimension max_splits_per_layer, instead of max_splits (for the
+ # entire tree).
+ max_splits = _get_max_splits(tree_hparams)
+
stats_summaries_list = []
for i, feature_ids in enumerate(feature_ids_list):
num_buckets = bucket_size_list[i]
@@ -559,173 +803,28 @@ def _bt_model_fn(
]
stats_summaries_list.append(summaries)
- # ========= Helper methods for both in and not in memory. ==============
- def grow_tree_from_stats_summaries(stats_summaries_list,
- feature_ids_list):
- """Updates ensemble based on the best gains from stats summaries."""
- node_ids_per_feature = []
- gains_list = []
- thresholds_list = []
- left_node_contribs_list = []
- right_node_contribs_list = []
- all_feature_ids = []
-
- assert len(stats_summaries_list) == len(feature_ids_list)
-
- for i, feature_ids in enumerate(feature_ids_list):
- (numeric_node_ids_per_feature, numeric_gains_list,
- numeric_thresholds_list, numeric_left_node_contribs_list,
- numeric_right_node_contribs_list) = (
- boosted_trees_ops.calculate_best_gains_per_feature(
- node_id_range=last_layer_nodes_range,
- stats_summary_list=stats_summaries_list[i],
- l1=tree_hparams.l1,
- l2=tree_hparams.l2,
- tree_complexity=tree_hparams.tree_complexity,
- min_node_weight=tree_hparams.min_node_weight,
- max_splits=max_splits))
-
- all_feature_ids += feature_ids
- node_ids_per_feature += numeric_node_ids_per_feature
- gains_list += numeric_gains_list
- thresholds_list += numeric_thresholds_list
- left_node_contribs_list += numeric_left_node_contribs_list
- right_node_contribs_list += numeric_right_node_contribs_list
-
- grow_op = boosted_trees_ops.update_ensemble(
- # Confirm if local_tree_ensemble or tree_ensemble should be used.
- tree_ensemble.resource_handle,
- feature_ids=all_feature_ids,
- node_ids=node_ids_per_feature,
- gains=gains_list,
- thresholds=thresholds_list,
- left_node_contribs=left_node_contribs_list,
- right_node_contribs=right_node_contribs_list,
- learning_rate=tree_hparams.learning_rate,
- max_depth=tree_hparams.max_depth,
- pruning_mode=boosted_trees_ops.PruningMode.NO_PRUNING)
- return grow_op
-
- def _center_bias_fn(mean_gradients, mean_hessians):
- """Updates the ensembles and cache (if needed) with logits prior."""
- continue_centering = boosted_trees_ops.center_bias(
- tree_ensemble.resource_handle,
- mean_gradients=mean_gradients,
- mean_hessians=mean_hessians,
- l1=tree_hparams.l1,
- l2=tree_hparams.l2
- )
- return center_bias_var.assign(continue_centering)
-
- # ========= End of helper methods. ==============
-
if train_in_memory and is_single_machine:
- train_op.append(distribute_lib.increment_var(global_step))
-
- mean_gradients = array_ops.expand_dims(
- math_ops.reduce_mean(gradients, 0), 0)
- mean_heassians = array_ops.expand_dims(
- math_ops.reduce_mean(hessians, 0), 0)
-
- train_op.append(
- control_flow_ops.cond(
- center_bias_var,
- lambda: _center_bias_fn(mean_gradients, mean_heassians),
- functools.partial(grow_tree_from_stats_summaries,
- stats_summaries_list, feature_ids_list)))
+ grower = _InMemoryEnsembleGrower(tree_ensemble, tree_hparams)
else:
-
- def center_bias_not_in_mem():
- """Accumulates the data and updates the logits bias, when ready."""
- bias_dependencies = []
-
- bias_accumulator = data_flow_ops.ConditionalAccumulator(
- dtype=dtypes.float32,
- # The stats consist of grads and hessians means only.
- # TODO(nponomareva): this will change for a multiclass
- shape=[2, 1],
- shared_name='bias_accumulator')
-
- grads_and_hess = array_ops.stack([gradients, hessians], axis=0)
- grads_and_hess = math_ops.reduce_mean(grads_and_hess, axis=1)
-
- apply_grad = bias_accumulator.apply_grad(grads_and_hess, stamp_token)
- bias_dependencies.append(apply_grad)
-
- def center_bias_from_accumulator():
- accumulated = array_ops.unstack(
- bias_accumulator.take_grad(1), axis=0)
- return _center_bias_fn(
- array_ops.expand_dims(accumulated[0], 0),
- array_ops.expand_dims(accumulated[1], 0))
-
- with ops.control_dependencies(bias_dependencies):
- if config.is_chief:
- center_bias_op = control_flow_ops.cond(
- math_ops.greater_equal(bias_accumulator.num_accumulated(),
- n_batches_per_layer),
- center_bias_from_accumulator,
- control_flow_ops.no_op,
- name='wait_until_n_batches_for_bias_accumulated')
-
- return center_bias_op
- else:
- return control_flow_ops.no_op()
-
- def grow_not_in_mem():
- """Accumulates the data and grows a layer when ready."""
-
- accumulators = []
- dependencies = []
- for i, feature_ids in enumerate(feature_ids_list):
- stats_summaries = stats_summaries_list[i]
- accumulator = data_flow_ops.ConditionalAccumulator(
- dtype=dtypes.float32,
- # The stats consist of grads and hessians (the last dimension).
- shape=[len(feature_ids), max_splits, bucket_size_list[i], 2],
- shared_name='numeric_stats_summary_accumulator_' + str(i))
- accumulators.append(accumulator)
-
- apply_grad = accumulator.apply_grad(
- array_ops.stack(stats_summaries, axis=0), stamp_token)
- dependencies.append(apply_grad)
-
- def grow_tree_from_accumulated_summaries_fn():
- """Updates tree with the best layer from accumulated summaries."""
- # Take out the accumulated summaries from the accumulator and grow.
- stats_summaries_list = []
-
- stats_summaries_list = [
- array_ops.unstack(accumulator.take_grad(1), axis=0)
- for accumulator in accumulators
- ]
-
- grow_op = grow_tree_from_stats_summaries(stats_summaries_list,
- feature_ids_list)
- return grow_op
-
- with ops.control_dependencies(dependencies):
- if config.is_chief:
- min_accumulated = math_ops.reduce_min(
- array_ops.stack(
- [acc.num_accumulated() for acc in accumulators]))
-
- grow_model = control_flow_ops.cond(
- math_ops.greater_equal(min_accumulated, n_batches_per_layer),
- grow_tree_from_accumulated_summaries_fn,
- control_flow_ops.no_op,
- name='wait_until_n_batches_accumulated')
-
- return grow_model
- else:
- return control_flow_ops.no_op()
-
- update_model = control_flow_ops.cond(
- center_bias_var, center_bias_not_in_mem, grow_not_in_mem)
- train_op.append(update_model)
- with ops.control_dependencies([update_model]):
- increment_global = distribute_lib.increment_var(global_step)
- train_op.append(increment_global)
+ grower = _AccumulatorEnsembleGrower(tree_ensemble, tree_hparams,
+ stamp_token, n_batches_per_layer,
+ bucket_size_list, config.is_chief)
+
+ update_model = control_flow_ops.cond(
+ center_bias_var,
+ functools.partial(
+ grower.center_bias,
+ center_bias_var,
+ gradients,
+ hessians,
+ ),
+ functools.partial(grower.grow_tree, stats_summaries_list,
+ feature_ids_list, last_layer_nodes_range))
+ train_op.append(update_model)
+
+ with ops.control_dependencies([update_model]):
+ increment_global = distribute_lib.increment_var(global_step)
+ train_op.append(increment_global)
return control_flow_ops.group(train_op, name='train_op')
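A standalone sketch of the accumulator gating pattern that `_AccumulatorEnsembleGrower.grow_tree` relies on above: per-batch stats are applied to a `ConditionalAccumulator`, and the expensive update is only taken once `n_batches_per_layer` batches have been seen. Shapes and names here are hypothetical; the real code keeps one accumulator per feature-id group.

```
import tensorflow as tf

stamp_token = 0
n_batches_per_layer = 4
# Hypothetical stats tensor: [max_splits, num_buckets, 2 (grad, hessian)].
stats = tf.random_uniform([7, 32, 2])

acc = tf.ConditionalAccumulator(
    dtype=tf.float32, shape=[7, 32, 2], shared_name='stats_accumulator')
apply_op = acc.apply_grad(stats, local_step=stamp_token)

with tf.control_dependencies([apply_op]):
  # Only consume the accumulator (and, in the real code, grow a tree layer)
  # once enough batches have been applied; otherwise return a placeholder.
  update = tf.cond(
      tf.greater_equal(acc.num_accumulated(), n_batches_per_layer),
      lambda: acc.take_grad(1),
      lambda: tf.zeros([7, 32, 2]))
```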
diff --git a/tensorflow/python/estimator/canned/metric_keys.py b/tensorflow/python/estimator/canned/metric_keys.py
index 4f7c849ba4..9d49240fea 100644
--- a/tensorflow/python/estimator/canned/metric_keys.py
+++ b/tensorflow/python/estimator/canned/metric_keys.py
@@ -47,3 +47,8 @@ class MetricKeys(object):
PROBABILITY_MEAN_AT_CLASS = 'probability_mean/class%d'
AUC_AT_CLASS = 'auc/class%d'
AUC_PR_AT_CLASS = 'auc_precision_recall/class%d'
+
+ # The following require a class name to be filled in.
+ PROBABILITY_MEAN_AT_NAME = 'probability_mean/%s'
+ AUC_AT_NAME = 'auc/%s'
+ AUC_PR_AT_NAME = 'auc_precision_recall/%s'
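The new `*_AT_NAME` keys are `%s` format templates keyed by class name rather than by class index; a tiny illustration (the class name 'cat' is hypothetical):

```
# Yields 'auc/cat', assuming a head with a class named 'cat'.
key = MetricKeys.AUC_AT_NAME % 'cat'
```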
diff --git a/tensorflow/python/estimator/estimator.py b/tensorflow/python/estimator/estimator.py
index 350a95eea1..915ceeb98b 100644
--- a/tensorflow/python/estimator/estimator.py
+++ b/tensorflow/python/estimator/estimator.py
@@ -29,8 +29,6 @@ import six
from google.protobuf import message
from tensorflow.core.framework import summary_pb2
-from tensorflow.core.protobuf import config_pb2
-from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.eager import context
from tensorflow.python.estimator import model_fn as model_fn_lib
@@ -216,11 +214,7 @@ class Estimator(object):
logging.info('Using config: %s', str(vars(self._config)))
if self._config.session_config is None:
- rewrite_opts = rewriter_config_pb2.RewriterConfig(
- meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE)
- graph_opts = config_pb2.GraphOptions(rewrite_options=rewrite_opts)
- self._session_config = config_pb2.ConfigProto(
- allow_soft_placement=True, graph_options=graph_opts)
+ self._session_config = run_config.get_default_session_config()
else:
self._session_config = self._config.session_config
@@ -573,10 +567,16 @@ class Estimator(object):
def _assert_members_are_not_overridden(self):
"""Asserts members of `Estimator` are not overridden."""
+ # TPUEstimator is special cased (owned by TF).
+ if self.__class__.__name__ == 'TPUEstimator':
+ return
+
allowed_overrides = set([
'_call_input_fn', '_create_global_step',
'_convert_train_steps_to_hooks', '_convert_eval_steps_to_hooks',
- '_tf_api_names', '_estimator_api_names', '_estimator_api_constants',
+ '_tf_api_names', '_tf_api_names_v1', '_estimator_api_names',
+ '_estimator_api_names_v1', '_estimator_api_constants',
+ '_estimator_api_constants_v1',
'_validate_features_in_predict_input',
'_call_model_fn', '_add_meta_graph_for_mode'
])
diff --git a/tensorflow/python/estimator/estimator_test.py b/tensorflow/python/estimator/estimator_test.py
index 2a0e4e7617..8bc410ba0b 100644
--- a/tensorflow/python/estimator/estimator_test.py
+++ b/tensorflow/python/estimator/estimator_test.py
@@ -28,6 +28,7 @@ import six
from google.protobuf import text_format
+from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator
@@ -203,6 +204,10 @@ class EstimatorConstructorTest(test.TestCase):
est = estimator.Estimator(model_fn=model_fn)
self.assertTrue(isinstance(est.config, run_config.RunConfig))
+ self.assertTrue(est._session_config.allow_soft_placement)
+ rewrite_options = est._session_config.graph_options.rewrite_options
+ self.assertEqual(rewrite_options.meta_optimizer_iterations,
+ rewriter_config_pb2.RewriterConfig.ONE)
def test_default_model_dir(self):
@@ -2304,6 +2309,43 @@ class EstimatorExportTest(test.TestCase):
with self.assertRaisesRegexp(ValueError, err_regex):
est._export_all_saved_models(export_dir_base, input_receiver_fn_map)
+ def test_export_all_saved_models_metric_operation(self):
+ """Ensures metrics ops.Operations can be exported (b/109740581)."""
+
+ def _model_fn(features, labels, mode):
+ del features, labels # Unused
+ metrics = {'metrics': (constant_op.constant([0]),
+ control_flow_ops.no_op())}
+ return model_fn_lib.EstimatorSpec(
+ mode,
+ predictions=constant_op.constant(10.),
+ loss=constant_op.constant(1.),
+ train_op=state_ops.assign_add(training.get_global_step(), 1),
+ eval_metric_ops=metrics)
+
+ tmpdir = tempfile.mkdtemp()
+ est = estimator.Estimator(model_fn=_model_fn)
+ est.train(input_fn=dummy_input_fn, steps=1)
+
+ # Perform the export.
+ export_dir_base = os.path.join(
+ compat.as_bytes(tmpdir), compat.as_bytes('metric_operation_export'))
+
+ input_receiver_fn_map = {
+ model_fn_lib.ModeKeys.EVAL: _get_supervised_input_receiver_fn()}
+
+ export_dir = est._export_all_saved_models(
+ export_dir_base, input_receiver_fn_map)
+
+ # Restore, to validate that the export was well-formed.
+ with ops.Graph().as_default() as graph:
+ with session.Session(graph=graph) as sess:
+ meta_graph = loader.load(sess, [tag_constants.EVAL], export_dir)
+ sig_outputs = meta_graph.signature_def[
+ model_fn_lib.ModeKeys.EVAL].outputs
+ self.assertEqual(
+ sig_outputs['metrics/update_op'].name, 'metric_op_wrapper:0')
+
def test_export_savedmodel_with_saveables_proto_roundtrip(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
diff --git a/tensorflow/python/estimator/export/export_output.py b/tensorflow/python/estimator/export/export_output.py
index 6c26d29985..20382a58d8 100644
--- a/tensorflow/python/estimator/export/export_output.py
+++ b/tensorflow/python/estimator/export/export_output.py
@@ -23,6 +23,7 @@ import abc
import six
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_def_utils
@@ -338,8 +339,16 @@ class _SupervisedOutput(ExportOutput):
raise ValueError(
'{} update_op must be a Tensor or Operation; got {}.'.format(
key, metric_op))
+
+ # We must wrap any ops in a Tensor before export, as the SignatureDef
+ # proto expects tensors only. See b/109740581
+ metric_op_tensor = metric_op
+ if isinstance(metric_op, ops.Operation):
+ with ops.control_dependencies([metric_op]):
+ metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')
+
outputs[val_name] = metric_val
- outputs[op_name] = metric_op
+ outputs[op_name] = metric_op_tensor
return outputs
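The wrapping above exists because a SignatureDef can only reference tensors, not raw operations; a tiny standalone illustration of the same trick (the wrapper name follows the diff, the update op here is just a stand-in):

```
import tensorflow as tf

update_op = tf.no_op(name='my_metric_update')  # an Operation, not a Tensor

# An empty constant that control-depends on the op gives the SignatureDef a
# Tensor to reference, while fetching it still runs the update.
with tf.control_dependencies([update_op]):
  update_tensor = tf.constant([], name='metric_op_wrapper')

assert isinstance(update_tensor, tf.Tensor)
```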
diff --git a/tensorflow/python/estimator/export/export_output_test.py b/tensorflow/python/estimator/export/export_output_test.py
index b21ba91b0f..d94c764fd7 100644
--- a/tensorflow/python/estimator/export/export_output_test.py
+++ b/tensorflow/python/estimator/export/export_output_test.py
@@ -24,8 +24,10 @@ from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
@@ -335,5 +337,18 @@ class SupervisedOutputTest(test.TestCase):
self.assertTrue("predictions/output1" in sig_def.outputs)
self.assertTrue("features" in sig_def.inputs)
+ def test_metric_op_is_operation(self):
+ """Tests that ops.Operation is wrapped by a tensor for metric_ops."""
+ loss = {"my_loss": constant_op.constant([0])}
+ predictions = {u"output1": constant_op.constant(["foo"])}
+ metrics = {"metrics": (constant_op.constant([0]), control_flow_ops.no_op())}
+
+ outputter = MockSupervisedOutput(loss, predictions, metrics)
+ self.assertEqual(outputter.metrics["metrics/value"], metrics["metrics"][0])
+ self.assertEqual(
+ outputter.metrics["metrics/update_op"].name, "metric_op_wrapper:0")
+ self.assertTrue(
+ isinstance(outputter.metrics["metrics/update_op"], ops.Tensor))
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/estimator/keras.py b/tensorflow/python/estimator/keras.py
index 076359b503..70517ae278 100644
--- a/tensorflow/python/estimator/keras.py
+++ b/tensorflow/python/estimator/keras.py
@@ -21,11 +21,14 @@ from __future__ import print_function
import os
import re
+import tempfile
+
from tensorflow.python.client import session
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import export as export_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config as run_config_lib
+from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
@@ -39,6 +42,7 @@ from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics as metrics_module
+from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import distribute as distribute_lib
@@ -180,7 +184,7 @@ def _in_place_subclassed_model_reset(model):
# Replace layers on the model with fresh layers
layers_to_names = {value: key for key, value in attributes_cache.items()}
original_layers = model._layers[:]
- model._layers = []
+ model._layers = data_structures.NoDependency([])
for layer in original_layers: # We preserve layer order.
config = layer.get_config()
# This will not work for nested subclassed models used as layers.
@@ -228,7 +232,8 @@ def _in_place_subclassed_model_reset(model):
]
for name in attributes_to_cache:
attributes_cache[name] = getattr(model, name)
- model._original_attributes_cache = attributes_cache
+ model._original_attributes_cache = data_structures.NoDependency(
+ attributes_cache)
# Reset built state
model.built = False
model.inputs = None
@@ -426,29 +431,34 @@ def _create_keras_model_fn(keras_model, custom_objects=None):
return model_fn
-def _save_first_checkpoint(keras_model, estimator, custom_objects,
- keras_weights):
+def _save_first_checkpoint(keras_model, custom_objects, config):
"""Save first checkpoint for the keras Estimator.
Args:
keras_model: an instance of compiled keras model.
- estimator: keras estimator.
custom_objects: Dictionary for custom objects.
- keras_weights: A flat list of Numpy arrays for weights of given keras_model.
+ config: Estimator config.
Returns:
- The model_fn for a keras Estimator.
+ The path where keras model checkpoint is saved.
"""
+ # Save the checkpoint into a subdirectory to allow warm-starting.
+ keras_model_dir = os.path.join(config.model_dir, 'keras')
# Load weights and save to checkpoint if there is no checkpoint
- latest_path = saver_lib.latest_checkpoint(estimator.model_dir)
+ latest_path = saver_lib.latest_checkpoint(keras_model_dir)
if not latest_path:
+ keras_weights = None
+ if _any_weight_initialized(keras_model):
+ keras_weights = keras_model.get_weights()
+ if not gfile.IsDirectory(keras_model_dir):
+ gfile.MakeDirs(keras_model_dir)
with ops.Graph().as_default():
- random_seed.set_random_seed(estimator.config.tf_random_seed)
+ random_seed.set_random_seed(config.tf_random_seed)
training_util.create_global_step()
model = _clone_and_build_model(model_fn_lib.ModeKeys.TRAIN, keras_model,
custom_objects)
# save to checkpoint
- with session.Session(config=estimator._session_config) as sess:
+ with session.Session(config=config.session_config) as sess:
if keras_weights:
model.set_weights(keras_weights)
# Make update ops and initialize all variables.
@@ -458,7 +468,46 @@ def _save_first_checkpoint(keras_model, estimator, custom_objects,
K._initialize_variables(sess)
# pylint: enable=protected-access
saver = saver_lib.Saver()
- saver.save(sess, os.path.join(estimator.model_dir, 'keras_model.ckpt'))
+ latest_path = os.path.join(keras_model_dir, 'keras_model.ckpt')
+ saver.save(sess, latest_path)
+ return latest_path
+
+
+def _maybe_overwrite_model_dir_and_session_config(config, model_dir):
+ """Overwrite estimator config by `model_dir` and `session_config` if needed.
+
+ Args:
+ config: Original estimator config.
+ model_dir: Estimator model checkpoint directory.
+
+ Returns:
+ Overwritten estimator config.
+
+ Raises:
+ ValueError: Model directory inconsistent between `model_dir` and `config`.
+ """
+
+ default_session_config = run_config_lib.get_default_session_config()
+ if isinstance(config, dict):
+ config = RunConfig(**config)
+ elif config is None:
+ config = RunConfig(session_config=default_session_config)
+ if config.session_config is None:
+ config = RunConfig.replace(config, session_config=default_session_config)
+
+ if model_dir is not None:
+ if (getattr(config, 'model_dir', None) is not None and
+ config.model_dir != model_dir):
+ raise ValueError(
+ "`model_dir` is set in both the constructor and `RunConfig`, but with "
+ "different values. In constructor: '{}', in `RunConfig`: "
+ "'{}' ".format(model_dir, config.model_dir))
+ config = RunConfig.replace(config, model_dir=model_dir)
+ elif getattr(config, 'model_dir', None) is None:
+ model_dir = tempfile.mkdtemp()
+ config = RunConfig.replace(config, model_dir=model_dir)
+
+ return config
def model_to_estimator(keras_model=None,
@@ -517,45 +566,39 @@ def model_to_estimator(keras_model=None,
'Please compile the model with `model.compile()` '
'before calling `model_to_estimator()`.')
- if isinstance(config, dict):
- config = run_config_lib.RunConfig(**config)
+ config = _maybe_overwrite_model_dir_and_session_config(config, model_dir)
keras_model_fn = _create_keras_model_fn(keras_model, custom_objects)
- estimator = estimator_lib.Estimator(
- keras_model_fn, model_dir=model_dir, config=config)
-
- # Check if we need to call get_weights:
if _any_weight_initialized(keras_model):
- keras_weights = keras_model.get_weights()
# Warn if config passed to estimator tries to update GPUOptions. If a
# session has already been created, the GPUOptions passed to the first
# session sticks.
- if estimator._session_config.HasField('gpu_options'):
+ if config.session_config.HasField('gpu_options'):
logging.warning(
'The Keras backend session has already been set. '
'The _session_config passed to model_to_estimator will not be used.')
else:
# Pass the config into keras backend's default session.
- sess = session.Session(config=estimator._session_config)
+ sess = session.Session(config=config.session_config)
K.set_session(sess)
- keras_weights = None
+ warm_start_path = None
if keras_model._is_graph_network:
- # TODO(yifeif): move checkpoint initialization to scaffold.init_fn
- _save_first_checkpoint(keras_model,
- estimator,
- custom_objects,
- keras_weights)
+ warm_start_path = _save_first_checkpoint(keras_model, custom_objects,
+ config)
elif keras_model.built:
- logging.warning('You are creating an Estimator from a Keras model '
- 'manually subclassed from `Model`, that was '
- 'already called on some inputs (and thus already had '
- 'weights). We are currently unable to preserve '
- 'the model\'s state (its weights) '
- 'as part of the estimator '
- 'in this case. Be warned that the estimator '
- 'has been created using '
- 'a freshly initialized version of your model.\n'
- 'Note that this doesn\'t affect the state of the '
- 'model instance you passed as `keras_model` argument.')
+ logging.warning('You are creating an Estimator from a Keras model manually '
+ 'subclassed from `Model`, that was already called on some '
+ 'inputs (and thus already had weights). We are currently '
+ 'unable to preserve the model\'s state (its weights) as '
+ 'part of the estimator in this case. Be warned that the '
+ 'estimator has been created using a freshly initialized '
+ 'version of your model.\n'
+ 'Note that this doesn\'t affect the state of the model '
+ 'instance you passed as `keras_model` argument.')
+
+ estimator = estimator_lib.Estimator(keras_model_fn,
+ config=config,
+ warm_start_from=warm_start_path)
+
return estimator
diff --git a/tensorflow/python/estimator/keras_test.py b/tensorflow/python/estimator/keras_test.py
index 7a4457f5a4..cf4ec7f4da 100644
--- a/tensorflow/python/estimator/keras_test.py
+++ b/tensorflow/python/estimator/keras_test.py
@@ -32,13 +32,14 @@ from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
-from tensorflow.python.keras.applications import mobilenet
from tensorflow.python.keras.optimizers import SGD
+from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.parsing_ops import gen_parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import rmsprop
+from tensorflow.python.training import session_run_hook
try:
@@ -51,6 +52,8 @@ _TRAIN_SIZE = 200
_INPUT_SIZE = (10,)
_NUM_CLASS = 2
+_TMP_DIR = '/tmp'
+
def simple_sequential_model():
model = keras.models.Sequential()
@@ -60,9 +63,9 @@ def simple_sequential_model():
return model
-def simple_functional_model():
+def simple_functional_model(activation='relu'):
a = keras.layers.Input(shape=_INPUT_SIZE)
- b = keras.layers.Dense(16, activation='relu')(a)
+ b = keras.layers.Dense(16, activation=activation)(a)
b = keras.layers.Dropout(0.1)(b)
b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)
model = keras.models.Model(inputs=[a], outputs=[b])
@@ -168,6 +171,12 @@ def multi_inputs_multi_outputs_model():
return model
+class MyHook(session_run_hook.SessionRunHook):
+
+ def begin(self):
+ _ = variable_scope.get_variable('temp', [1])
+
+
class TestKerasEstimator(test_util.TensorFlowTestCase):
def setUp(self):
@@ -204,6 +213,54 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
+ # see b/109935364
+ @test_util.run_in_graph_and_eager_modes
+ def test_train_with_hooks(self):
+ for model_type in ['sequential', 'functional']:
+ keras_model, (_, _), (
+ _, _), train_input_fn, eval_input_fn = get_resource_for_simple_model(
+ model_type=model_type, is_evaluate=True)
+ keras_model.compile(
+ loss='categorical_crossentropy',
+ optimizer=rmsprop.RMSPropOptimizer(1e-3),
+ metrics=['mse', keras.metrics.categorical_accuracy])
+
+ my_hook = MyHook()
+ with self.test_session():
+ est_keras = keras_lib.model_to_estimator(
+ keras_model=keras_model, config=self._config)
+ before_eval_results = est_keras.evaluate(
+ input_fn=eval_input_fn, steps=1)
+ est_keras.train(input_fn=train_input_fn, hooks=[my_hook],
+ steps=_TRAIN_SIZE / 16)
+ after_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
+ self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
+
+ writer_cache.FileWriterCache.clear()
+ gfile.DeleteRecursively(self._config.model_dir)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_train_with_model_fit_and_hooks(self):
+    keras_model, (x_train, y_train), _, train_input_fn, eval_input_fn = (
+        get_resource_for_simple_model(
+            model_type='sequential', is_evaluate=True))
+
+ keras_model.compile(
+ loss='categorical_crossentropy',
+ optimizer=rmsprop.RMSPropOptimizer(1e-3),
+ metrics=['mse', keras.metrics.categorical_accuracy])
+ my_hook = MyHook()
+ with self.test_session():
+ keras_model.fit(x_train, y_train, epochs=1)
+
+ keras_est = keras_lib.model_to_estimator(
+ keras_model=keras_model, config=self._config)
+ before_eval_results = keras_est.evaluate(input_fn=eval_input_fn)
+ keras_est.train(input_fn=train_input_fn, hooks=[my_hook],
+ steps=_TRAIN_SIZE / 16)
+ after_eval_results = keras_est.evaluate(input_fn=eval_input_fn, steps=1)
+ self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
+
@test_util.run_in_graph_and_eager_modes
def test_train_with_tf_optimizer(self):
for model_type in ['sequential', 'functional']:
@@ -474,23 +531,43 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
est_keras.train(input_fn=invald_output_name_input_fn, steps=100)
def test_custom_objects(self):
- keras_mobile = mobilenet.MobileNet(weights=None)
- keras_mobile.compile(loss='categorical_crossentropy', optimizer='adam')
+
+ def relu6(x):
+ return keras.backend.relu(x, max_value=6)
+
+ keras_model = simple_functional_model(activation=relu6)
+ keras_model.compile(loss='categorical_crossentropy', optimizer='adam')
custom_objects = {
- 'relu6': mobilenet.relu6,
- 'DepthwiseConv2D': mobilenet.DepthwiseConv2D
+ 'relu6': relu6
}
+
+ (x_train, y_train), _ = testing_utils.get_test_data(
+ train_samples=_TRAIN_SIZE,
+ test_samples=50,
+ input_shape=(10,),
+ num_classes=2)
+ y_train = keras.utils.to_categorical(y_train, 2)
+ input_name = keras_model.input_names[0]
+ output_name = keras_model.output_names[0]
+ train_input_fn = numpy_io.numpy_input_fn(
+ x=randomize_io_type(x_train, input_name),
+ y=randomize_io_type(y_train, output_name),
+ shuffle=False,
+ num_epochs=None,
+ batch_size=16)
with self.assertRaisesRegexp(ValueError, 'relu6'):
with self.test_session():
- keras_lib.model_to_estimator(
- keras_model=keras_mobile,
+ est = keras_lib.model_to_estimator(
+ keras_model=keras_model,
model_dir=tempfile.mkdtemp(dir=self._base_dir))
+ est.train(input_fn=train_input_fn, steps=1)
with self.test_session():
- keras_lib.model_to_estimator(
- keras_model=keras_mobile,
+ est = keras_lib.model_to_estimator(
+ keras_model=keras_model,
model_dir=tempfile.mkdtemp(dir=self._base_dir),
custom_objects=custom_objects)
+ est.train(input_fn=train_input_fn, steps=1)
def test_tf_config(self):
keras_model, (_, _), (_, _), _, _ = get_resource_for_simple_model()
@@ -527,12 +604,73 @@ class TestKerasEstimator(test_util.TensorFlowTestCase):
gpu_options = config_pb2.GPUOptions(per_process_gpu_memory_fraction=0.3)
sess_config = config_pb2.ConfigProto(gpu_options=gpu_options)
self._config._session_config = sess_config
- keras_lib.model_to_estimator(
- keras_model=keras_model, config=self._config)
- self.assertEqual(
- keras.backend.get_session()
- ._config.gpu_options.per_process_gpu_memory_fraction,
- gpu_options.per_process_gpu_memory_fraction)
+ with self.test_session():
+ keras_lib.model_to_estimator(
+ keras_model=keras_model, config=self._config)
+ self.assertEqual(
+ keras.backend.get_session()
+ ._config.gpu_options.per_process_gpu_memory_fraction,
+ gpu_options.per_process_gpu_memory_fraction)
+
+ def test_with_empty_config(self):
+ keras_model, _, _, _, _ = get_resource_for_simple_model(
+ model_type='sequential', is_evaluate=True)
+ keras_model.compile(
+ loss='categorical_crossentropy',
+ optimizer='rmsprop',
+ metrics=['mse', keras.metrics.categorical_accuracy])
+
+ with self.test_session():
+ est_keras = keras_lib.model_to_estimator(
+ keras_model=keras_model, model_dir=self._base_dir,
+ config=run_config_lib.RunConfig())
+ self.assertEqual(run_config_lib.get_default_session_config(),
+ est_keras._session_config)
+ self.assertEqual(est_keras._session_config,
+ est_keras._config.session_config)
+ self.assertEqual(self._base_dir, est_keras._config.model_dir)
+ self.assertEqual(self._base_dir, est_keras._model_dir)
+
+ with self.test_session():
+ est_keras = keras_lib.model_to_estimator(
+ keras_model=keras_model, model_dir=self._base_dir,
+ config=None)
+ self.assertEqual(run_config_lib.get_default_session_config(),
+ est_keras._session_config)
+ self.assertEqual(est_keras._session_config,
+ est_keras._config.session_config)
+ self.assertEqual(self._base_dir, est_keras._config.model_dir)
+ self.assertEqual(self._base_dir, est_keras._model_dir)
+
+ def test_with_empty_config_and_empty_model_dir(self):
+ keras_model, _, _, _, _ = get_resource_for_simple_model(
+ model_type='sequential', is_evaluate=True)
+ keras_model.compile(
+ loss='categorical_crossentropy',
+ optimizer='rmsprop',
+ metrics=['mse', keras.metrics.categorical_accuracy])
+
+ with self.test_session():
+ with test.mock.patch.object(tempfile, 'mkdtemp', return_value=_TMP_DIR):
+ est_keras = keras_lib.model_to_estimator(
+ keras_model=keras_model,
+ config=run_config_lib.RunConfig())
+ self.assertEqual(est_keras._model_dir, _TMP_DIR)
+
+ def test_with_conflicting_model_dir_and_config(self):
+ keras_model, _, _, _, _ = get_resource_for_simple_model(
+ model_type='sequential', is_evaluate=True)
+ keras_model.compile(
+ loss='categorical_crossentropy',
+ optimizer='rmsprop',
+ metrics=['mse', keras.metrics.categorical_accuracy])
+
+ with self.test_session():
+      with self.assertRaisesRegexp(ValueError, '`model_dir` is set both in '
+ 'constructor and `RunConfig`'):
+ keras_lib.model_to_estimator(
+ keras_model=keras_model, model_dir=self._base_dir,
+ config=run_config_lib.RunConfig(model_dir=_TMP_DIR))
def test_pretrained_weights(self):
keras_model, (_, _), (_, _), _, _ = get_resource_for_simple_model()
diff --git a/tensorflow/python/estimator/run_config.py b/tensorflow/python/estimator/run_config.py
index aa594af2e4..6c1de166a4 100644
--- a/tensorflow/python/estimator/run_config.py
+++ b/tensorflow/python/estimator/run_config.py
@@ -48,7 +48,8 @@ _DEFAULT_REPLACEABLE_LIST = [
'keep_checkpoint_every_n_hours',
'log_step_count_steps',
'train_distribute',
- 'device_fn'
+ 'device_fn',
+ 'protocol'
]
_SAVE_CKPT_ERR = (
@@ -288,6 +289,21 @@ def _validate_properties(run_config):
message='device_fn must be callable with exactly'
' one argument "op".')
+ _validate('protocol',
+ lambda protocol: protocol in (None, "grpc", "grpc+verbs"),
+ message='protocol should be grpc or grpc+verbs')
+
+
+def get_default_session_config():
+ """Returns tf.ConfigProto instance."""
+
+ rewrite_opts = rewriter_config_pb2.RewriterConfig(
+ meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE)
+ graph_opts = config_pb2.GraphOptions(rewrite_options=rewrite_opts)
+
+ return config_pb2.ConfigProto(allow_soft_placement=True,
+ graph_options=graph_opts)
+
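
A quick sketch of what the new module-level helper returns; the assertions simply mirror the options set above:

    from tensorflow.core.protobuf import rewriter_config_pb2
    from tensorflow.python.estimator import run_config as run_config_lib

    # Soft placement is on and the grappler meta-optimizer runs exactly once.
    sess_config = run_config_lib.get_default_session_config()
    assert sess_config.allow_soft_placement
    assert (sess_config.graph_options.rewrite_options.meta_optimizer_iterations
            == rewriter_config_pb2.RewriterConfig.ONE)
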
class TaskType(object):
MASTER = 'master'
@@ -312,7 +328,8 @@ class RunConfig(object):
keep_checkpoint_every_n_hours=10000,
log_step_count_steps=100,
train_distribute=None,
- device_fn=None):
+ device_fn=None,
+ protocol=None):
"""Constructs a RunConfig.
All distributed training related properties `cluster_spec`, `is_chief`,
@@ -436,7 +453,7 @@ class RunConfig(object):
the feature.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec and the loss will be logged during training.
- train_distribute: an optional instance of
+ train_distribute: An optional instance of
`tf.contrib.distribute.DistributionStrategy`. If specified,
then Estimator will distribute the user's model during training,
according to the policy specified by that strategy.
@@ -444,6 +461,8 @@ class RunConfig(object):
`Operation` and returns the device string. If `None`, defaults to
the device function returned by `tf.train.replica_device_setter`
with round-robin strategy.
+ protocol: An optional argument which specifies the protocol used when
+      starting the server. None means the default, which is grpc.
Raises:
ValueError: If both `save_checkpoints_steps` and `save_checkpoints_secs`
@@ -481,7 +500,8 @@ class RunConfig(object):
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
log_step_count_steps=log_step_count_steps,
train_distribute=train_distribute,
- device_fn=device_fn)
+ device_fn=device_fn,
+ protocol=protocol)
self._init_distributed_setting_from_environment_var(tf_config)
@@ -499,9 +519,9 @@ class RunConfig(object):
RunConfig._replace(
self,
allowed_properties_list=_DEFAULT_REPLACEABLE_LIST,
- session_config=self._get_default_session_config())
+ session_config=self._get_default_session_config_distributed())
- def _get_default_session_config(self):
+ def _get_default_session_config_distributed(self):
"""Returns None or tf.ConfigProto instance with default device_filters set.
Device filters are set such that chief/master and worker communicates with
@@ -754,6 +774,11 @@ class RunConfig(object):
"""
return self._train_distribute
+ @property
+ def protocol(self):
+ """Returns the optional protocol value."""
+ return self._protocol
+
def replace(self, **kwargs):
"""Returns a new instance of `RunConfig` replacing specified properties.
@@ -769,7 +794,8 @@ class RunConfig(object):
- `keep_checkpoint_every_n_hours`,
- `log_step_count_steps`,
- `train_distribute`,
- - `device_fn`.
+ - `device_fn`,
+ - `protocol`.
In addition, either `save_checkpoints_steps` or `save_checkpoints_secs`
can be set (should not be both).
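
A small usage sketch for the new `protocol` knob; it is validated above and flows into server creation in training.py below ('grpc+verbs' assumes a VERBS-enabled build):

    from tensorflow.python.estimator import run_config as run_config_lib

    config = run_config_lib.RunConfig(protocol='grpc+verbs')
    assert config.protocol == 'grpc+verbs'

    # protocol is also replaceable, like the other properties listed above.
    config = config.replace(protocol='grpc')
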
diff --git a/tensorflow/python/estimator/training.py b/tensorflow/python/estimator/training.py
index f5ac79ced2..a01b2300dd 100644
--- a/tensorflow/python/estimator/training.py
+++ b/tensorflow/python/estimator/training.py
@@ -732,7 +732,8 @@ class _TrainingExecutor(object):
job_name=config.task_type,
task_index=config.task_id,
config=session_config,
- start=False)
+ start=False,
+ protocol=config.protocol)
server.start()
return server
diff --git a/tensorflow/python/estimator/training_test.py b/tensorflow/python/estimator/training_test.py
index 6bee7cbe83..dc106c7d3b 100644
--- a/tensorflow/python/estimator/training_test.py
+++ b/tensorflow/python/estimator/training_test.py
@@ -472,6 +472,7 @@ class _TrainingExecutorTrainingTest(object):
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
+ protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
@@ -502,6 +503,7 @@ class _TrainingExecutorTrainingTest(object):
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
+ protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
@@ -729,6 +731,7 @@ class TrainingExecutorRunMasterTest(test.TestCase):
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
+ protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
@@ -1481,6 +1484,7 @@ class TrainingExecutorRunPsTest(test.TestCase):
job_name=mock_est.config.task_type,
task_index=mock_est.config.task_id,
config=test.mock.ANY,
+ protocol=None,
start=False)
self.assertTrue(mock_server_instance.start.called)
diff --git a/tensorflow/python/framework/error_interpolation.py b/tensorflow/python/framework/error_interpolation.py
index 9ccae76147..a79073b748 100644
--- a/tensorflow/python/framework/error_interpolation.py
+++ b/tensorflow/python/framework/error_interpolation.py
@@ -24,11 +24,15 @@ from __future__ import print_function
import collections
import itertools
+import os
import re
import string
import six
+from tensorflow.python.util import tf_stack
+
+
_NAME_REGEX = r"[A-Za-z0-9.][A-Za-z0-9_.\-/]*?"
_FORMAT_REGEX = r"[A-Za-z0-9_.\-/${}:]+"
_TAG_REGEX = r"\^\^({name}):({name}):({fmt})\^\^".format(
@@ -38,6 +42,11 @@ _INTERPOLATION_PATTERN = re.compile(_INTERPOLATION_REGEX)
_ParseTag = collections.namedtuple("_ParseTag", ["type", "name", "format"])
+_BAD_FILE_SUBSTRINGS = [
+ os.path.join("tensorflow", "python"),
+ "<embedded",
+]
+
def _parse_message(message):
"""Parses the message.
@@ -48,6 +57,12 @@ def _parse_message(message):
"123^^node:Foo:${file}^^456^^node:Bar:${line}^^789", there are two tags and
three separators. The separators are the numeric characters.
+ Supported tags after node:<node_name>
+ file: Replaced with the filename in which the node was defined.
+ line: Replaced by the line number at which the node was defined.
+ colocations: Replaced by a multi-line message describing the file and
+ line numbers at which this node was colocated with other nodes.
+
Args:
message: String to parse
@@ -72,9 +87,135 @@ def _parse_message(message):
return seps, tags
-# TODO(jtkeeling): Modify to actually interpolate format strings rather than
-# echoing them.
-def interpolate(error_message):
+def _compute_colocation_summary_from_dict(colocation_dict, prefix=""):
+ """Return a summary of an op's colocation stack.
+
+ Args:
+ colocation_dict: The op._colocation_dict.
+ prefix: An optional string prefix used before each line of the multi-
+ line string returned by this function.
+
+ Returns:
+ A multi-line string similar to:
+ Node-device colocations active during op creation:
+ with tf.colocate_with(test_node_1): <test_1.py:27>
+ with tf.colocate_with(test_node_2): <test_2.py:38>
+ The first line will have no padding to its left by default. Subsequent
+ lines will have two spaces of left-padding. Use the prefix argument
+ to increase indentation.
+ """
+ if not colocation_dict:
+ message = "No node-device colocations were active during op creation."
+ return prefix + message
+
+ str_list = []
+ str_list.append("%sNode-device colocations active during op creation:"
+ % prefix)
+
+ for name, location in colocation_dict.items():
+ location_summary = "<{file}:{line}>".format(file=location.filename,
+ line=location.lineno)
+ subs = {
+ "prefix": prefix,
+ "indent": " ",
+ "name": name,
+ "loc": location_summary,
+ }
+ str_list.append(
+ "{prefix}{indent}with tf.colocate_with({name}): {loc}".format(**subs))
+
+ return "\n".join(str_list)
+
+
+def _compute_colocation_summary_from_op(op, prefix=""):
+ """Fetch colocation file, line, and nesting and return a summary string."""
+ if not op:
+ return ""
+ # pylint: disable=protected-access
+ return _compute_colocation_summary_from_dict(op._colocation_dict, prefix)
+ # pylint: enable=protected-access
+
+
+def _find_index_of_defining_frame_for_op(op):
+ """Return index in op._traceback with first 'useful' frame.
+
+ This method reads through the stack stored in op._traceback looking for the
+ innermost frame which (hopefully) belongs to the caller. It accomplishes this
+ by rejecting frames whose filename appears to come from TensorFlow (see
+ error_interpolation._BAD_FILE_SUBSTRINGS for the list of rejected substrings).
+
+ Args:
+ op: the Operation object for which we would like to find the defining
+ location.
+
+ Returns:
+ Integer index into op._traceback where the first non-TF file was found
+ (innermost to outermost), or 0 (for the outermost stack frame) if all files
+ came from TensorFlow.
+ """
+ # pylint: disable=protected-access
+ # Index 0 of tf_traceback is the outermost frame.
+ tf_traceback = tf_stack.convert_stack(op._traceback)
+ size = len(tf_traceback)
+ # pylint: enable=protected-access
+ filenames = [frame[tf_stack.TB_FILENAME] for frame in tf_traceback]
+ # We process the filenames from the innermost frame to outermost.
+ for idx, filename in enumerate(reversed(filenames)):
+ contains_bad_substrings = [ss in filename for ss in _BAD_FILE_SUBSTRINGS]
+ if not any(contains_bad_substrings):
+ return size - idx - 1
+ return 0
+
+
+def _get_defining_frame_from_op(op):
+ """Find and return stack frame where op was defined."""
+ frame = None
+ if op:
+ # pylint: disable=protected-access
+ frame_index = _find_index_of_defining_frame_for_op(op)
+ frame = op._traceback[frame_index]
+ # pylint: enable=protected-access
+ return frame
+
+
+def _compute_field_dict(op):
+ """Return a dictionary mapping interpolation tokens to values.
+
+ Args:
+ op: op.Operation object having a _traceback member.
+
+ Returns:
+ A dictionary mapping string tokens to string values. The keys are shown
+ below along with example values.
+ {
+ "file": "tool_utils.py",
+ "line": "124",
+ "colocations":
+ '''Node-device colocations active during op creation:
+ with tf.colocate_with(test_node_1): <test_1.py:27>
+ with tf.colocate_with(test_node_2): <test_2.py:38>'''
+ }
+ If op is None or lacks a _traceback field, the returned values will be
+ "<NA>".
+ """
+ default_value = "<NA>"
+ field_dict = {
+ "file": default_value,
+ "line": default_value,
+ "colocations": default_value,
+ }
+ frame = _get_defining_frame_from_op(op)
+ if frame:
+ field_dict["file"] = frame[tf_stack.TB_FILENAME]
+ field_dict["line"] = frame[tf_stack.TB_LINENO]
+ colocation_summary = _compute_colocation_summary_from_op(op)
+ if colocation_summary:
+ field_dict["colocations"] = colocation_summary
+
+ return field_dict
+
+
+def interpolate(error_message, graph):
"""Interpolates an error message.
The error message can contain tags of the form ^^type:name:format^^ which will
@@ -82,11 +223,26 @@ def interpolate(error_message):
Args:
error_message: A string to interpolate.
+ graph: ops.Graph object containing all nodes referenced in the error
+ message.
Returns:
The string with tags of the form ^^type:name:format^^ interpolated.
"""
seps, tags = _parse_message(error_message)
- subs = [string.Template(tag.format).safe_substitute({}) for tag in tags]
+
+ node_name_to_substitution_dict = {}
+ for name in [t.name for t in tags]:
+ try:
+ op = graph.get_operation_by_name(name)
+ except KeyError:
+ op = None
+
+ node_name_to_substitution_dict[name] = _compute_field_dict(op)
+
+ subs = [
+ string.Template(tag.format).safe_substitute(
+ node_name_to_substitution_dict[tag.name]) for tag in tags
+ ]
return "".join(
itertools.chain(*six.moves.zip_longest(seps, subs, fillvalue="")))
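
To make the tag contract concrete, here is a hedged sketch of calling the updated interpolate(); the node name is whatever actually exists in the caller's graph:

    from tensorflow.python.framework import constant_op
    from tensorflow.python.framework import error_interpolation
    from tensorflow.python.framework import ops

    g = ops.Graph()
    with g.as_default():
      constant_op.constant(1, name="One")

    # ${file} and ${line} are replaced with the op's defining location;
    # unknown node names fall back to "<NA>".
    message = "Oops at ^^node:One:${file}:${line}^^"
    print(error_interpolation.interpolate(message, g))
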
diff --git a/tensorflow/python/framework/error_interpolation_test.py b/tensorflow/python/framework/error_interpolation_test.py
index ad448deb62..1e5cb73854 100644
--- a/tensorflow/python/framework/error_interpolation_test.py
+++ b/tensorflow/python/framework/error_interpolation_test.py
@@ -18,31 +18,214 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import os
+
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import error_interpolation
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import traceable_stack
from tensorflow.python.platform import test
+from tensorflow.python.util import tf_stack
+
+
+def _make_frame_with_filename(op, idx, filename):
+ """Return a copy of an existing stack frame with a new filename."""
+ stack_frame = list(op._traceback[idx])
+ stack_frame[tf_stack.TB_FILENAME] = filename
+ return tuple(stack_frame)
+
+
+def _modify_op_stack_with_filenames(op, num_user_frames, user_filename,
+ num_inner_tf_frames):
+ """Replace op._traceback with a new traceback using special filenames."""
+ tf_filename = "%d" + error_interpolation._BAD_FILE_SUBSTRINGS[0]
+ user_filename = os.path.join("%d", "my_favorite_file.py")
+
+ num_requested_frames = num_user_frames + num_inner_tf_frames
+ num_actual_frames = len(op._traceback)
+ num_outer_frames = num_actual_frames - num_requested_frames
+ assert num_requested_frames <= num_actual_frames, "Too few real frames."
+
+ # The op's traceback has outermost frame at index 0.
+ stack = []
+ for idx in range(0, num_outer_frames):
+ stack.append(op._traceback[idx])
+ for idx in range(len(stack), len(stack)+num_user_frames):
+ stack.append(_make_frame_with_filename(op, idx, user_filename % idx))
+ for idx in range(len(stack), len(stack)+num_inner_tf_frames):
+ stack.append(_make_frame_with_filename(op, idx, tf_filename % idx))
+ op._traceback = stack
+
+
+def assert_node_in_colocation_summary(test_obj, colocation_summary_string,
+ name, filename="", lineno=""):
+ lineno = str(lineno)
+ name_phrase = "colocate_with(%s)" % name
+ for term in [name_phrase, filename, lineno]:
+ test_obj.assertIn(term, colocation_summary_string)
+ test_obj.assertNotIn("loc:@", colocation_summary_string)
+
+
+class ComputeColocationSummaryFromOpTest(test.TestCase):
+
+ def testCorrectFormatWithActiveColocations(self):
+ t_obj_1 = traceable_stack.TraceableObject(None,
+ filename="test_1.py",
+ lineno=27)
+ t_obj_2 = traceable_stack.TraceableObject(None,
+ filename="test_2.py",
+ lineno=38)
+ colocation_dict = {
+ "test_node_1": t_obj_1,
+ "test_node_2": t_obj_2,
+ }
+ summary = error_interpolation._compute_colocation_summary_from_dict(
+ colocation_dict, prefix=" ")
+ assert_node_in_colocation_summary(self,
+ summary,
+ name="test_node_1",
+ filename="test_1.py",
+ lineno=27)
+ assert_node_in_colocation_summary(self, summary,
+ name="test_node_2",
+ filename="test_2.py",
+ lineno=38)
+
+ def testCorrectFormatWhenNoColocationsWereActive(self):
+ colocation_dict = {}
+ summary = error_interpolation._compute_colocation_summary_from_dict(
+ colocation_dict, prefix=" ")
+ self.assertIn("No node-device colocations", summary)
class InterpolateTest(test.TestCase):
+ def setUp(self):
+ # Add nodes to the graph for retrieval by name later.
+ constant_op.constant(1, name="One")
+ constant_op.constant(2, name="Two")
+ three = constant_op.constant(3, name="Three")
+ self.graph = three.graph
+
+ # Change the list of bad file substrings so that constant_op.py is chosen
+ # as the defining stack frame for constant_op.constant ops.
+ self.old_bad_strings = error_interpolation._BAD_FILE_SUBSTRINGS
+ error_interpolation._BAD_FILE_SUBSTRINGS = [
+ "%sops.py" % os.sep,
+ "%sutil" % os.sep,
+ ]
+
+ def tearDown(self):
+ error_interpolation._BAD_FILE_SUBSTRINGS = self.old_bad_strings
+
+ def testFindIndexOfDefiningFrameForOp(self):
+ local_op = constant_op.constant(42).op
+ user_filename = "hope.py"
+ _modify_op_stack_with_filenames(local_op,
+ num_user_frames=3,
+ user_filename=user_filename,
+ num_inner_tf_frames=5)
+ idx = error_interpolation._find_index_of_defining_frame_for_op(local_op)
+    # Expected frame is 6th from the end because there are 5 inner frames with
+ # TF filenames.
+ expected_frame = len(local_op._traceback) - 6
+ self.assertEqual(expected_frame, idx)
+
+ def testFindIndexOfDefiningFrameForOpReturnsZeroOnError(self):
+ local_op = constant_op.constant(43).op
+ # Truncate stack to known length.
+ local_op._traceback = local_op._traceback[:7]
+ # Ensure all frames look like TF frames.
+ _modify_op_stack_with_filenames(local_op,
+ num_user_frames=0,
+ user_filename="user_file.py",
+ num_inner_tf_frames=7)
+ idx = error_interpolation._find_index_of_defining_frame_for_op(local_op)
+ self.assertEqual(0, idx)
+
def testNothingToDo(self):
normal_string = "This is just a normal string"
- interpolated_string = error_interpolation.interpolate(normal_string)
+ interpolated_string = error_interpolation.interpolate(normal_string,
+ self.graph)
self.assertEqual(interpolated_string, normal_string)
def testOneTag(self):
- one_tag_string = "^^node:Foo:${file}^^"
- interpolated_string = error_interpolation.interpolate(one_tag_string)
- self.assertEqual(interpolated_string, "${file}")
+ one_tag_string = "^^node:Two:${file}^^"
+ interpolated_string = error_interpolation.interpolate(one_tag_string,
+ self.graph)
+ self.assertTrue(interpolated_string.endswith("constant_op.py"),
+ "interpolated_string '%s' did not end with constant_op.py"
+ % interpolated_string)
+
+ def testOneTagWithAFakeNameResultsInPlaceholders(self):
+ one_tag_string = "^^node:MinusOne:${file}^^"
+ interpolated_string = error_interpolation.interpolate(one_tag_string,
+ self.graph)
+ self.assertEqual(interpolated_string, "<NA>")
def testTwoTagsNoSeps(self):
- two_tags_no_seps = "^^node:Foo:${file}^^^^node:Bar:${line}^^"
- interpolated_string = error_interpolation.interpolate(two_tags_no_seps)
- self.assertEqual(interpolated_string, "${file}${line}")
+ two_tags_no_seps = "^^node:One:${file}^^^^node:Three:${line}^^"
+ interpolated_string = error_interpolation.interpolate(two_tags_no_seps,
+ self.graph)
+ self.assertRegexpMatches(interpolated_string, "constant_op.py[0-9]+")
def testTwoTagsWithSeps(self):
- two_tags_with_seps = "123^^node:Foo:${file}^^456^^node:Bar:${line}^^789"
- interpolated_string = error_interpolation.interpolate(two_tags_with_seps)
- self.assertEqual(interpolated_string, "123${file}456${line}789")
+ two_tags_with_seps = ";;;^^node:Two:${file}^^,,,^^node:Three:${line}^^;;;"
+ interpolated_string = error_interpolation.interpolate(two_tags_with_seps,
+ self.graph)
+ expected_regex = "^;;;.*constant_op.py,,,[0-9]*;;;$"
+ self.assertRegexpMatches(interpolated_string, expected_regex)
+
+
+class InterpolateColocationSummaryTest(test.TestCase):
+
+ def setUp(self):
+ # Add nodes to the graph for retrieval by name later.
+ node_one = constant_op.constant(1, name="One")
+ node_two = constant_op.constant(2, name="Two")
+
+ # node_three has one colocation group, obviously.
+ with ops.colocate_with(node_one):
+ node_three = constant_op.constant(3, name="Three_with_one")
+
+ # node_four has one colocation group even though three is (transitively)
+ # colocated with one.
+ with ops.colocate_with(node_three):
+ constant_op.constant(4, name="Four_with_three")
+
+ # node_five has two colocation groups because one and two are not colocated.
+ with ops.colocate_with(node_two):
+ with ops.colocate_with(node_one):
+ constant_op.constant(5, name="Five_with_one_with_two")
+
+ self.graph = node_three.graph
+
+ def testNodeThreeHasColocationInterpolation(self):
+ message = "^^node:Three_with_one:${colocations}^^"
+ result = error_interpolation.interpolate(message, self.graph)
+ assert_node_in_colocation_summary(self, result, name="One")
+
+ def testNodeFourHasColocationInterpolationForNodeThreeOnly(self):
+ message = "^^node:Four_with_three:${colocations}^^"
+ result = error_interpolation.interpolate(message, self.graph)
+ assert_node_in_colocation_summary(self, result, name="Three_with_one")
+ self.assertNotIn(
+ "One", result,
+ "Node One should not appear in Four_with_three's summary:\n%s"
+ % result)
+
+ def testNodeFiveHasColocationInterpolationForNodeOneAndTwo(self):
+ message = "^^node:Five_with_one_with_two:${colocations}^^"
+ result = error_interpolation.interpolate(message, self.graph)
+ assert_node_in_colocation_summary(self, result, name="One")
+ assert_node_in_colocation_summary(self, result, name="Two")
+
+ def testColocationInterpolationForNodeLackingColocation(self):
+ message = "^^node:One:${colocations}^^"
+ result = error_interpolation.interpolate(message, self.graph)
+ self.assertIn("No node-device colocations", result)
+ self.assertNotIn("One", result)
+ self.assertNotIn("Two", result)
if __name__ == "__main__":
diff --git a/tensorflow/python/framework/function_def_to_graph.py b/tensorflow/python/framework/function_def_to_graph.py
index 46c9c4c14a..1b09506662 100644
--- a/tensorflow/python/framework/function_def_to_graph.py
+++ b/tensorflow/python/framework/function_def_to_graph.py
@@ -25,7 +25,7 @@ from tensorflow.core.framework import types_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.python.framework import function
from tensorflow.python.framework import importer
-from tensorflow.python.framework import op_def_registry
+from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.ops import cond_v2_impl
@@ -114,6 +114,10 @@ def function_def_to_graph_def(fdef, input_shapes=None):
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER))
+ # Copy *all* functions from outer graph to `graph_def` so that both direct
+ # and indirect references are safely handled.
+ ops.get_default_graph()._copy_functions_to_graph_def(graph_def, 0) # pylint: disable=protected-access
+
if input_shapes and len(input_shapes) != len(fdef.signature.input_arg):
raise ValueError("Length of input_shapes must match the number of " +
"input_args. len(input_shapes): {} len(input_arg): {}".
@@ -142,24 +146,18 @@ def function_def_to_graph_def(fdef, input_shapes=None):
nested_to_flat_tensor_name[arg_def.name] = "{}:0".format(arg_def.name)
for node_def in fdef.node_def:
- op_def = op_def_registry.get_registered_ops().get(node_def.op)
- if not op_def:
- # TODO(b/80470245): Support functions which refer other functions.
- raise NotImplementedError(
- "No op registered for {},".format(node_def.op) +
- " it may be a function. function_def_to_graph_def " +
- "currently does not support converting functions with " +
- "references to other graph functions.")
+ op_def = ops.get_default_graph()._get_op_def(node_def.op) # pylint: disable=protected-access
for attr in op_def.attr:
- if attr.type in ("func", "list(func)"):
- # TODO(b/80470245): Support functions which refer other functions.
- raise NotImplementedError("Unsupported attr {} ".format(attr.name) +
- " with type {}".format(attr.type) +
- " in op {}. ".format(op_def.name) +
- "function_def_to_graph_def currently does " +
- "not support converting functions with " +
- "references to other graph functions.")
+ if attr.type == "func":
+ fname = node_def.attr[attr.name].func.name
+ if not ops.get_default_graph()._is_function(fname): # pylint: disable=protected-access
+ raise ValueError("%s function not found." % fname)
+ elif attr.type == "list(func)":
+ for fn in node_def.attr[attr.name].list.func:
+ fname = fn.name
+ if not ops.get_default_graph()._is_function(fname): # pylint: disable=protected-access
+ raise ValueError("%s function not found." % fname)
# Iterate over output_args in op_def to build the map.
# Index of the output tensor in the flattened list of *all* output
diff --git a/tensorflow/python/framework/function_def_to_graph_test.py b/tensorflow/python/framework/function_def_to_graph_test.py
index 0f4e6ef54f..cd2a16ed5a 100644
--- a/tensorflow/python/framework/function_def_to_graph_test.py
+++ b/tensorflow/python/framework/function_def_to_graph_test.py
@@ -18,7 +18,9 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import function
from tensorflow.python.framework import function_def_to_graph
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
@@ -79,7 +81,6 @@ class FunctionDefToGraphTest(test.TestCase):
g = function_def_to_graph.function_def_to_graph(
fdef, input_shapes=[None, tensor_shape.matrix(5, 7)])
- print(g.as_graph_def())
self.assertIsNone(g.inputs[0].shape.dims)
self.assertSequenceEqual(g.inputs[1].shape.dims, [5, 7])
self.assertSequenceEqual(g.outputs[0].shape.dims, [5, 7])
@@ -179,6 +180,37 @@ class FunctionDefToGraphDefTest(test.TestCase):
self.assertEqual(g.node[0].attr["shape"].shape.unknown_rank, False)
self.assertFalse("shape" in g.node[2].attr)
+ def testFunctionCallsFromFunction(self):
+ x = constant_op.constant(5.0)
+ y = constant_op.constant(10.0)
+
+ @function.Defun()
+ def fn():
+
+ @function.Defun()
+ def inner_fn():
+ return x + y
+
+ return inner_fn()
+
+ # Instantiate the function in this graph so that
+ # `function_def_to_graph` can find it.
+ fn()
+
+ def fn2():
+ return 2 * fn()
+
+ fdef = function._DefinedFunction(fn2, [], []).definition
+ func_graph = function_def_to_graph.function_def_to_graph(fdef)
+ with func_graph.as_default():
+ x_ph, y_ph = func_graph.inputs
+ with self.test_session(graph=func_graph) as sess:
+ self.assertEqual(
+ sess.run(func_graph.outputs[0], feed_dict={
+ x_ph: 5.0,
+ y_ph: 10.0
+ }), 30.0)
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/framework/function_test.py b/tensorflow/python/framework/function_test.py
index 15e41ba91f..1707f929b8 100644
--- a/tensorflow/python/framework/function_test.py
+++ b/tensorflow/python/framework/function_test.py
@@ -537,19 +537,25 @@ class FunctionTest(test.TestCase):
def testResourceVarAsImplicitInput(self):
g = ops.Graph()
with g.as_default(), ops.device("cpu:0"):
+ expected_type = dtypes.float32
+ expected_shape = tensor_shape.TensorShape((4, 4))
v = variable_scope.get_variable(
- "var", (4, 4), dtypes.float32, use_resource=True)
+ "var", expected_shape, expected_type, use_resource=True)
@function.Defun()
def Foo():
- return array_ops.identity(v)
+ captured = array_ops.identity(v)
+ self.assertEqual(expected_type, captured.dtype)
+ self.assertEqual(expected_shape, captured.shape)
+ return captured, array_ops.shape(captured)
- y = v.value()
- z = Foo()
+ expected_val = v.value()
+ actual_val, actual_shape = Foo()
with self.test_session(graph=g):
v.initializer.run()
- self.assertAllEqual(y.eval(), z.eval())
+ self.assertAllEqual(expected_val.eval(), actual_val.eval())
+ self.assertAllEqual(expected_shape, actual_shape.eval())
def testDefineErrors(self):
with ops.Graph().as_default():
diff --git a/tensorflow/python/framework/kernels.py b/tensorflow/python/framework/kernels.py
new file mode 100644
index 0000000000..f7641f3442
--- /dev/null
+++ b/tensorflow/python/framework/kernels.py
@@ -0,0 +1,46 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Functions for querying registered kernels."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.core.framework import kernel_def_pb2
+from tensorflow.python import pywrap_tensorflow as c_api
+from tensorflow.python.util import compat
+
+
+def get_all_registered_kernels():
+ """Returns a KernelList proto of all registered kernels.
+ """
+ buf = c_api.TF_GetAllRegisteredKernels()
+ data = c_api.TF_GetBuffer(buf)
+ kernel_list = kernel_def_pb2.KernelList()
+ kernel_list.ParseFromString(compat.as_bytes(data))
+ return kernel_list
+
+
+def get_registered_kernels_for_op(name):
+ """Returns a KernelList proto of registered kernels for a given op.
+
+ Args:
+ name: A string representing the name of the op whose kernels to retrieve.
+ """
+ buf = c_api.TF_GetRegisteredKernelsForOp(name)
+ data = c_api.TF_GetBuffer(buf)
+ kernel_list = kernel_def_pb2.KernelList()
+ kernel_list.ParseFromString(compat.as_bytes(data))
+ return kernel_list
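
A brief usage sketch for the new kernels module (the op name is just an example):

    from tensorflow.python.framework import kernels

    # Print the device types that provide a MatMul kernel.
    for kernel in kernels.get_registered_kernels_for_op("MatMul").kernel:
      print(kernel.op, kernel.device_type)
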
diff --git a/tensorflow/python/framework/kernels_test.py b/tensorflow/python/framework/kernels_test.py
new file mode 100644
index 0000000000..c53500be73
--- /dev/null
+++ b/tensorflow/python/framework/kernels_test.py
@@ -0,0 +1,41 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for querying registered kernels."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.framework import kernels
+from tensorflow.python.framework import test_util
+from tensorflow.python.platform import googletest
+
+
+class GetAllRegisteredKernelsTest(test_util.TensorFlowTestCase):
+
+ def testFindsAtLeastOneKernel(self):
+ kernel_list = kernels.get_all_registered_kernels()
+ self.assertGreater(len(kernel_list.kernel), 0)
+
+
+class GetRegisteredKernelsForOp(test_util.TensorFlowTestCase):
+
+ def testFindsAtLeastOneKernel(self):
+ kernel_list = kernels.get_registered_kernels_for_op("KernelLabel")
+ self.assertGreater(len(kernel_list.kernel), 0)
+ self.assertEqual(kernel_list.kernel[0].op, "KernelLabel")
+
+
+if __name__ == "__main__":
+ googletest.main()
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index b07c57d265..6a5c44e4d9 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -20,7 +20,6 @@ from __future__ import print_function
import collections
import copy
-import linecache
import os
import re
import sys
@@ -49,7 +48,9 @@ from tensorflow.python.framework import errors
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
+from tensorflow.python.util import tf_stack
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
@@ -1711,10 +1712,14 @@ class Operation(object):
# This will be set by self.inputs.
self._inputs_val = None
- self._id_value = self._graph._next_id() # pylint: disable=protected-access
+ # pylint: disable=protected-access
+ self._id_value = self._graph._next_id()
self._original_op = original_op
- self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
- self._control_flow_context = self.graph._get_control_flow_context() # pylint: disable=protected-access
+ self._traceback = tf_stack.extract_stack()
+ # List of traceable_stack.TraceableObjects for colocation context managers.
+ self._colocation_code_locations = None
+ self._control_flow_context = self.graph._get_control_flow_context()
+ # pylint: enable=protected-access
# Initialize self._c_op.
if c_op:
@@ -1853,6 +1858,42 @@ class Operation(object):
return c_api.TF_OperationDevice(self._c_op)
@property
+ def _colocation_dict(self):
+ """Code locations for colocation context managers active at op creation.
+
+ This property will return a dictionary for which the keys are nodes with
+ which this Operation is colocated, and for which the values are
+ traceable_stack.TraceableObject instances. The TraceableObject instances
+ record the location of the relevant colocation context manager but have the
+ "obj" field set to None to prevent leaking private data.
+
+ For example, suppose file_a contained these lines:
+
+ file_a.py:
+ 14: node_a = tf.constant(3, name='NODE_A')
+ 15: with tf.colocate_with(node_a):
+ 16: node_b = tf.constant(4, name='NODE_B')
+
+ Then a TraceableObject t_obj representing the colocation context manager
+ would have these member values:
+
+ t_obj.obj -> None
+ t_obj.name = 'NODE_A'
+ t_obj.filename = 'file_a.py'
+ t_obj.lineno = 15
+
+    and node_b.op._colocation_dict would return the dictionary
+
+ { 'NODE_A': t_obj }
+
+ Returns:
+ {str: traceable_stack.TraceableObject} as per this method's description,
+ above.
+ """
+ locations_dict = self._colocation_code_locations or {}
+ return locations_dict.copy()
+
+ @property
def _output_types(self):
"""List this operation's output types.
@@ -2154,7 +2195,7 @@ class Operation(object):
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
- return self._graph._convert_stack(self._traceback) # pylint: disable=protected-access
+ return tf_stack.convert_stack(self._traceback)
@property
def traceback_with_start_lines(self):
@@ -2163,9 +2204,8 @@ class Operation(object):
Returns:
A list of 5-tuples (filename, lineno, name, code, func_start_lineno).
"""
- return self._graph._convert_stack( # pylint: disable=protected-access
- self._traceback,
- include_func_start_lineno=True)
+ return tf_stack.convert_stack(self._traceback,
+ include_func_start_lineno=True)
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
@@ -2617,7 +2657,6 @@ def _name_from_scope_name(name):
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
-
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
@@ -2726,7 +2765,7 @@ class Graph(object):
self._building_function = False
# Stack of colocate_with ops. After switch_to_thread_local(),
# self._thread_local._colocation_stack is used instead.
- self._graph_colocation_stack = []
+ self._graph_colocation_stack = traceable_stack.TraceableStack()
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = set()
# Set of operations that are dangerous to fetch!
@@ -2766,36 +2805,6 @@ class Graph(object):
"""Temporary hack; can be overridden to force C API usage."""
return _USE_C_API
- def _convert_stack(self, stack, include_func_start_lineno=False):
- """Converts a stack extracted using _extract_stack() to a traceback stack.
-
- Args:
- stack: A list of n 5-tuples,
- (filename, lineno, name, frame_globals, func_start_lineno).
- include_func_start_lineno: True if function start line number should be
- included as the 5th entry in return tuples.
-
- Returns:
- A list of n 4-tuples or 5-tuples
- (filename, lineno, name, code, [optional: func_start_lineno]), where the
- code tuple element is calculated from the corresponding elements of the
- input tuple.
- """
- ret = []
- for (filename, lineno, name, frame_globals, func_start_lineno,
- unused_frame_info) in stack:
- linecache.checkcache(filename)
- line = linecache.getline(filename, lineno, frame_globals)
- if line:
- line = line.strip()
- else:
- line = None
- if include_func_start_lineno:
- ret.append((filename, lineno, name, line, func_start_lineno))
- else:
- ret.append((filename, lineno, name, line))
- return ret
-
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
@@ -2803,63 +2812,23 @@ class Graph(object):
# This step makes a copy of the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = list(self._variable_creator_stack)
- self._thread_local._variable_creator_stack.append(creator)
+ self._thread_local._variable_creator_stack.append(creator) # pylint: disable=protected-access
try:
yield
finally:
- self._thread_local._variable_creator_stack = old
+ self._thread_local._variable_creator_stack = old # pylint: disable=protected-access
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
- self._thread_local._variable_creator_stack = []
- return list(self._thread_local._variable_creator_stack)
+ self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access
+ return list(self._thread_local._variable_creator_stack) # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
- self._thread_local._variable_creator_stack = variable_creator_stack
-
- def _extract_stack(self):
- """A lightweight, extensible re-implementation of traceback.extract_stack.
-
- NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
- each stack frame using linecache, which results in an abundance of stat()
- calls. This implementation does not retrieve the code, and any consumer
- should apply _convert_stack to the result to obtain a traceback that can
- be formatted etc. using traceback methods.
-
- Derived classes can implement _extract_frame_info() to add extra information
- to the traceback.
-
- Returns:
- A list of 6-tuples
- (filename, lineno, name, frame_globals, func_start_lineno, custom_info)
- corresponding to the call stack of the current thread.
- """
- try:
- raise ZeroDivisionError
- except ZeroDivisionError:
- f = sys.exc_info()[2].tb_frame.f_back
- ret = []
- while f is not None:
- lineno = f.f_lineno
- co = f.f_code
- filename = co.co_filename
- name = co.co_name
- frame_globals = f.f_globals
- func_start_lineno = co.co_firstlineno
- frame_info = self._extract_frame_info(f)
- ret.append((filename, lineno, name, frame_globals, func_start_lineno,
- frame_info))
- f = f.f_back
- ret.reverse()
- return ret
-
- def _extract_frame_info(self, frame): # pylint: disable=unused-argument
- """Extracts custom information from a frame in an op traceback."""
- return None
+ self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access
def _check_not_finalized(self):
"""Check if the graph is finalized.
@@ -3301,7 +3270,7 @@ class Graph(object):
if self._colocation_stack:
all_colocation_groups = []
- for colocation_op in self._colocation_stack:
+ for colocation_op in self._colocation_stack.peek_objs():
all_colocation_groups.extend(colocation_op.colocation_groups())
if colocation_op.device:
# Make this device match the device of the colocated op, to provide
@@ -3320,6 +3289,7 @@ class Graph(object):
# pylint: disable=protected-access
op._set_attr("_class", attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
+ op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
# pylint: enable=protected-access
# Sets "container" attribute if
@@ -3629,9 +3599,13 @@ class Graph(object):
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
- create a new graph explicitly. Use this method with the `with` keyword
- to specify that ops created within the scope of a block should be
- added to this graph.
+ create a new graph explicitly.
+
+ Use this method with the `with` keyword to specify that ops created within
+ the scope of a block should be added to this graph. In this case, once
+ the scope of the `with` is exited, the previous default graph is set again
+    as default. Default graphs are maintained on a stack, so it is fine to
+    nest multiple levels of `as_default` calls.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
@@ -4074,10 +4048,13 @@ class Graph(object):
if ignore_existing:
current_stack = self._colocation_stack
- self._colocation_stack = []
+ self._colocation_stack = traceable_stack.TraceableStack()
if op is not None:
- self._colocation_stack.append(op)
+ # offset refers to the stack frame used for storing code location.
+ # We use 4, the sum of 1 to use our caller's stack frame and 3
+ # to jump over layers of context managers above us.
+ self._colocation_stack.push_obj(op, offset=4)
try:
yield
@@ -4085,7 +4062,7 @@ class Graph(object):
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
- self._colocation_stack.pop()
+ self._colocation_stack.pop_obj()
# Reset the colocation stack if requested.
if ignore_existing:
@@ -4712,15 +4689,24 @@ class Graph(object):
@property
def _colocation_stack(self):
+ """Return thread-local copy of colocation stack."""
if self._stack_state_is_thread_local:
# This may be called from a thread where colocation_stack doesn't yet
# exist.
if not hasattr(self._thread_local, "_colocation_stack"):
- self._thread_local._colocation_stack = self._graph_colocation_stack[:]
+ stack_copy_for_this_thread = self._graph_colocation_stack.copy()
+ # pylint: disable=protected-access
+ self._thread_local._colocation_stack = stack_copy_for_this_thread
+ # pylint: enable=protected-access
return self._thread_local._colocation_stack
else:
return self._graph_colocation_stack
+ def _snapshot_colocation_stack_metadata(self):
+ """Return colocation stack metadata as a dictionary."""
+ traceable_objects = self._colocation_stack.peek_traceable_objs()
+ return {obj.obj.name: obj.copy_metadata() for obj in traceable_objects}
+
@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
if self._stack_state_is_thread_local:
@@ -5251,7 +5237,10 @@ def enable_eager_execution(config=None,
to this function.
"""
return enable_eager_execution_internal(
- config, device_policy, execution_mode, None)
+ config=config,
+ device_policy=device_policy,
+ execution_mode=execution_mode,
+ server_def=None)
def enable_eager_execution_internal(config=None,
diff --git a/tensorflow/python/framework/ops_test.py b/tensorflow/python/framework/ops_test.py
index 150100d771..f848b69782 100644
--- a/tensorflow/python/framework/ops_test.py
+++ b/tensorflow/python/framework/ops_test.py
@@ -2554,6 +2554,14 @@ class ColocationGroupTest(test_util.TensorFlowTestCase):
with self.assertRaises(ValueError):
c.op.get_attr("_class")
+ # Roughly test that stack information is being saved correctly for the op.
+ locations_dict = b.op._colocation_dict
+ self.assertIn("a", locations_dict)
+ metadata = locations_dict["a"]
+ self.assertIsNone(metadata.obj)
+ basename = metadata.filename.split("/")[-1]
+ self.assertEqual("ops_test.py", basename)
+
def testColocationDeviceInteraction(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index ec3748b40e..76d4c2017c 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -943,6 +943,7 @@ from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
+from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util.tf_export import tf_export
)");
diff --git a/tensorflow/python/framework/python_op_gen_internal.cc b/tensorflow/python/framework/python_op_gen_internal.cc
index 940bffb906..031b4a384e 100644
--- a/tensorflow/python/framework/python_op_gen_internal.cc
+++ b/tensorflow/python/framework/python_op_gen_internal.cc
@@ -588,10 +588,12 @@ void GenPythonOp::AddExport() {
return;
}
+ // Add @tf_export decorator.
strings::StrAppend(&result_, "@tf_export(");
// Add all endpoint names to tf_export.
bool first_endpoint = true;
+ std::vector<string> deprecated_endpoints;
for (const auto& endpoint : api_def_.endpoint()) {
if (!first_endpoint) {
strings::StrAppend(&result_, ", ");
@@ -601,9 +603,32 @@ void GenPythonOp::AddExport() {
string endpoint_name;
python_op_gen_internal::GenerateLowerCaseOpName(endpoint.name(),
&endpoint_name);
+ if (endpoint.deprecated()) {
+ deprecated_endpoints.push_back(endpoint_name);
+ }
strings::StrAppend(&result_, "'", endpoint_name, "'");
}
strings::StrAppend(&result_, ")\n");
+
+ // If all endpoints are deprecated, add @deprecated decorator.
+ if (!api_def_.deprecation_message().empty()) {
+ const string instructions = api_def_.deprecation_message();
+ strings::StrAppend(&result_, "@deprecated(None, '", instructions, "')\n");
+ }
+ // Add @deprecated_endpoints decorator.
+ if (!deprecated_endpoints.empty()) {
+ strings::StrAppend(&result_, "@deprecated_endpoints(");
+ bool first_endpoint = true;
+ for (auto& endpoint_name : deprecated_endpoints) {
+ if (first_endpoint) {
+ first_endpoint = false;
+ } else {
+ strings::StrAppend(&result_, ", ");
+ }
+ strings::StrAppend(&result_, "'", endpoint_name, "'");
+ }
+ strings::StrAppend(&result_, ")\n");
+ }
}
void GenPythonOp::AddDefLine(const string& function_name,
diff --git a/tensorflow/python/framework/subscribe.py b/tensorflow/python/framework/subscribe.py
index 7797d991da..cee7398974 100644
--- a/tensorflow/python/framework/subscribe.py
+++ b/tensorflow/python/framework/subscribe.py
@@ -47,7 +47,7 @@ def _recursive_apply(tensors, apply_fn):
tensors_type = type(tensors)
if tensors_type is ops.Tensor:
return apply_fn(tensors)
- elif tensors_type is variables.Variable:
+ elif isinstance(tensors, variables.Variable):
return apply_fn(tensors.value())
elif isinstance(tensors, (list, tuple)):
tensors = [_recursive_apply(t, apply_fn) for t in tensors]
diff --git a/tensorflow/python/framework/tensor_util.py b/tensorflow/python/framework/tensor_util.py
index ca63efbc84..8c9dfce7cc 100644
--- a/tensorflow/python/framework/tensor_util.py
+++ b/tensorflow/python/framework/tensor_util.py
@@ -935,8 +935,10 @@ def constant_value_as_shape(tensor): # pylint: disable=invalid-name
def is_tensor(x): # pylint: disable=invalid-name
"""Check whether `x` is of tensor type.
- Check whether an object is a tensor. Equivalent to
- `isinstance(x, [tf.Tensor, tf.SparseTensor, tf.Variable])`.
+ Check whether an object is a tensor. This check is equivalent to calling
+  `isinstance(x, (tf.Tensor, tf.SparseTensor, tf.Variable))` and also checks
+ if all the component variables of a MirroredVariable or a TowerLocalVariable
+ are tensors.
Args:
x: A python object to check.
@@ -944,4 +946,5 @@ def is_tensor(x): # pylint: disable=invalid-name
Returns:
`True` if `x` is a tensor, `False` if not.
"""
- return isinstance(x, ops._TensorLike) or ops.is_dense_tensor_like(x) # pylint: disable=protected-access
+ return (isinstance(x, ops._TensorLike) or ops.is_dense_tensor_like(x) or # pylint: disable=protected-access
+ (hasattr(x, "is_tensor_like") and x.is_tensor_like))
diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py
index 2bc2a189fa..fc47b1cca5 100644
--- a/tensorflow/python/framework/test_util.py
+++ b/tensorflow/python/framework/test_util.py
@@ -19,6 +19,8 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import collections
+from collections import OrderedDict
import contextlib
import gc
import itertools
@@ -571,6 +573,78 @@ def assert_no_garbage_created(f):
return decorator
+def _combine_named_parameters(**kwargs):
+ """Generate combinations based on its keyword arguments.
+
+ Two sets of returned combinations can be concatenated using +. Their product
+ can be computed using `times()`.
+
+ Args:
+ **kwargs: keyword arguments of form `option=[possibilities, ...]`
+ or `option=the_only_possibility`.
+
+ Returns:
+ a list of dictionaries for each combination. Keys in the dictionaries are
+ the keyword argument names. Each key has one value - one of the
+ corresponding keyword argument values.
+ """
+ if not kwargs:
+ return [OrderedDict()]
+
+ sort_by_key = lambda k: k[0][0]
+ kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
+ first = list(kwargs.items())[0]
+
+ rest = dict(list(kwargs.items())[1:])
+ rest_combined = _combine_named_parameters(**rest)
+
+ key = first[0]
+ values = first[1]
+ if not isinstance(values, list):
+ values = [values]
+
+ combinations = [
+ OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
+ for v in values
+ for combined in rest_combined
+ ]
+ return combinations
+
+
+def generate_combinations_with_testcase_name(**kwargs):
+ """Generate combinations based on its keyword arguments using combine().
+
+ This function calls combine() and appends a testcase name to the list of
+ dictionaries returned. The 'testcase_name' key is a required for named
+ parameterized tests.
+
+ Args:
+ **kwargs: keyword arguments of form `option=[possibilities, ...]`
+ or `option=the_only_possibility`.
+
+ Returns:
+ a list of dictionaries for each combination. Keys in the dictionaries are
+ the keyword argument names. Each key has one value - one of the
+ corresponding keyword argument values.
+ """
+ combinations = _combine_named_parameters(**kwargs)
+ named_combinations = []
+ for combination in combinations:
+ assert isinstance(combination, OrderedDict)
+ name = "".join([
+ "_{}_{}".format(
+ "".join(filter(str.isalnum, key)),
+ "".join(filter(str.isalnum, str(value))))
+ for key, value in combination.items()
+ ])
+ named_combinations.append(
+ OrderedDict(
+ list(combination.items()) + [("testcase_name",
+ "_test{}".format(name))]))
+
+ return named_combinations
+
+
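A quick, hedged illustration of how a parameterized test might consume these helpers; the option names and values below are made up.

# Each resulting OrderedDict carries a 'testcase_name' entry suitable for
# parameterized.named_parameters, e.g. '_test_mode_graph_usegpu_True'.
combos = generate_combinations_with_testcase_name(
    mode=['graph', 'eager'], use_gpu=[True, False])
assert len(combos) == 4
assert all(c['testcase_name'].startswith('_test') for c in combos)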
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
@@ -1227,8 +1301,8 @@ class TensorFlowTestCase(googletest.TestCase):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
- a_is_dict = isinstance(a, dict)
- if a_is_dict != isinstance(b, dict):
+ a_is_dict = isinstance(a, collections.Mapping)
+ if a_is_dict != isinstance(b, collections.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
diff --git a/tensorflow/python/framework/traceable_stack.py b/tensorflow/python/framework/traceable_stack.py
new file mode 100644
index 0000000000..7f4d28237f
--- /dev/null
+++ b/tensorflow/python/framework/traceable_stack.py
@@ -0,0 +1,132 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""A simple stack that associates filename and line numbers with each object."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.util import tf_stack
+
+
+class TraceableObject(object):
+ """Wrap an object together with its the code definition location."""
+
+ # Return codes for the set_filename_and_line_from_caller() method.
+ SUCCESS, HEURISTIC_USED, FAILURE = (0, 1, 2)
+
+ def __init__(self, obj, filename=None, lineno=None):
+ self.obj = obj
+ self.filename = filename
+ self.lineno = lineno
+
+ def set_filename_and_line_from_caller(self, offset=0):
+ """Set filename and line using the caller's stack frame.
+
+ If the requested stack information is not available, a heuristic may
+ be applied and self.HEURISTIC_USED will be returned. If the heuristic
+ fails then no change will be made to the filename and lineno members
+ (None by default) and self.FAILURE will be returned.
+
+ Args:
+ offset: Integer. If 0, the caller's stack frame is used. If 1,
+ the caller's caller's stack frame is used. Larger values are
+ permissible but if out-of-range (larger than the number of stack
+ frames available) the outermost stack frame will be used.
+
+ Returns:
+ TraceableObject.SUCCESS if appropriate stack information was found,
+ TraceableObject.HEURISTIC_USED if the offset was larger than the stack,
+ and TraceableObject.FAILURE if the stack was empty.
+ """
+ # Offset is defined in "Args" as relative to the caller. We are one frame
+ # beyond the caller.
+ local_offset = offset + 1
+
+ frame_records = tf_stack.extract_stack()
+ if not frame_records:
+ return self.FAILURE
+ if len(frame_records) >= local_offset:
+ # Negative indexing is one-indexed instead of zero-indexed.
+ negative_offset = -(local_offset + 1)
+ self.filename, self.lineno = frame_records[negative_offset][:2]
+ return self.SUCCESS
+ else:
+ # If the offset is too large then we use the largest offset possible,
+ # meaning we use the outermost stack frame at index 0.
+ self.filename, self.lineno = frame_records[0][:2]
+ return self.HEURISTIC_USED
+
+ def copy_metadata(self):
+ """Return a TraceableObject like this one, but without the object."""
+ return self.__class__(None, filename=self.filename, lineno=self.lineno)
+
+
+class TraceableStack(object):
+ """A stack of TraceableObjects."""
+
+ def __init__(self, existing_stack=None):
+ """Constructor.
+
+ Args:
+ existing_stack: [TraceableObject, ...] If provided, this object will
+ set its new stack to a SHALLOW COPY of existing_stack.
+ """
+ self._stack = existing_stack[:] if existing_stack else []
+
+ def push_obj(self, obj, offset=0):
+ """Add object to the stack and record its filename and line information.
+
+ Args:
+ obj: An object to store on the stack.
+ offset: Integer. If 0, the caller's stack frame is used. If 1,
+ the caller's caller's stack frame is used.
+
+ Returns:
+ TraceableObject.SUCCESS if appropriate stack information was found,
+ TraceableObject.HEURISTIC_USED if the stack was smaller than expected,
+ and TraceableObject.FAILURE if the stack was empty.
+ """
+ traceable_obj = TraceableObject(obj)
+ self._stack.append(traceable_obj)
+ # Offset is defined in "Args" as relative to the caller. We are 1 frame
+ # beyond the caller and need to compensate.
+ return traceable_obj.set_filename_and_line_from_caller(offset + 1)
+
+ def pop_obj(self):
+ """Remove last-inserted object and return it, without filename/line info."""
+ return self._stack.pop().obj
+
+ def peek_objs(self):
+ """Return list of stored objects ordered newest to oldest."""
+ return [t_obj.obj for t_obj in reversed(self._stack)]
+
+ def peek_traceable_objs(self):
+ """Return list of stored TraceableObjects ordered newest to oldest."""
+ return list(reversed(self._stack))
+
+ def __len__(self):
+ """Return number of items on the stack, and used for truth-value testing."""
+ return len(self._stack)
+
+ def copy(self):
+ """Return a copy of self referencing the same objects but in a new list.
+
+ This method is implemented to support thread-local stacks.
+
+ Returns:
+ TraceableStack with a new list that holds existing objects.
+ """
+ return TraceableStack(self._stack)
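A small usage sketch of the two classes above; the recorded filename and line simply point at wherever push_obj was called.

from tensorflow.python.framework import traceable_stack

stack = traceable_stack.TraceableStack()
stack.push_obj('first')
stack.push_obj('second')

print(stack.peek_objs())               # ['second', 'first'], newest first
newest = stack.peek_traceable_objs()[0]
print(newest.filename, newest.lineno)  # location of the push_obj('second') call
print(stack.pop_obj())                 # 'second'
print(len(stack))                      # 1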
diff --git a/tensorflow/python/framework/traceable_stack_test.py b/tensorflow/python/framework/traceable_stack_test.py
new file mode 100644
index 0000000000..3e7876f631
--- /dev/null
+++ b/tensorflow/python/framework/traceable_stack_test.py
@@ -0,0 +1,133 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tensorflow.python.framework.traceable_stack."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensorflow.python.framework import test_util
+from tensorflow.python.framework import traceable_stack
+from tensorflow.python.platform import googletest
+from tensorflow.python.util import tf_inspect as inspect
+
+_LOCAL_OBJECT = lambda x: x
+_THIS_FILENAME = inspect.getsourcefile(_LOCAL_OBJECT)
+
+
+class TraceableObjectTest(test_util.TensorFlowTestCase):
+
+ def testSetFilenameAndLineFromCallerUsesCallersStack(self):
+ t_obj = traceable_stack.TraceableObject(17)
+
+ # Do not separate placeholder from the set_filename_and_line_from_caller()
+ # call one line below it as it is used to calculate the latter's line
+ # number.
+ placeholder = lambda x: x
+ result = t_obj.set_filename_and_line_from_caller()
+
+ expected_lineno = inspect.getsourcelines(placeholder)[1] + 1
+ self.assertEqual(expected_lineno, t_obj.lineno)
+ self.assertEqual(_THIS_FILENAME, t_obj.filename)
+ self.assertEqual(t_obj.SUCCESS, result)
+
+ def testSetFilenameAndLineFromCallerRespectsOffset(self):
+
+ def call_set_filename_and_line_from_caller(t_obj):
+ # We expect to retrieve the line number from _our_ caller.
+ return t_obj.set_filename_and_line_from_caller(offset=1)
+
+ t_obj = traceable_stack.TraceableObject(None)
+ # Do not separate placeholder from the
+ # call_set_filename_and_line_from_caller() call one line below it as it is
+ # used to calculate the latter's line number.
+ placeholder = lambda x: x
+ result = call_set_filename_and_line_from_caller(t_obj)
+
+ expected_lineno = inspect.getsourcelines(placeholder)[1] + 1
+ self.assertEqual(expected_lineno, t_obj.lineno)
+ self.assertEqual(t_obj.SUCCESS, result)
+
+ def testSetFilenameAndLineFromCallerHandlesRidiculousOffset(self):
+ t_obj = traceable_stack.TraceableObject('The quick brown fox.')
+ # This line shouldn't die.
+ result = t_obj.set_filename_and_line_from_caller(offset=300)
+
+ # We expect a heuristic to be used because we are not currently 300 frames
+ # down on the stack. The filename and lineno of the outermost frame are not
+ # predictable -- in some environments the filename is this test file, but in
+ # other environments it is not (e.g. due to a test runner calling this
+ # file). Therefore we only test that the called function knows it applied a
+ # heuristic for the ridiculous stack offset.
+ self.assertEqual(t_obj.HEURISTIC_USED, result)
+
+
+class TraceableStackTest(test_util.TensorFlowTestCase):
+
+ def testPushPeekPopObj(self):
+ t_stack = traceable_stack.TraceableStack()
+ t_stack.push_obj(42.0)
+ t_stack.push_obj('hope')
+
+ expected_lifo_peek = ['hope', 42.0]
+ self.assertEqual(expected_lifo_peek, t_stack.peek_objs())
+
+ self.assertEqual('hope', t_stack.pop_obj())
+ self.assertEqual(42.0, t_stack.pop_obj())
+
+ def testPushPopPreserveLifoOrdering(self):
+ t_stack = traceable_stack.TraceableStack()
+ t_stack.push_obj(0)
+ t_stack.push_obj(1)
+ t_stack.push_obj(2)
+ t_stack.push_obj(3)
+
+ obj_3 = t_stack.pop_obj()
+ obj_2 = t_stack.pop_obj()
+ obj_1 = t_stack.pop_obj()
+ obj_0 = t_stack.pop_obj()
+
+ self.assertEqual(3, obj_3)
+ self.assertEqual(2, obj_2)
+ self.assertEqual(1, obj_1)
+ self.assertEqual(0, obj_0)
+
+ def testPushObjSetsFilenameAndLineInfoForCaller(self):
+ t_stack = traceable_stack.TraceableStack()
+
+ # We expect that the line number recorded for the 1-object will come from
+ # the call to t_stack.push_obj(1). Do not separate the next two lines!
+ placeholder_1 = lambda x: x
+ t_stack.push_obj(1)
+
+ # We expect that the line number recorded for the 2-object will come from
+ # the call to call_push_obj() and _not_ the call to t_stack.push_obj().
+ def call_push_obj(obj):
+ t_stack.push_obj(obj, offset=1)
+
+ # Do not separate the next two lines!
+ placeholder_2 = lambda x: x
+ call_push_obj(2)
+
+ expected_lineno_1 = inspect.getsourcelines(placeholder_1)[1] + 1
+ expected_lineno_2 = inspect.getsourcelines(placeholder_2)[1] + 1
+
+ t_obj_2, t_obj_1 = t_stack.peek_traceable_objs()
+ self.assertEqual(expected_lineno_2, t_obj_2.lineno)
+ self.assertEqual(expected_lineno_1, t_obj_1.lineno)
+
+
+if __name__ == '__main__':
+ googletest.main()
diff --git a/tensorflow/python/grappler/layout_optimizer_test.py b/tensorflow/python/grappler/layout_optimizer_test.py
index 7d07c77c79..8cc971c61d 100644
--- a/tensorflow/python/grappler/layout_optimizer_test.py
+++ b/tensorflow/python/grappler/layout_optimizer_test.py
@@ -1340,7 +1340,7 @@ class LayoutOptimizerTest(test.TestCase):
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
- self.assertAllEqual(output_val_ref, output_val)
+ self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoop(self):
if test.is_gpu_available(cuda_only=True):
diff --git a/tensorflow/python/keras/BUILD b/tensorflow/python/keras/BUILD
index 4056818a95..df409d2aa5 100755
--- a/tensorflow/python/keras/BUILD
+++ b/tensorflow/python/keras/BUILD
@@ -704,6 +704,17 @@ cuda_py_test(
],
)
+cuda_py_test(
+ name = "training_gpu_test",
+ size = "small",
+ srcs = ["engine/training_gpu_test.py"],
+ additional_deps = [
+ ":keras",
+ "//third_party/py/numpy",
+ "//tensorflow/python:client_testlib",
+ ],
+)
+
py_test(
name = "imagenet_utils_test",
size = "small",
@@ -721,7 +732,6 @@ py_test(
size = "medium",
srcs = ["preprocessing/image_test.py"],
srcs_version = "PY2AND3",
- tags = ["nomsan"], # TODO(b/110990716) reenable
deps = [
":keras",
"//tensorflow/python:client_testlib",
@@ -793,6 +803,19 @@ py_test(
)
py_test(
+ name = "training_utils_test",
+ size = "medium",
+ srcs = ["engine/training_utils_test.py"],
+ srcs_version = "PY2AND3",
+ tags = ["notsan"],
+ deps = [
+ ":keras",
+ "//tensorflow/python:client_testlib",
+ "//third_party/py/numpy",
+ ],
+)
+
+py_test(
name = "model_subclassing_test",
size = "medium",
srcs = ["model_subclassing_test.py"],
diff --git a/tensorflow/python/keras/activations.py b/tensorflow/python/keras/activations.py
index f608dea430..99645de736 100644
--- a/tensorflow/python/keras/activations.py
+++ b/tensorflow/python/keras/activations.py
@@ -128,20 +128,26 @@ def softsign(x):
@tf_export('keras.activations.relu')
-def relu(x, alpha=0., max_value=None):
+def relu(x, alpha=0., max_value=None, threshold=0):
"""Rectified Linear Unit.
+ With default values, it returns element-wise `max(x, 0)`.
+
+ Otherwise, it follows:
+ `f(x) = max_value` for `x >= max_value`,
+ `f(x) = x` for `threshold <= x < max_value`,
+ `f(x) = alpha * (x - threshold)` otherwise.
+
Arguments:
- x: Input tensor.
- alpha: Slope of the negative part. Defaults to zero.
- max_value: Maximum value for the output.
+ x: A tensor or variable.
+ alpha: A scalar, slope of negative section (default=`0.`).
+ max_value: float. Saturation threshold.
+ threshold: float. Threshold value for thresholded activation.
Returns:
- The (leaky) rectified linear unit activation: `x` if `x > 0`,
- `alpha * x` if `x < 0`. If `max_value` is defined, the result
- is truncated to this value.
+ A tensor.
"""
- return K.relu(x, alpha=alpha, max_value=max_value)
+ return K.relu(x, alpha=alpha, max_value=max_value, threshold=threshold)
@tf_export('keras.activations.tanh')
diff --git a/tensorflow/python/keras/applications/mobilenet.py b/tensorflow/python/keras/applications/mobilenet.py
index e56c695a28..7285e03963 100644
--- a/tensorflow/python/keras/applications/mobilenet.py
+++ b/tensorflow/python/keras/applications/mobilenet.py
@@ -72,13 +72,9 @@ from __future__ import print_function
import os
from tensorflow.python.keras import backend as K
-from tensorflow.python.keras import constraints
-from tensorflow.python.keras import initializers
-from tensorflow.python.keras import regularizers
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras.applications.imagenet_utils import decode_predictions
-from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.layers import Activation
from tensorflow.python.keras.layers import BatchNormalization
from tensorflow.python.keras.layers import Conv2D
@@ -87,10 +83,10 @@ from tensorflow.python.keras.layers import Dropout
from tensorflow.python.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras.layers import GlobalMaxPooling2D
from tensorflow.python.keras.layers import Input
+from tensorflow.python.keras.layers import ReLU
from tensorflow.python.keras.layers import Reshape
from tensorflow.python.keras.layers import ZeroPadding2D
from tensorflow.python.keras.models import Model
-from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
@@ -100,10 +96,6 @@ from tensorflow.python.util.tf_export import tf_export
BASE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.6/'
-def relu6(x):
- return K.relu(x, max_value=6)
-
-
@tf_export('keras.applications.mobilenet.preprocess_input')
def preprocess_input(x):
"""Preprocesses a numpy array encoding a batch of images.
@@ -130,12 +122,6 @@ def MobileNet(input_shape=None,
classes=1000):
"""Instantiates the MobileNet architecture.
- To load a MobileNet model via `load_model`, import the custom
- objects `relu6` and pass them to the `custom_objects` parameter.
- E.g.
- model = load_model('mobilenet.h5', custom_objects={
- 'relu6': mobilenet.relu6})
-
Arguments:
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
@@ -412,7 +398,7 @@ def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
strides=strides,
name='conv1')(x)
x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
- return Activation(relu6, name='conv1_relu')(x)
+ return ReLU(6, name='conv1_relu')(x)
def _depthwise_conv_block(inputs,
@@ -479,7 +465,7 @@ def _depthwise_conv_block(inputs,
use_bias=False,
name='conv_dw_%d' % block_id)(x)
x = BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
- x = Activation(relu6, name='conv_dw_%d_relu' % block_id)(x)
+ x = ReLU(6, name='conv_dw_%d_relu' % block_id)(x)
x = Conv2D(
pointwise_conv_filters, (1, 1),
@@ -489,4 +475,4 @@ def _depthwise_conv_block(inputs,
name='conv_pw_%d' % block_id)(
x)
x = BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
- return Activation(relu6, name='conv_pw_%d_relu' % block_id)(x)
+ return ReLU(6, name='conv_pw_%d_relu' % block_id)(x)
diff --git a/tensorflow/python/keras/backend.py b/tensorflow/python/keras/backend.py
index cb3423598b..38794f1612 100644
--- a/tensorflow/python/keras/backend.py
+++ b/tensorflow/python/keras/backend.py
@@ -3372,26 +3372,48 @@ def in_test_phase(x, alt, training=None):
@tf_export('keras.backend.relu')
-def relu(x, alpha=0., max_value=None):
+def relu(x, alpha=0., max_value=None, threshold=0):
"""Rectified linear unit.
With default values, it returns element-wise `max(x, 0)`.
+ Otherwise, it follows:
+ `f(x) = max_value` for `x >= max_value`,
+ `f(x) = x` for `threshold <= x < max_value`,
+ `f(x) = alpha * (x - threshold)` otherwise.
+
Arguments:
x: A tensor or variable.
alpha: A scalar, slope of negative section (default=`0.`).
- max_value: Saturation threshold.
+ max_value: float. Saturation threshold.
+ threshold: float. Threshold value for thresholded activation.
Returns:
A tensor.
"""
+ clip_max = max_value is not None
+
if alpha != 0.:
- negative_part = nn.relu(-x)
- x = nn.relu(x)
- if max_value is not None:
+ if threshold != 0:
+ negative_part = nn.relu(-x + threshold)
+ else:
+ negative_part = nn.relu(-x)
+
+ if threshold != 0:
+ # computes x for x > threshold else 0
+ x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())
+ elif max_value == 6:
+ # if no threshold, then can use nn.relu6 native TF op for performance
+ x = nn.relu6(x)
+ clip_max = False
+ else:
+ x = nn.relu(x)
+
+ if clip_max:
max_value = _to_tensor(max_value, x.dtype.base_dtype)
zero = _to_tensor(0., x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, max_value)
+
if alpha != 0.:
alpha = _to_tensor(alpha, x.dtype.base_dtype)
x -= alpha * negative_part
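A small numeric sketch of the piecewise behaviour introduced above, using the combined-arguments case that the backend tests below also exercise.

from tensorflow.python import keras

K = keras.backend
x = K.constant([[-4., 0.], [2., 7.]])
# With alpha=0.25, threshold=4, max_value=5:
#   x <  4      -> 0.25 * (x - 4)   (-4 -> -2, 0 -> -1, 2 -> -0.5)
#   4 <= x < 5  -> x
#   x >= 5      -> 5
print(K.eval(K.relu(x, alpha=0.25, threshold=4, max_value=5)))
# [[-2.  -1. ]
#  [-0.5  5. ]]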
@@ -3458,7 +3480,7 @@ def softsign(x):
@tf_export('keras.backend.categorical_crossentropy')
-def categorical_crossentropy(target, output, from_logits=False):
+def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy between an output tensor and a target tensor.
Arguments:
@@ -3468,28 +3490,33 @@ def categorical_crossentropy(target, output, from_logits=False):
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
+ axis: Int specifying the channels axis. `axis=-1` corresponds to data
+ format `channels_last`, and `axis=1` corresponds to data format
+ `channels_first`.
Returns:
Output tensor.
+
+ Raises:
+ ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
+ rank = len(output.get_shape())
+ axis = axis % rank
# Note: nn.softmax_cross_entropy_with_logits_v2
# expects logits, Keras expects probabilities.
if not from_logits:
# scale preds so that the class probas of each sample sum to 1
- output = output / math_ops.reduce_sum( # pylint: disable=g-no-augmented-assignment
- output, len(output.get_shape()) - 1, True)
+ output = output / math_ops.reduce_sum(output, axis, True)
# manual computation of crossentropy
epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
- return -math_ops.reduce_sum(
- target * math_ops.log(output),
- axis=len(output.get_shape()) - 1)
+ return -math_ops.reduce_sum(target * math_ops.log(output), axis)
else:
return nn.softmax_cross_entropy_with_logits_v2(labels=target, logits=output)
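A hedged sketch of the new axis argument for channels_first-style outputs; the shapes below are hypothetical.

import numpy as np
from tensorflow.python import keras

K = keras.backend
# Hypothetical channels_first output: (batch=2, classes=3, positions=4).
probs = np.random.rand(2, 3, 4).astype('float32')
probs /= probs.sum(axis=1, keepdims=True)  # normalize over the class axis
target = np.eye(3, dtype='float32')[
    np.random.randint(0, 3, size=(2, 4))].transpose(0, 2, 1)
loss = K.categorical_crossentropy(K.constant(target), K.constant(probs), axis=1)
print(K.int_shape(loss))  # (2, 4): one loss value per spatial position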
@tf_export('keras.backend.sparse_categorical_crossentropy')
-def sparse_categorical_crossentropy(target, output, from_logits=False):
+def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy with integer targets.
Arguments:
@@ -3499,10 +3526,22 @@ def sparse_categorical_crossentropy(target, output, from_logits=False):
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
+ axis: Int specifying the channels axis. `axis=-1` corresponds to data
+ format `channels_last`, and `axis=1` corresponds to data format
+ `channels_first`.
Returns:
Output tensor.
+
+ Raises:
+ ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
+ rank = len(output.get_shape())
+ axis = axis % rank
+ if axis != rank - 1:
+ permutation = list(range(axis)) + list(range(axis + 1, rank)) + [axis]
+ output = array_ops.transpose(output, perm=permutation)
+
# Note: nn.sparse_softmax_cross_entropy_with_logits
# expects logits, Keras expects probabilities.
if not from_logits:
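The permutation built above just rotates the class axis to the last position so the underlying op, which expects class scores in the final dimension, can be reused; for a hypothetical rank-4 channels_first output:

rank, axis = 4, 1  # e.g. output of shape (batch, classes, H, W)
permutation = list(range(axis)) + list(range(axis + 1, rank)) + [axis]
print(permutation)  # [0, 2, 3, 1]: (batch, classes, H, W) -> (batch, H, W, classes)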
diff --git a/tensorflow/python/keras/backend_test.py b/tensorflow/python/keras/backend_test.py
index 36478ea089..40e7910061 100644
--- a/tensorflow/python/keras/backend_test.py
+++ b/tensorflow/python/keras/backend_test.py
@@ -23,6 +23,7 @@ import scipy.sparse
from tensorflow.python import keras
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@@ -490,6 +491,66 @@ class BackendLinearAlgebraTest(test.TestCase):
input_shape_a=(4, 7),
input_shape_b=(4, 7))
+ def test_relu(self):
+ x = ops.convert_to_tensor([[-4, 0], [2, 7]], 'float32')
+ with self.test_session():
+ # standard relu
+ relu_op = keras.backend.relu(x)
+ self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 7]])
+
+ # alpha
+ relu_op = keras.backend.relu(x, alpha=0.5)
+ self.assertAllClose(keras.backend.eval(relu_op), [[-2, 0], [2, 7]])
+
+ # max_value < some elements
+ relu_op = keras.backend.relu(x, max_value=5)
+ self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 5]])
+
+ # nn.relu6 used
+ relu_op = keras.backend.relu(x, max_value=6)
+ self.assertTrue('Relu6' in relu_op.name) # uses tf.nn.relu6
+ self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 6]])
+
+ # max value > 6
+ relu_op = keras.backend.relu(x, max_value=10)
+ self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 7]])
+
+ # max value is float
+ relu_op = keras.backend.relu(x, max_value=4.3)
+ self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 4.3]])
+
+ # max value == 0
+ relu_op = keras.backend.relu(x, max_value=0)
+ self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [0, 0]])
+
+ # alpha and max_value
+ relu_op = keras.backend.relu(x, alpha=0.25, max_value=3)
+ self.assertAllClose(keras.backend.eval(relu_op), [[-1, 0], [2, 3]])
+
+ # threshold
+ relu_op = keras.backend.relu(x, threshold=3)
+ self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [0, 7]])
+
+ # threshold is float
+ relu_op = keras.backend.relu(x, threshold=1.5)
+ self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [2, 7]])
+
+ # threshold is negative
+ relu_op = keras.backend.relu(x, threshold=-5)
+ self.assertAllClose(keras.backend.eval(relu_op), [[-4, 0], [2, 7]])
+
+ # threshold and max_value
+ relu_op = keras.backend.relu(x, threshold=3, max_value=5)
+ self.assertAllClose(keras.backend.eval(relu_op), [[0, 0], [0, 5]])
+
+ # threshold and alpha
+ relu_op = keras.backend.relu(x, alpha=0.25, threshold=4)
+ self.assertAllClose(keras.backend.eval(relu_op), [[-2, -1], [-0.5, 7]])
+
+ # threshold, alpha, and max_value
+ relu_op = keras.backend.relu(x, alpha=0.25, threshold=4, max_value=5)
+ self.assertAllClose(keras.backend.eval(relu_op), [[-2, -1], [-0.5, 5]])
+
class BackendShapeOpsTest(test.TestCase):
diff --git a/tensorflow/python/keras/callbacks.py b/tensorflow/python/keras/callbacks.py
index 5d66db232a..d1b9dc27bd 100644
--- a/tensorflow/python/keras/callbacks.py
+++ b/tensorflow/python/keras/callbacks.py
@@ -31,13 +31,16 @@ import time
import numpy as np
import six
+from tensorflow.python.framework import dtypes
from tensorflow.python.keras import backend as K
-from tensorflow.python.keras import optimizers
+from tensorflow.python.keras.engine.training_utils import standardize_input_data
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.ops import array_ops
-from tensorflow.python.ops.resource_variable_ops import ResourceVariable as Variable
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary as tf_summary
+from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import tf_export
@@ -644,35 +647,17 @@ class LearningRateScheduler(Callback):
self.verbose = verbose
def on_epoch_begin(self, epoch, logs=None):
- # TODO(yashkatariya): Change the property checking when the learning
- # rate attribute is unified across all TF Optimizers.
- if isinstance(self.model.optimizer, optimizers.TFOptimizer):
- if not hasattr(self.model.optimizer.optimizer, '_lr') and not hasattr(
- self.model.optimizer.optimizer, '_learning_rate'):
- raise ValueError(
- 'TF Optimizer must have a "_lr" or "_learning_rate" attribute.')
- else:
- opt = self.model.optimizer.optimizer
- if hasattr(opt, '_lr'):
- opt_lr = Variable(opt._lr) # pylint: disable=protected-access
- elif hasattr(opt, '_learning_rate'):
- opt_lr = Variable(opt._learning_rate) # pylint: disable=protected-access
- else:
- if not hasattr(self.model.optimizer, 'lr'):
- raise ValueError('Optimizer must have a "lr" attribute.')
- else:
- opt = self.model.optimizer
- opt_lr = opt.lr
-
+ if not hasattr(self.model.optimizer, 'lr'):
+ raise ValueError('Optimizer must have a "lr" attribute.')
try: # new API
- lr = float(K.get_value(opt_lr))
+ lr = float(K.get_value(self.model.optimizer.lr))
lr = self.schedule(epoch, lr)
except TypeError: # Support for old API for backward compatibility
lr = self.schedule(epoch)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
- K.set_value(opt_lr, lr)
+ K.set_value(self.model.optimizer.lr, lr)
if self.verbose > 0:
print('\nEpoch %05d: LearningRateScheduler reducing learning '
'rate to %s.' % (epoch + 1, lr))
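With the simplification above, the callback relies only on the Keras optimizer's lr attribute; a minimal usage sketch follows (the model and data are assumed to exist elsewhere).

from tensorflow.python import keras

def schedule(epoch, lr):
  # New-style schedule: receives the current learning rate, returns a float.
  return lr * 0.5 if epoch > 0 else lr

lr_cbk = keras.callbacks.LearningRateScheduler(schedule, verbose=1)
# model.fit(x_train, y_train, epochs=5, callbacks=[lr_cbk])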
@@ -717,7 +702,9 @@ class TensorBoard(Callback):
write_images: whether to write model weights to visualize as
image in TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding
- layers will be saved.
+ layers will be saved. If set to 0, embeddings won't be computed.
+ Data to be visualized in TensorBoard's Embedding tab must be passed
+ as `embeddings_data`.
embeddings_layer_names: a list of names of layers to keep eye on. If
None or empty list all the embedding layer will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name
@@ -725,6 +712,10 @@ class TensorBoard(Callback):
[details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about metadata files format. In case if the same metadata file is
used for all embedding layers, string can be passed.
+ embeddings_data: data to be embedded at layers specified in
+ `embeddings_layer_names`. Numpy array (if the model has a single
+ input) or list of Numpy arrays (if the model has multiple inputs).
+ Learn [more about embeddings](https://www.tensorflow.org/programmers_guide/embedding)
"""
# pylint: enable=line-too-long
@@ -735,7 +726,11 @@ class TensorBoard(Callback):
batch_size=32,
write_graph=True,
write_grads=False,
- write_images=False):
+ write_images=False,
+ embeddings_freq=0,
+ embeddings_layer_names=None,
+ embeddings_metadata=None,
+ embeddings_data=None):
super(TensorBoard, self).__init__()
self.log_dir = log_dir
self.histogram_freq = histogram_freq
@@ -745,8 +740,13 @@ class TensorBoard(Callback):
self.write_images = write_images
self.batch_size = batch_size
self._current_batch = 0
+ self._total_batches_seen = 0
# abstracted writer class to be able to stub for testing
self._writer_class = tf_summary.FileWriter
+ self.embeddings_freq = embeddings_freq
+ self.embeddings_layer_names = embeddings_layer_names
+ self.embeddings_metadata = embeddings_metadata
+ self.embeddings_data = embeddings_data
def set_model(self, model):
"""Sets Keras model and creates summary ops."""
@@ -798,7 +798,11 @@ class TensorBoard(Callback):
tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
if hasattr(layer, 'output'):
- tf_summary.histogram('{}_out'.format(layer.name), layer.output)
+ if isinstance(layer.output, list):
+ for i, output in enumerate(layer.output):
+ tf_summary.histogram('{}_out_{}'.format(layer.name, i), output)
+ else:
+ tf_summary.histogram('{}_out'.format(layer.name), layer.output)
self.merged = tf_summary.merge_all()
if self.write_graph:
@@ -806,12 +810,98 @@ class TensorBoard(Callback):
else:
self.writer = self._writer_class(self.log_dir)
+ # If both embedding_freq and embeddings_data are available, we will
+ # visualize embeddings.
+ if self.embeddings_freq and self.embeddings_data is not None:
+ self.embeddings_data = standardize_input_data(self.embeddings_data,
+ model.input_names)
+
+ # If embedding_layer_names are not provided, get all of the embedding
+ # layers from the model.
+ embeddings_layer_names = self.embeddings_layer_names
+ if not embeddings_layer_names:
+ embeddings_layer_names = [
+ layer.name
+ for layer in self.model.layers
+ if type(layer).__name__ == 'Embedding'
+ ]
+
+ self.assign_embeddings = []
+ embeddings_vars = {}
+
+ self.batch_id = batch_id = array_ops.placeholder(dtypes.int32)
+ self.step = step = array_ops.placeholder(dtypes.int32)
+
+ for layer in self.model.layers:
+ if layer.name in embeddings_layer_names:
+ embedding_input = self.model.get_layer(layer.name).output
+ embedding_size = np.prod(embedding_input.shape[1:])
+ embedding_input = array_ops.reshape(embedding_input,
+ (step, int(embedding_size)))
+ shape = (self.embeddings_data[0].shape[0], int(embedding_size))
+ embedding = variables.Variable(
+ array_ops.zeros(shape), name=layer.name + '_embedding')
+ embeddings_vars[layer.name] = embedding
+ batch = state_ops.assign(embedding[batch_id:batch_id + step],
+ embedding_input)
+ self.assign_embeddings.append(batch)
+
+ self.saver = saver.Saver(list(embeddings_vars.values()))
+
+ # Create embeddings_metadata dictionary
+ if isinstance(self.embeddings_metadata, str):
+ embeddings_metadata = {
+ layer_name: self.embeddings_metadata
+ for layer_name in embeddings_vars.keys()
+ }
+ else:
+ # If embeddings_metadata is already a dictionary
+ embeddings_metadata = self.embeddings_metadata
+
+ try:
+ from tensorboard.plugins import projector
+ except ImportError:
+ raise ImportError('Failed to import TensorBoard. Please make sure that '
+ 'TensorBoard integration is complete.')
+
+ # TODO(psv): Add integration tests to test embedding visualization
+ # with TensorBoard callback. We are unable to write a unit test for this
+ # because TensorBoard dependency assumes TensorFlow package is installed.
+ config = projector.ProjectorConfig()
+ for layer_name, tensor in embeddings_vars.items():
+ embedding = config.embeddings.add()
+ embedding.tensor_name = tensor.name
+
+ if (embeddings_metadata is not None and
+ layer_name in embeddings_metadata):
+ embedding.metadata_path = embeddings_metadata[layer_name]
+
+ projector.visualize_embeddings(self.writer, config)
+
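A hedged configuration sketch for the embedding options wired up above; the path, data, and model are hypothetical, and nothing is written unless embeddings_data is supplied.

import numpy as np
from tensorflow.python import keras

x_train = np.random.randint(0, 1000, size=(32, 10))  # hypothetical token ids
tb_cbk = keras.callbacks.TensorBoard(
    log_dir='/tmp/logs',
    embeddings_freq=1,             # checkpoint embeddings every epoch
    embeddings_layer_names=None,   # None: watch every Embedding layer
    embeddings_metadata=None,
    embeddings_data=x_train)       # fed through the model in on_epoch_end
# model.fit(x_train, y_train, epochs=2, callbacks=[tb_cbk])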
def _fetch_callback(self, summary):
self.writer.add_summary(
summary,
self._epoch + self._current_val_batch / self._validation_batches)
self._current_val_batch += 1
+ def _write_custom_summaries(self, step, logs=None):
+ """Writes metrics out as custom scalar summaries.
+
+ Arguments:
+ step: the global step to use for Tensorboard.
+ logs: dict. Keys are scalar summary names, values are
+ NumPy scalars.
+
+ """
+ logs = logs or {}
+ for name, value in logs.items():
+ summary = tf_summary.Summary()
+ summary_value = summary.value.add()
+ summary_value.simple_value = value.item()
+ summary_value.tag = name
+ self.writer.add_summary(summary, step)
+ self.writer.flush()
+
def on_train_begin(self, logs=None):
"""Checks if histogram summaries can be run."""
@@ -828,6 +918,16 @@ class TensorBoard(Callback):
raise ValueError(
'If printing histograms, validation data must have length > 0.')
+ def on_batch_end(self, batch, logs=None):
+ """Writes scalar summaries for metrics on every training batch."""
+ # Don't output batch_size and batch number as TensorBoard summaries.
+ logs = logs or {}
+ batch_logs = {('batch_' + k): v
+ for k, v in logs.items()
+ if k not in ['batch', 'size']}
+ self._write_custom_summaries(self._total_batches_seen, batch_logs)
+ self._total_batches_seen += 1
+
def on_epoch_begin(self, epoch, logs=None):
"""Add histogram op to Model test_function callbacks, reset batch count."""
@@ -844,7 +944,12 @@ class TensorBoard(Callback):
def on_epoch_end(self, epoch, logs=None):
"""Checks if summary ops should run next epoch, logs scalar summaries."""
- logs = logs or {}
+ # Don't output batch_size and batch number as TensorBoard summaries.
+ logs = {('epoch_' + k): v
+ for k, v in logs.items()
+ if k not in ['batch', 'size']}
+ self._write_custom_summaries(epoch, logs)
# pop the histogram summary op after each epoch
if self.histogram_freq:
@@ -853,15 +958,45 @@ class TensorBoard(Callback):
if self.merged in self.model.test_function.fetch_callbacks:
self.model.test_function.fetch_callbacks.pop(self.merged)
- for name, value in logs.items():
- if name in ['batch', 'size']:
- continue
- summary = tf_summary.Summary()
- summary_value = summary.value.add()
- summary_value.simple_value = value.item()
- summary_value.tag = name
- self.writer.add_summary(summary, epoch)
- self.writer.flush()
+ if self.embeddings_data is None and self.embeddings_freq:
+ raise ValueError('To visualize embeddings, embeddings_data must '
+ 'be provided.')
+
+ if self.embeddings_freq and self.embeddings_data is not None:
+ if epoch % self.embeddings_freq == 0:
+ # We need a second forward-pass here because we're passing
+ # the `embeddings_data` explicitly. This design allows to pass
+ # arbitrary data as `embeddings_data` and results from the fact
+ # that we need to know the size of the `tf.Variable`s which
+ # hold the embeddings in `set_model`. At this point, however,
+ # the `validation_data` is not yet set.
+
+ embeddings_data = self.embeddings_data
+ n_samples = embeddings_data[0].shape[0]
+ i = 0
+ while i < n_samples:
+ step = min(self.batch_size, n_samples - i)
+ batch = slice(i, i + step)
+
+ if isinstance(self.model.input, list):
+ feed_dict = {
+ model_input: embeddings_data[idx][batch]
+ for idx, model_input in enumerate(self.model.input)
+ }
+ else:
+ feed_dict = {self.model.input: embeddings_data[0][batch]}
+
+ feed_dict.update({self.batch_id: i, self.step: step})
+
+ if self.model.uses_learning_phase:
+ feed_dict[K.learning_phase()] = False
+
+ self.sess.run(self.assign_embeddings, feed_dict=feed_dict)
+ self.saver.save(self.sess,
+ os.path.join(self.log_dir, 'keras_embedding.ckpt'),
+ epoch)
+
+ i += self.batch_size
def on_train_end(self, logs=None):
self.writer.close()
diff --git a/tensorflow/python/keras/callbacks_test.py b/tensorflow/python/keras/callbacks_test.py
index 244d48591c..7d830078ce 100644
--- a/tensorflow/python/keras/callbacks_test.py
+++ b/tensorflow/python/keras/callbacks_test.py
@@ -29,16 +29,10 @@ import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
-from tensorflow.python.eager import context
-from tensorflow.python.framework import test_util
from tensorflow.python.keras import testing_utils
-from tensorflow.python.ops.resource_variable_ops import ResourceVariable as Variable
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary.writer import writer_cache
-from tensorflow.python.training.adam import AdamOptimizer
-from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
-
try:
import h5py # pylint:disable=g-import-not-at-top
@@ -376,76 +370,6 @@ class KerasCallbacksTest(test.TestCase):
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
- @test_util.run_in_graph_and_eager_modes
- def test_TF_LearningRateScheduler_Adam(self):
- with self.test_session():
- with context.eager_mode():
- np.random.seed(1337)
- (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
- train_samples=TRAIN_SAMPLES,
- test_samples=TEST_SAMPLES,
- input_shape=(INPUT_DIM,),
- num_classes=NUM_CLASSES)
- y_test = keras.utils.to_categorical(y_test)
- y_train = keras.utils.to_categorical(y_train)
- model = keras.models.Sequential()
- model.add(
- keras.layers.Dense(
- NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
- model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
- model.compile(
- loss='categorical_crossentropy',
- optimizer=AdamOptimizer(),
- metrics=['accuracy'])
- cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
- model.fit(
- x_train,
- y_train,
- batch_size=BATCH_SIZE,
- validation_data=(x_test, y_test),
- callbacks=cbks,
- epochs=5,
- verbose=0)
- opt_lr = model.optimizer.optimizer._lr
- self.assertLess(
- float(keras.backend.get_value(
- Variable(opt_lr))) - 0.2, keras.backend.epsilon())
-
- @test_util.run_in_graph_and_eager_modes
- def test_TF_LearningRateScheduler_GradientDescent(self):
- with self.test_session():
- with context.eager_mode():
- np.random.seed(1337)
- (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
- train_samples=TRAIN_SAMPLES,
- test_samples=TEST_SAMPLES,
- input_shape=(INPUT_DIM,),
- num_classes=NUM_CLASSES)
- y_test = keras.utils.to_categorical(y_test)
- y_train = keras.utils.to_categorical(y_train)
- model = keras.models.Sequential()
- model.add(
- keras.layers.Dense(
- NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
- model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
- model.compile(
- loss='categorical_crossentropy',
- optimizer=GradientDescentOptimizer(1e-3),
- metrics=['accuracy'])
- cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
- model.fit(
- x_train,
- y_train,
- batch_size=BATCH_SIZE,
- validation_data=(x_test, y_test),
- callbacks=cbks,
- epochs=5,
- verbose=0)
- opt_lr = model.optimizer.optimizer._learning_rate
- self.assertLess(
- float(keras.backend.get_value(
- Variable(opt_lr))) - 0.2, keras.backend.epsilon())
-
def test_ReduceLROnPlateau(self):
with self.test_session():
np.random.seed(1337)
@@ -1172,6 +1096,74 @@ class KerasCallbacksTest(test.TestCase):
assert os.path.exists(temp_dir)
+ def test_Tensorboard_batch_logging(self):
+
+ class FileWriterStub(object):
+
+ def __init__(self, logdir, graph=None):
+ self.logdir = logdir
+ self.graph = graph
+ self.batches_logged = []
+ self.summary_values = []
+ self.summary_tags = []
+
+ def add_summary(self, summary, step):
+ self.summary_values.append(summary.value[0].simple_value)
+ self.summary_tags.append(summary.value[0].tag)
+ self.batches_logged.append(step)
+
+ def flush(self):
+ pass
+
+ def close(self):
+ pass
+
+ logdir = 'fake_dir'
+
+ # log every batch
+ tb_cbk = keras.callbacks.TensorBoard(logdir)
+ tb_cbk.writer = FileWriterStub(logdir)
+
+ for batch in range(5):
+ tb_cbk.on_batch_end(batch, {'acc': np.float32(batch)})
+ self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
+ self.assertEqual(tb_cbk.writer.summary_values, [0., 1., 2., 3., 4.])
+ self.assertEqual(tb_cbk.writer.summary_tags, ['batch_acc'] * 5)
+
+ def test_Tensorboard_epoch_and_batch_logging(self):
+
+ class FileWriterStub(object):
+
+ def __init__(self, logdir, graph=None):
+ self.logdir = logdir
+ self.graph = graph
+
+ def add_summary(self, summary, step):
+ if 'batch_' in summary.value[0].tag:
+ self.batch_summary = (step, summary)
+ elif 'epoch_' in summary.value[0].tag:
+ self.epoch_summary = (step, summary)
+
+ def flush(self):
+ pass
+
+ def close(self):
+ pass
+
+ logdir = 'fake_dir'
+
+ tb_cbk = keras.callbacks.TensorBoard(logdir)
+ tb_cbk.writer = FileWriterStub(logdir)
+
+ tb_cbk.on_batch_end(0, {'acc': np.float32(5.0)})
+ tb_cbk.on_epoch_end(0, {'acc': np.float32(10.0)})
+ batch_step, batch_summary = tb_cbk.writer.batch_summary
+ self.assertEqual(batch_step, 0)
+ self.assertEqual(batch_summary.value[0].simple_value, 5.0)
+ epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
+ self.assertEqual(epoch_step, 0)
+ self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
+
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
diff --git a/tensorflow/python/keras/engine/base_layer.py b/tensorflow/python/keras/engine/base_layer.py
index e02792208b..b41f6ee03b 100644
--- a/tensorflow/python/keras/engine/base_layer.py
+++ b/tensorflow/python/keras/engine/base_layer.py
@@ -723,9 +723,17 @@ class Layer(checkpointable.CheckpointableBase):
self._dtype = input_list[0].dtype.base_dtype.name
except AttributeError:
pass
+
if all(hasattr(x, 'shape') for x in input_list):
input_shapes = nest.map_structure(lambda x: x.shape, inputs)
- self.build(input_shapes)
+
+ if (not hasattr(self, '_is_graph_network') or
+ self.__class__.__name__ == 'Sequential'):
+ # Only if self is a layer or an instance of a sequential model do we
+ # need to build it.
+ self.build(input_shapes)
+ # We must set self.built since user defined build functions are not
+ # constrained to set self.built.
self.built = True
# Check input assumptions set after layer building, e.g. input shape.
diff --git a/tensorflow/python/keras/engine/network.py b/tensorflow/python/keras/engine/network.py
index a4d96de74f..752e9963ca 100644
--- a/tensorflow/python/keras/engine/network.py
+++ b/tensorflow/python/keras/engine/network.py
@@ -318,8 +318,8 @@ class Network(base_layer.Layer):
else:
self._expects_training_arg = False
self._call_convention = self._determine_call_convention(call_argspec)
- self.outputs = None
- self.inputs = None
+ self.outputs = []
+ self.inputs = []
self.built = False
def _determine_call_convention(self, call_argspec):
diff --git a/tensorflow/python/keras/engine/sequential.py b/tensorflow/python/keras/engine/sequential.py
index 371504a503..41cdfda660 100644
--- a/tensorflow/python/keras/engine/sequential.py
+++ b/tensorflow/python/keras/engine/sequential.py
@@ -213,13 +213,31 @@ class Sequential(Model):
self.outputs = [self.layers[-1].output]
self.build()
- @checkpointable.no_automatic_dependency_tracking
def build(self, input_shape=None):
- if input_shape and not self.inputs:
- batch_shape = tuple(input_shape)
+ self._set_inputs_and_outputs(input_shape=input_shape)
+
+ def symbolic_set_inputs(self, inputs):
+ self._set_inputs_and_outputs(tensor=inputs)
+
+ @checkpointable.no_automatic_dependency_tracking
+ def _set_inputs_and_outputs(self, input_shape=None, tensor=None):
+ """Set model's input and output specs based on the input received.
+
+ If `tensor` is provided, `input_shape` is not required.
+
+ Args:
+ input_shape: Optional shape of input.
+ tensor: Optional existing tensor to wrap into the `Input` layer.
+ """
+ if not self.inputs:
dtype = K.floatx()
- x = Input(
- batch_shape=batch_shape, dtype=dtype, name=self.name + '_input')
+ if tensor is not None:
+ batch_shape = (None,) + tuple(tensor.get_shape().as_list()[1:])
+ x = Input(dtype=dtype, name=self.name + '_input', tensor=tensor)
+ elif input_shape is not None:
+ batch_shape = tuple(input_shape)
+ x = Input(
+ batch_shape=batch_shape, dtype=dtype, name=self.name + '_input')
self.inputs = [x]
for layer in self._layers:
x = layer(x)
diff --git a/tensorflow/python/keras/engine/sequential_test.py b/tensorflow/python/keras/engine/sequential_test.py
index 0f54e29cee..4f4adca333 100644
--- a/tensorflow/python/keras/engine/sequential_test.py
+++ b/tensorflow/python/keras/engine/sequential_test.py
@@ -22,7 +22,6 @@ import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
-from tensorflow.python.eager import context
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@@ -104,9 +103,6 @@ class TestSequential(test.TestCase):
@tf_test_util.run_in_graph_and_eager_modes
def test_sequential_deferred_build_with_dataset_iterators(self):
- if not context.executing_eagerly():
- # TODO(psv/fchollet): Add support for this use case in graph mode.
- return
num_hidden = 5
input_dim = 3
num_classes = 2
@@ -136,6 +132,48 @@ class TestSequential(test.TestCase):
[None, num_classes])
self.assertEqual(len(model.weights), 2 * 2)
+ def test_training_and_eval_methods_on_symbolic_tensors(self):
+ with self.test_session():
+
+ def create_model():
+ model = keras.Sequential()
+ model.add(keras.layers.Dense(10, activation='relu'))
+ model.add(keras.layers.Dense(4, activation='softmax'))
+
+ model.compile(
+ optimizer=rmsprop.RMSPropOptimizer(1e-3),
+ loss='categorical_crossentropy',
+ metrics=['accuracy'])
+ return model
+
+ inputs = keras.backend.zeros(shape=(10, 3))
+ targets = keras.backend.zeros(shape=(10, 4))
+
+ model = create_model()
+ model.fit(inputs, targets, epochs=10, steps_per_epoch=30)
+
+ model = create_model()
+ model.evaluate(inputs, targets, steps=2, verbose=0)
+
+ model = create_model()
+ model.predict(inputs, steps=2)
+
+ model = create_model()
+ model.train_on_batch(inputs, targets)
+
+ model = create_model()
+ model.test_on_batch(inputs, targets)
+
+ model = create_model()
+ model.fit(
+ inputs,
+ targets,
+ epochs=1,
+ steps_per_epoch=2,
+ verbose=0,
+ validation_data=(inputs, targets),
+ validation_steps=2)
+
@tf_test_util.run_in_graph_and_eager_modes
def test_invalid_use_cases(self):
# Added objects must be layer instances
diff --git a/tensorflow/python/keras/engine/training.py b/tensorflow/python/keras/engine/training.py
index bd03f4871f..4df739254b 100644
--- a/tensorflow/python/keras/engine/training.py
+++ b/tensorflow/python/keras/engine/training.py
@@ -27,6 +27,7 @@ from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import losses
@@ -43,6 +44,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training.checkpointable import base as checkpointable
+from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
@@ -217,10 +219,9 @@ class Model(Network):
for name in self.output_names:
if name not in loss:
logging.warning(
- 'Output "' + name + '" missing from loss dictionary. '
- 'We assume this was done on purpose, '
- 'and we will not be expecting '
- 'any data to be passed to "' + name + '" during training.')
+ 'Output "' + name + '" missing from loss dictionary. We assume '
+ 'this was done on purpose. The fit and evaluate APIs will not be '
+ 'expecting any data to be passed to "' + name + '".')
loss_functions.append(losses.get(loss.get(name)))
elif isinstance(loss, list):
if len(loss) != len(self.outputs):
@@ -561,6 +562,95 @@ class Model(Network):
trainable_weights = self.trainable_weights
self._collected_trainable_weights = trainable_weights
+ def build(self, input_shape):
+ """Build the model based on input shapes received.
+
+ This is to be used for subclassed models, which do not know at instantiation
+ time what their inputs look like.
+
+ Args:
+ input_shape: Single tuple, TensorShape, or list of shapes, where shapes
+ are tuples, integers, or TensorShapes.
+
+ Raises:
+ ValueError:
+ 1. In case of invalid user-provided data (not of type tuple,
+ list, or TensorShape).
+ 2. If the model requires call arguments that are agnostic
+ to the input shapes (positional or kwarg in call signature).
+ 3. If not all layers were properly built.
+ 4. If float type inputs are not supported within the layers.
+
+ In each of these cases, the user should build their model by calling it
+ on real tensor data.
+ """
+ if self._is_graph_network:
+ self.built = True
+ return
+
+ # If subclass network
+ if input_shape is None:
+ raise ValueError('Input shape must be defined when calling build on a '
+ 'model subclass network.')
+ valid_types = (tuple, list, tensor_shape.TensorShape)
+ if not isinstance(input_shape, valid_types):
+ raise ValueError('Specified input shape is not one of the valid types. '
+ 'Please specify a batch input shape of type tuple or '
+ 'list of input shapes. User provided '
+ 'input type: {}'.format(type(input_shape)))
+
+ def _generate_dummy_data_from_shape(shape):
+ if isinstance(shape, tensor_shape.TensorShape):
+ shape = shape.as_list()
+
+ # Replace Nones in input shape with dummy `1` value
+ shape = [x.value if isinstance(x, tensor_shape.Dimension) else x
+ for x in shape]
+ shape = [1 if x is None else x for x in shape]
+ return array_ops.ones(shape, dtype=K.floatx())
+
+ if input_shape and not self.inputs:
+ if isinstance(input_shape, list):
+ # List of input shapes
+ x = [_generate_dummy_data_from_shape(shape) for shape in input_shape]
+ else:
+ x = _generate_dummy_data_from_shape(input_shape)
+
+ kwargs = {}
+ num_call_args = len(tf_inspect.getargspec(self.call).args)
+ if self._expects_training_arg and num_call_args == 3:
+ # Has call signature of call(self, input, training)
+ kwargs['training'] = False
+ elif num_call_args > 2:
+ # Has invalid call signature of call(self, input, *args, **kwargs)
+ raise ValueError('Currently, you cannot build your model if it has '
+ 'positional or keyword arguments that are not '
+ 'inputs to the model, but are required for its '
+ '`call` method. Instead, in order to instantiate '
+ 'and build your model, `call` your model on real '
+ 'tensor data with all expected call arguments.')
+
+ try:
+ self.call(x, **kwargs)
+ except (errors.InvalidArgumentError, TypeError):
+ raise ValueError('You cannot build your model by calling `build` '
+ 'if your layers do not support float type inputs. '
+ 'Instead, in order to instantiate and build your '
+ 'model, `call` your model on real tensor data (of '
+ 'the correct dtype).')
+
+ if self._layers:
+ self._track_layers(self._layers)
+ if self.layers:
+ for layer in self.layers:
+ if not layer.built:
+ raise ValueError('Layer: {} was not built in your model. Calling '
+ '`build` manually on a subclassed model is only '
+ 'allowed for models with a static topology. '
+ 'In this case, you can build your model by '
+ 'calling it on real tensor data.'.format(layer))
+ self.built = True
+
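A minimal sketch of the new deferred build path for a subclassed model; the layer sizes are made up. Internally a dummy batch of ones is pushed through call() to create the weights.

from tensorflow.python import keras


class TwoLayerModel(keras.Model):

  def __init__(self):
    super(TwoLayerModel, self).__init__()
    self.dense1 = keras.layers.Dense(8, activation='relu')
    self.dense2 = keras.layers.Dense(1)

  def call(self, inputs):
    return self.dense2(self.dense1(inputs))


model = TwoLayerModel()
model.build(input_shape=(None, 4))  # the None batch dimension becomes a dummy 1
print(len(model.weights))           # 4: kernel and bias for each Dense layer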
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
@@ -897,7 +987,11 @@ class Model(Network):
for output_shape, loss_fn in zip(self._feed_output_shapes,
self._feed_loss_fns):
if loss_fn is losses.sparse_categorical_crossentropy:
- feed_output_shapes.append(output_shape[:-1] + (1,))
+ if K.image_data_format() == 'channels_first':
+ feed_output_shapes.append(
+ (output_shape[0], 1) + output_shape[2:])
+ else:
+ feed_output_shapes.append(output_shape[:-1] + (1,))
elif (not hasattr(loss_fn, '__name__') or
getattr(losses, loss_fn.__name__, None) is None):
# If `loss_fn` is not a function (e.g. callable class)
@@ -988,10 +1082,14 @@ class Model(Network):
inputs = inputs[0]
if tensor_util.is_tensor(inputs):
- input_shape = (None,) + tuple(inputs.get_shape().as_list()[1:])
+ if context.executing_eagerly():
+ input_shape = (None,) + tuple(inputs.get_shape().as_list()[1:])
+ self.build(input_shape=input_shape)
+ else:
+ self.symbolic_set_inputs(inputs)
else:
input_shape = (None,) + inputs.shape[1:]
- self.build(input_shape=input_shape)
+ self.build(input_shape=input_shape)
elif context.executing_eagerly():
self._eager_set_inputs(inputs)
else:
diff --git a/tensorflow/python/keras/engine/training_eager.py b/tensorflow/python/keras/engine/training_eager.py
index c78684c9f4..397de42985 100644
--- a/tensorflow/python/keras/engine/training_eager.py
+++ b/tensorflow/python/keras/engine/training_eager.py
@@ -34,7 +34,6 @@ from tensorflow.python.keras import losses
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import generic_utils
-from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
@@ -194,7 +193,8 @@ def iterator_fit_loop(model,
callbacks=None,
callback_metrics=None,
validation_steps=None,
- do_validation=False):
+ do_validation=False,
+ batch_size=None):
"""Fit function for eager execution when input is given as dataset iterator.
Updates the given epoch logs.
@@ -224,16 +224,23 @@ def iterator_fit_loop(model,
validation_steps: Number of steps to run validation for (only if doing
validation from data tensors). Ignored with default value of `None`.
do_validation: Boolean value indicating whether we should do validation.
+ batch_size: int, val_inputs and val_targets will be evaluated batch by
+ batch with size batch_size if they are arrays.
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
"""
assert isinstance(inputs, iterator_ops.EagerIterator)
+
+  # Make sure either (x, y) or (x, y, sample_weights) is provided.
+ if (not isinstance(inputs.output_shapes, (list, tuple)) or
+ len(inputs.output_shapes) not in (2, 3)):
+    raise ValueError('Please provide either inputs and targets '
+                     'or inputs, targets, and sample_weights')
+
for step_index in range(steps_per_epoch):
- batch_logs = {}
- batch_logs['batch'] = step_index
- batch_logs['size'] = 1
+ batch_logs = {'batch': step_index, 'size': 1}
callbacks.on_batch_begin(step_index, batch_logs)
# Get data from the iterator.
@@ -247,19 +254,21 @@ def iterator_fit_loop(model,
'batches (in this case, %d batches).' % steps_per_epoch * epochs)
break
- if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
- raise ValueError('Please provide data as a list or tuple of 2 elements '
- ' - input and target pair. Received %s' % next_element)
- x, y = next_element
+ if len(inputs.output_shapes) == 2:
+ x, y = next_element
+ sample_weights = None
+ else:
+ x, y, sample_weights = next_element
# Validate and standardize data.
x, y, sample_weights = model._standardize_user_data(
- x, y, class_weight=class_weight)
+ x, y, sample_weight=sample_weights, class_weight=class_weight)
x = training_utils.cast_if_floating_dtype(x)
y = training_utils.cast_if_floating_dtype(y)
if sample_weights:
sample_weights = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
+ training_utils.cast_if_floating_dtype(
+ ops.convert_to_tensor(val, dtype=backend.floatx()))
if val is not None else None for val in sample_weights
]
@@ -307,122 +316,8 @@ def iterator_fit_loop(model,
val_targets,
sample_weights=val_sample_weights,
steps=validation_steps,
- verbose=0)
- if not isinstance(val_outs, list):
- val_outs = [val_outs]
- # Same labels assumed.
- for l, o in zip(out_labels, val_outs):
- epoch_logs['val_' + l] = o
-
-
-def batch_fit_loop(model,
- inputs,
- targets,
- epoch_logs,
- index_array,
- out_labels,
- callback_model,
- batch_size,
- sample_weights=None,
- val_inputs=None,
- val_targets=None,
- val_sample_weights=None,
- callbacks=None,
- shuffle=True,
- num_train_samples=None,
- do_validation=False):
- """Fit function for eager execution when input is given as arrays or tensors.
-
- Updates the given epoch logs.
-
- Arguments:
- model: Instance of the `Model`.
- inputs: List of input arrays.
- targets: List of target arrays.
- epoch_logs: Dictionary of logs from every epoch.
- index_array: Index array generated from number of training samples.
- out_labels: Output labels generated from model metric names.
- callback_model: Instance of `Model` to callback.
- batch_size: Integer batch size or None if unknown.
- sample_weights: Optional list of sample weight arrays.
- val_inputs: Input data for validation.
- val_targets: Target data for validation.
- val_sample_weights: Sample weight data for validation.
- callbacks: List of callbacks to be called during training.
- shuffle: Whether to shuffle the data at the beginning of each epoch.
- num_train_samples: Integer number of training samples.
- do_validation: Boolean value indicating whether we should do validation.
- """
- # TODO(psv): Create a dataset iterator instead of manually creating batches
- # here and in batch_test_loop, batch_predict_loop.
- if shuffle == 'batch':
- index_array = model._batch_shuffle(index_array, batch_size)
- elif shuffle:
- np.random.shuffle(index_array)
-
- batches = generic_utils.make_batches(num_train_samples, batch_size)
-
- for batch_index, (batch_start, batch_end) in enumerate(batches):
- batch_ids = index_array[batch_start:batch_end]
- inputs_batch = slice_arrays(inputs, batch_ids, contiguous=not shuffle)
- targets_batch = slice_arrays(targets, batch_ids, contiguous=not shuffle)
- if sample_weights:
- sample_weights_batch = slice_arrays(
- sample_weights, batch_ids, contiguous=not shuffle)
- else:
- sample_weights_batch = None
- batch_logs = {}
- batch_logs['batch'] = batch_index
- batch_logs['size'] = len(batch_ids)
-
- callbacks.on_batch_begin(batch_index, batch_logs)
-
- inputs_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- for val in inputs_batch
- ]
- targets_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- for val in targets_batch
- ]
- if sample_weights:
- sample_weights_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- if val is not None else None for val in sample_weights_batch
- ]
-
- outs, loss, loss_metrics = _process_single_batch(
- model,
- inputs_batch,
- targets_batch,
- sample_weights=sample_weights_batch,
- training=True)
-
- if not isinstance(outs, list):
- outs = [outs]
-
- for l, o in zip(out_labels, outs):
- batch_logs[l] = o
- # Required for eager execution
- metrics_results = _eager_metrics_fn(model, outs, targets_batch)
- batch_logs['loss'] = tensor_util.constant_value(backend.mean(loss))
-
- for k, v in zip(model.metrics_names,
- [backend.mean(loss)] + loss_metrics + metrics_results):
- batch_logs[k] = tensor_util.constant_value(v)
- callbacks.on_batch_end(batch_index, batch_logs)
- if callback_model.stop_training:
- break
-
- if batch_index == len(batches) - 1: # Last batch.
- if do_validation:
- val_outs = test_loop(
- model,
- val_inputs,
- val_targets,
- sample_weights=val_sample_weights,
- batch_size=batch_size,
- verbose=0)
+ verbose=0,
+ batch_size=batch_size)
if not isinstance(val_outs, list):
val_outs = [val_outs]
# Same labels assumed.
@@ -451,6 +346,11 @@ def iterator_test_loop(model, inputs, steps, verbose=0):
expectations of the model.
"""
assert isinstance(inputs, iterator_ops.EagerIterator)
+  # Make sure either (x, y) or (x, y, sample_weights) is provided.
+ if (not isinstance(inputs.output_shapes, (list, tuple)) or
+ len(inputs.output_shapes) < 2 or len(inputs.output_shapes) > 3):
+    raise ValueError('Please provide either inputs and targets '
+                     'or inputs, targets, and sample_weights')
outs = []
num_samples = 0
if verbose == 1:
@@ -466,10 +366,11 @@ def iterator_test_loop(model, inputs, steps, verbose=0):
'(in this case, %d batches).', steps)
break
- if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
- raise ValueError('Please provide data as a list or tuple of 2 elements '
- ' - input and target pair. Received %s' % next_element)
- x, y = next_element
+ if len(inputs.output_shapes) == 2:
+ x, y = next_element
+ sample_weights = None
+ else:
+ x, y, sample_weights = next_element
# Validate and standardize data.
x, y, sample_weights = model._standardize_user_data(x, y)
@@ -512,94 +413,6 @@ def iterator_test_loop(model, inputs, steps, verbose=0):
return outs
-def batch_test_loop(model,
- inputs,
- targets,
- batch_size,
- sample_weights=None,
- verbose=0):
- """Test function for eager execution when input is given as arrays or tensors.
-
- Arguments:
- model: Model instance that is being evaluated in Eager mode.
- inputs: List of input arrays.
- targets: List of target arrays.
- batch_size: Integer batch size.
- sample_weights: Optional list of sample weight arrays.
- verbose: Verbosity mode.
-
- Returns:
- Scalar loss (if the model has a single output and no metrics)
- or list of scalars (if the model has multiple outputs
- and/or metrics). The attribute `model.metrics_names` will give you
- the display labels for the scalar outputs.
- """
- outs = []
- feed_data = inputs + targets
- if sample_weights:
- feed_data += sample_weights
- num_samples = training_utils.check_num_samples(
- feed_data, batch_size=batch_size)
- if verbose == 1:
- progbar = generic_utils.Progbar(target=num_samples)
- batches = generic_utils.make_batches(num_samples, batch_size)
- index_array = np.arange(num_samples)
- for batch_index, (batch_start, batch_end) in enumerate(batches):
- batch_ids = index_array[batch_start:batch_end]
- inputs_batch = slice_arrays(inputs, batch_ids)
- targets_batch = slice_arrays(targets, batch_ids)
- if sample_weights:
- sample_weights_batch = slice_arrays(sample_weights, batch_ids)
- else:
- sample_weights_batch = None
-
- inputs_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- for val in inputs_batch
- ]
- targets_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- for val in targets_batch
- ]
- if sample_weights:
- sample_weights_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- if val is not None else None for val in sample_weights_batch
- ]
-
- loss_outs, loss, loss_metrics = _model_loss(
- model,
- inputs_batch,
- targets_batch,
- sample_weights=sample_weights_batch,
- training=False)
- metrics_results = _eager_metrics_fn(model, loss_outs, targets_batch)
- batch_outs = []
- for _, v in zip(model.metrics_names,
- [backend.mean(loss)] + loss_metrics + metrics_results):
- batch_outs.append(tensor_util.constant_value(v))
-
- if isinstance(batch_outs, list):
- if batch_index == 0:
- for _ in enumerate(batch_outs):
- outs.append(0.)
- for i, batch_out in enumerate(batch_outs):
- outs[i] += batch_out * len(batch_ids)
- else:
- if batch_index == 0:
- outs.append(0.)
- outs[0] += batch_outs * len(batch_ids)
-
- if verbose == 1:
- progbar.update(batch_end)
-
- for i in range(len(outs)):
- outs[i] /= num_samples
- if len(outs) == 1:
- return outs[0]
- return outs
-
-
def iterator_predict_loop(model, inputs, steps, verbose=0):
"""Predict function for eager execution when input is dataset iterator.
@@ -619,6 +432,12 @@ def iterator_predict_loop(model, inputs, steps, verbose=0):
expectations of the model.
"""
assert isinstance(inputs, iterator_ops.EagerIterator)
+ if not isinstance(inputs.output_shapes,
+ (list, tuple)) or len(inputs.output_shapes) > 2:
+ raise ValueError(
+ 'Please provide data as a list or tuple of 1 or 2 elements '
+        '- input, or input and target pair. Received %s. We do not use the '
+ '`target` value here.' % inputs.output_shapes)
outs = []
if verbose == 1:
progbar = generic_utils.Progbar(target=steps)
@@ -634,12 +453,8 @@ def iterator_predict_loop(model, inputs, steps, verbose=0):
'batches (in this case, %d batches).', steps)
break
- if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
- raise ValueError(
- 'Please provide data as a list or tuple of 2 elements '
- ' - input and target pair. Received %s. We do not use the '
- '`target` value here.' % next_element)
- x, _ = next_element
+    # The iterator yields a tuple; its first element holds the inputs.
+ x = next_element[0]
# Validate and standardize data.
x, _, _ = model._standardize_user_data(x)
@@ -670,99 +485,6 @@ def iterator_predict_loop(model, inputs, steps, verbose=0):
return outs
-def batch_predict_loop(model, inputs, batch_size, verbose=0):
- """Predict function for eager execution when input is arrays or tensors.
-
- Arguments:
- model: Instance of `Model`.
- inputs: List of input arrays.
- batch_size: Integer batch size.
- verbose: Verbosity mode.
-
- Returns:
- Array of predictions (if the model has a single output)
- or list of arrays of predictions (if the model has multiple outputs).
- """
- outs = []
- num_samples = training_utils.check_num_samples(inputs, batch_size)
- if verbose == 1:
- progbar = generic_utils.Progbar(target=num_samples)
- batches = generic_utils.make_batches(num_samples, batch_size)
- index_array = np.arange(num_samples)
- for batch_index, (batch_start, batch_end) in enumerate(batches):
- batch_ids = index_array[batch_start:batch_end]
- inputs_batch = slice_arrays(inputs, batch_ids)
-
- inputs_batch = [
- ops.convert_to_tensor(val, dtype=backend.floatx())
- for val in inputs_batch
- ]
-
- if len(inputs_batch) == 1:
- if model._expects_training_arg:
- batch_outs = model.call(inputs_batch[0], training=False)
- else:
- batch_outs = model.call(inputs_batch[0])
- else:
- if model._expects_training_arg:
- batch_outs = model.call(inputs_batch, training=False)
- else:
- batch_outs = model.call(inputs_batch)
-
- if not isinstance(batch_outs, list):
- batch_outs = [batch_outs]
- if batch_index == 0:
- # Pre-allocate the results arrays.
- for batch_out in batch_outs:
- dims = batch_out.shape[1:].dims
- dims_list = [d.value for d in dims]
- shape = (num_samples,) + tuple(dims_list)
- outs.append(np.zeros(shape, dtype=batch_out.dtype.as_numpy_dtype))
- for i, batch_out in enumerate(batch_outs):
- outs[i][batch_start:batch_end] = batch_out
- if verbose == 1:
- progbar.update(batch_end)
-
- if len(outs) == 1:
- return outs[0]
- return outs
-
-
-def slice_arrays(arrays, indices, contiguous=True):
- """Slices batches out of provided arrays (workaround for eager tensors).
-
- Unfortunately eager tensors don't have the same slicing behavior as
- Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),
- hence we cannot use `generic_utils.slice_arrays` directly
- and we have to implement this workaround based on `concat`. This has a
- performance cost.
-
- Arguments:
- arrays: Single array or list of arrays.
- indices: List of indices in the array that should be included in the output
- batch.
- contiguous: Boolean flag indicating whether the indices are contiguous.
-
- Returns:
- Slice of data (either single array or list of arrays).
- """
- if any(tensor_util.is_tensor(x) for x in arrays):
- converted_to_list = False
- if not isinstance(arrays, list):
- converted_to_list = True
- arrays = [arrays]
- if not contiguous:
- entries = [[x[i:i + 1] for i in indices] for x in arrays]
- slices = [array_ops.concat(x, axis=0) for x in entries]
- else:
- slices = [x[indices[0]:indices[-1] + 1] for x in arrays]
- if converted_to_list:
- slices = slices[0]
- return slices
- else:
- return generic_utils.slice_arrays(arrays, indices)
-
-
def _process_single_batch(model,
inputs,
targets,
@@ -935,19 +657,24 @@ def fit_loop(model,
Raises:
ValueError: In case of invalid argument values.
"""
+ # Convert training inputs to an EagerIterator
+ inputs, steps_per_epoch = training_utils.convert_to_iterator(
+ x=inputs,
+ y=targets,
+ sample_weights=sample_weights,
+ batch_size=batch_size,
+ steps_per_epoch=steps_per_epoch,
+ epochs=epochs,
+ shuffle=shuffle)
# Required for eager execution
with backend.learning_phase_scope(1):
do_validation = False
if val_inputs:
do_validation = True
- if (steps_per_epoch is None and verbose and inputs and
- hasattr(inputs[0], 'shape') and hasattr(val_inputs[0], 'shape')):
- print('Train on %d samples, validate on %d samples' %
- (inputs[0].shape[0], val_inputs[0].shape[0]))
num_train_samples = None
out_labels = None
- if steps_per_epoch is None or model._is_compiled:
+ if model._is_compiled:
out_labels = model.metrics_names
if do_validation:
callback_metrics = copy.copy(out_labels) + [
@@ -956,28 +683,10 @@ def fit_loop(model,
else:
callback_metrics = copy.copy(out_labels)
- if steps_per_epoch is None:
- if sample_weights:
- feed_data = inputs + targets + sample_weights
- else:
- feed_data = inputs + targets
- num_train_samples = training_utils.check_num_samples(
- feed_data,
- batch_size=batch_size,
- steps=steps_per_epoch,
- steps_name='steps_per_epoch')
-
- if num_train_samples is not None:
- index_array = np.arange(num_train_samples)
-
model.history = cbks.History()
callbacks = [cbks.BaseLogger()] + (callbacks or []) + [model.history]
if verbose:
- if steps_per_epoch is not None:
- count_mode = 'steps'
- else:
- count_mode = 'samples'
- callbacks += [cbks.ProgbarLogger(count_mode)]
+ callbacks += [cbks.ProgbarLogger('steps')]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self
@@ -1019,43 +728,24 @@ def fit_loop(model,
for epoch in range(initial_epoch, epochs):
callbacks.on_epoch_begin(epoch)
epoch_logs = {}
-
- if steps_per_epoch is not None:
- iterator_fit_loop(
- model,
- inputs,
- class_weight,
- steps_per_epoch=steps_per_epoch,
- callback_model=callback_model,
- out_labels=out_labels,
- epoch_logs=epoch_logs,
- val_inputs=val_inputs,
- val_targets=val_targets,
- val_sample_weights=val_sample_weights,
- epochs=epochs,
- verbose=verbose,
- callbacks=callbacks,
- callback_metrics=callback_metrics,
- validation_steps=validation_steps,
- do_validation=do_validation)
- else:
- batch_fit_loop(
- model,
- inputs,
- targets,
- epoch_logs=epoch_logs,
- index_array=index_array,
- out_labels=out_labels,
- callback_model=callback_model,
- batch_size=batch_size,
- sample_weights=sample_weights,
- val_inputs=val_inputs,
- val_targets=val_targets,
- val_sample_weights=val_sample_weights,
- callbacks=callbacks,
- shuffle=shuffle,
- num_train_samples=num_train_samples,
- do_validation=do_validation)
+ iterator_fit_loop(
+ model,
+ inputs,
+ class_weight,
+ steps_per_epoch=steps_per_epoch,
+ callback_model=callback_model,
+ out_labels=out_labels,
+ epoch_logs=epoch_logs,
+ val_inputs=val_inputs,
+ val_targets=val_targets,
+ val_sample_weights=val_sample_weights,
+ epochs=epochs,
+ verbose=verbose,
+ callbacks=callbacks,
+ callback_metrics=callback_metrics,
+ validation_steps=validation_steps,
+ do_validation=do_validation,
+ batch_size=batch_size)
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
@@ -1087,17 +777,14 @@ def test_loop(model, inputs, targets,
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
+ inputs, steps = training_utils.convert_to_iterator(
+ x=inputs,
+ y=targets,
+ sample_weights=sample_weights,
+ batch_size=batch_size,
+ steps_per_epoch=steps)
with backend.learning_phase_scope(0):
- if steps is not None:
- return iterator_test_loop(model, inputs, steps, verbose=verbose)
- else:
- return batch_test_loop(
- model,
- inputs,
- targets,
- batch_size=batch_size,
- sample_weights=sample_weights,
- verbose=verbose)
+ return iterator_test_loop(model, inputs, steps, verbose=verbose)
def predict_loop(model, inputs,
@@ -1121,8 +808,6 @@ def predict_loop(model, inputs,
(if the model has multiple outputs).
"""
with backend.learning_phase_scope(0):
- if steps is not None:
- return iterator_predict_loop(model, inputs, steps, verbose=verbose)
- else:
- return batch_predict_loop(
- model, inputs, batch_size=batch_size, verbose=verbose)
+ inputs, steps = training_utils.convert_to_iterator(
+ x=inputs, batch_size=batch_size, steps_per_epoch=steps)
+ return iterator_predict_loop(model, inputs, steps, verbose=verbose)
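With the array-based `batch_*_loop` helpers removed, the eager fit and evaluate loops now consume an iterator yielding `(x, y)` or `(x, y, sample_weights)` tuples, and the predict loop accepts `(x,)` or `(x, y)`. A hedged sketch (assumed usage, not part of the patch) of building such an iterator directly with `tf.data`:

    import numpy as np
    import tensorflow as tf

    x = np.random.random((10, 3)).astype('float32')
    y = np.random.random((10, 1)).astype('float32')
    sample_weights = np.ones((10,), dtype='float32')

    # Each element is an (x, y, sample_weights) tuple; the eager loops above
    # unpack two- or three-element tuples and reject anything else.
    dataset = tf.data.Dataset.from_tensor_slices((x, y, sample_weights))
    iterator = dataset.batch(2).make_one_shot_iterator()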
diff --git a/tensorflow/python/keras/engine/training_gpu_test.py b/tensorflow/python/keras/engine/training_gpu_test.py
new file mode 100644
index 0000000000..5825ce814f
--- /dev/null
+++ b/tensorflow/python/keras/engine/training_gpu_test.py
@@ -0,0 +1,125 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for training routines."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.python import keras
+from tensorflow.python.framework import test_util
+from tensorflow.python.keras import backend as K
+from tensorflow.python.keras.layers.convolutional import Conv2D
+from tensorflow.python.platform import test
+from tensorflow.python.training import rmsprop
+
+
+class TrainingGPUTest(test.TestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_model_with_crossentropy_losses_channels_first(self):
+ """Tests use of all crossentropy losses with `channels_first`.
+
+ Tests `sparse_categorical_crossentropy`, `categorical_crossentropy`,
+ and `binary_crossentropy`.
+ Verifies that evaluate gives the same result with either `channels_first`
+ or `channels_last` image_data_format.
+ """
+ def prepare_simple_model(input_tensor, loss_name, target):
+ axis = 1 if K.image_data_format() == 'channels_first' else -1
+ loss = None
+ num_channels = None
+ activation = None
+ if loss_name == 'sparse_categorical_crossentropy':
+ loss = lambda y_true, y_pred: K.sparse_categorical_crossentropy( # pylint: disable=g-long-lambda
+ y_true, y_pred, axis=axis)
+ num_channels = np.amax(target) + 1
+ activation = 'softmax'
+ elif loss_name == 'categorical_crossentropy':
+ loss = lambda y_true, y_pred: K.categorical_crossentropy( # pylint: disable=g-long-lambda
+ y_true, y_pred, axis=axis)
+ num_channels = target.shape[axis]
+ activation = 'softmax'
+ elif loss_name == 'binary_crossentropy':
+ loss = lambda y_true, y_pred: K.binary_crossentropy(y_true, y_pred) # pylint: disable=unnecessary-lambda
+ num_channels = target.shape[axis]
+ activation = 'sigmoid'
+ predictions = Conv2D(num_channels,
+ 1,
+ activation=activation,
+ kernel_initializer='ones',
+ bias_initializer='ones')(input_tensor)
+ simple_model = keras.models.Model(inputs=input_tensor,
+ outputs=predictions)
+ simple_model.compile(optimizer=rmsprop.RMSPropOptimizer(1e-3), loss=loss)
+ return simple_model
+
+ if test.is_gpu_available(cuda_only=True):
+ with self.test_session(use_gpu=True):
+ losses_to_test = ['sparse_categorical_crossentropy',
+ 'categorical_crossentropy', 'binary_crossentropy']
+
+ data_channels_first = np.array([[[[8., 7.1, 0.], [4.5, 2.6, 0.55],
+ [0.9, 4.2, 11.2]]]], dtype=np.float32)
+ # Labels for testing 4-class sparse_categorical_crossentropy, 4-class
+ # categorical_crossentropy, and 2-class binary_crossentropy:
+ labels_channels_first = [np.array([[[[0, 1, 3], [2, 1, 0], [2, 2, 1]]]], dtype=np.float32), # pylint: disable=line-too-long
+ np.array([[[[0, 1, 0], [0, 1, 0], [0, 0, 0]],
+ [[1, 0, 0], [0, 0, 1], [0, 1, 0]],
+ [[0, 0, 0], [1, 0, 0], [0, 0, 1]],
+ [[0, 0, 1], [0, 0, 0], [1, 0, 0]]]], dtype=np.float32), # pylint: disable=line-too-long
+ np.array([[[[0, 1, 0], [0, 1, 0], [0, 0, 1]],
+ [[1, 0, 1], [1, 0, 1], [1, 1, 0]]]], dtype=np.float32)] # pylint: disable=line-too-long
+ # Compute one loss for each loss function in the list `losses_to_test`:
+ loss_channels_last = [0., 0., 0.]
+ loss_channels_first = [0., 0., 0.]
+
+ old_data_format = K.image_data_format()
+
+ # Evaluate a simple network with channels last, with all three loss
+ # functions:
+ K.set_image_data_format('channels_last')
+ data = np.moveaxis(data_channels_first, 1, -1)
+ for index, loss_function in enumerate(losses_to_test):
+ labels = np.moveaxis(labels_channels_first[index], 1, -1)
+ inputs = keras.Input(shape=(3, 3, 1))
+ model = prepare_simple_model(inputs, loss_function, labels)
+ loss_channels_last[index] = model.evaluate(x=data, y=labels,
+ batch_size=1, verbose=0)
+
+ # Evaluate the same network with channels first, with all three loss
+ # functions:
+ K.set_image_data_format('channels_first')
+ data = data_channels_first
+ for index, loss_function in enumerate(losses_to_test):
+ labels = labels_channels_first[index]
+ inputs = keras.Input(shape=(1, 3, 3))
+ model = prepare_simple_model(inputs, loss_function, labels)
+ loss_channels_first[index] = model.evaluate(x=data, y=labels,
+ batch_size=1, verbose=0)
+
+ K.set_image_data_format(old_data_format)
+
+ np.testing.assert_allclose(loss_channels_first,
+ loss_channels_last,
+ err_msg='{}{}'.format(
+ 'Computed different losses for ',
+ 'channels_first and channels_last'))
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/python/keras/engine/training_test.py b/tensorflow/python/keras/engine/training_test.py
index d9e548f01f..301a6ca866 100644
--- a/tensorflow/python/keras/engine/training_test.py
+++ b/tensorflow/python/keras/engine/training_test.py
@@ -18,6 +18,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import logging
import os
import unittest
@@ -415,6 +416,28 @@ class TrainingTest(test.TestCase):
x2 = model.predict(val_a)
self.assertAllClose(x1, x2, atol=1e-7)
+ def test_compile_warning_for_loss_missing_output(self):
+ with self.test_session():
+ inp = keras.layers.Input(shape=(16,), name='input_a')
+ out_1 = keras.layers.Dense(8, name='dense_1')(inp)
+ out_2 = keras.layers.Dense(3, activation='softmax', name='dense_2')(out_1)
+ model = keras.models.Model(inputs=[inp], outputs=[out_1, out_2])
+
+ with test.mock.patch.object(logging, 'warning') as mock_log:
+ model.compile(
+ loss={
+ 'dense_2': 'categorical_crossentropy',
+ },
+ optimizer='rmsprop',
+ metrics={
+ 'dense_2': 'categorical_accuracy',
+ 'dense_1': 'categorical_accuracy',
+ })
+ msg = ('Output "dense_1" missing from loss dictionary. We assume this '
+ 'was done on purpose. The fit and evaluate APIs will not be '
+ 'expecting any data to be passed to "dense_1".')
+ self.assertRegexpMatches(str(mock_log.call_args), msg)
+
class LossWeightingTest(test.TestCase):
@@ -744,6 +767,22 @@ class LossMaskingTest(test.TestCase):
keras.backend.variable(weights), keras.backend.variable(mask)))
+class LearningPhaseTest(test.TestCase):
+
+ def test_empty_model_no_learning_phase(self):
+ with self.test_session():
+ model = keras.models.Sequential()
+ self.assertFalse(model.uses_learning_phase)
+
+ def test_dropout_has_learning_phase(self):
+ with self.test_session():
+ model = keras.models.Sequential()
+ model.add(keras.layers.Dense(2, input_dim=3))
+ model.add(keras.layers.Dropout(0.5))
+ model.add(keras.layers.Dense(2))
+ self.assertTrue(model.uses_learning_phase)
+
+
class TestDynamicTrainability(test.TestCase):
def test_trainable_warning(self):
diff --git a/tensorflow/python/keras/engine/training_utils.py b/tensorflow/python/keras/engine/training_utils.py
index 728a2b493b..dbbc87daf9 100644
--- a/tensorflow/python/keras/engine/training_utils.py
+++ b/tensorflow/python/keras/engine/training_utils.py
@@ -19,9 +19,11 @@ from __future__ import division
from __future__ import print_function
import copy
+import math
import numpy as np
+from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_util
@@ -31,6 +33,135 @@ from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.ops import math_ops
+def _map_nested(data, func):
+ """Maps each nested element using func."""
+ if isinstance(data, list):
+ return [_map_nested(nested_data, func) for nested_data in data]
+ elif isinstance(data, tuple):
+ return tuple(_map_nested(nested_data, func) for nested_data in data)
+ elif isinstance(data, dict):
+ return {
+ k: _map_nested(nested_data, func) for k, nested_data in data.items()
+ }
+ else:
+ return func(data)
+
+
+def _nested_all(data, cond_func):
+ """Checks if all elements in a nested structure satisfy cond_func."""
+ if isinstance(data, (tuple, list)):
+ return all([_nested_all(nested_data, cond_func) for nested_data in data])
+ elif isinstance(data, dict):
+ return all(
+ [_nested_all(nested_data, cond_func) for nested_data in data.values()])
+ else:
+ return cond_func(data)
+
+
+def _nested_any(data, cond_func):
+  """Checks if any element in a nested structure satisfies cond_func."""
+ if isinstance(data, (tuple, list)):
+ return any([_nested_any(nested_data, cond_func) for nested_data in data])
+ elif isinstance(data, dict):
+ return any(
+ [_nested_any(nested_data, cond_func) for nested_data in data.values()])
+ else:
+ return cond_func(data)
+
+
+def _convert_lists_to_tuples(data):
+ """Converts all lists to tuples, since Datasets expect tuples."""
+ if isinstance(data, (tuple, list)):
+ return tuple(_convert_lists_to_tuples(nested_data) for nested_data in data)
+ elif isinstance(data, dict):
+ return {
+ k: _convert_lists_to_tuples(nested_data)
+ for k, nested_data in data.items()
+ }
+ else:
+ return data
+
+
+def _get_batch_axis_size(data):
+ """Returns batch axis shape for nested data."""
+ if isinstance(data, (tuple, list)):
+ return _get_batch_axis_size(data[0])
+ elif isinstance(data, dict):
+ return _get_batch_axis_size(list(data.values()))
+ else:
+ return int(data.shape[0])
+
+
+def convert_to_iterator(x=None,
+ y=None,
+ sample_weights=None,
+ batch_size=None,
+ steps_per_epoch=None,
+ epochs=1,
+ shuffle=False):
+ """Converts NumPy arrays or EagerTensors to an EagerIterator.
+
+ Combines all provided data into a single EagerIterator.
+
+ Arguments:
+ x: NumPy array or EagerTensor, or list of Numpy arrays or EagerTensors
+ representing inputs to a model.
+ y: Optional. NumPy array or EagerTensor, or list of Numpy arrays or
+ EagerTensors representing targets of a model.
+ sample_weights: Optional NumPy array or EagerTensor representing sample
+ weights.
+ batch_size: Used to batch data and calculate how many steps EagerIterator
+ should take per epoch.
+ steps_per_epoch: If provided, how many steps EagerIterator should take per
+ epoch.
+ epochs: Epochs to repeat iterator for.
+ shuffle: Whether to shuffle data after each epoch.
+
+ Raises:
+ ValueError: if steps_per_epoch cannot be calculated from the data
+ provided.
+
+ Returns:
+ (Iterator, steps_per_epoch).
+
+ """
+ if isinstance(x, iterator_ops.EagerIterator):
+ return x, steps_per_epoch
+
+ if not _nested_any(sample_weights, lambda x: x is None):
+ data = (x, y, sample_weights)
+ elif not _nested_any(y, lambda x: x is None):
+ data = (x, y)
+ else:
+ # always wrap in a tuple, so we know y, sample_weights weren't set
+ # even when x has multiple elements
+ data = (x,)
+
+ data = _convert_lists_to_tuples(data)
+ if steps_per_epoch is None and batch_size is not None:
+ num_samples = _get_batch_axis_size(data)
+ steps_per_epoch = int(math.ceil(num_samples / batch_size))
+
+ if steps_per_epoch is None:
+    raise ValueError('Could not determine steps_per_epoch. '
+                     'Please provide either batch_size or '
+                     'steps_per_epoch.')
+
+  # TODO(omalleyt): For NumPy arrays in graph mode, placeholder ops should be
+  # used instead; this approach is only ideal for eager mode.
+ dataset = dataset_ops.Dataset.from_tensor_slices(data)
+
+ if batch_size is not None:
+ dataset = dataset.batch(batch_size)
+ if shuffle:
+ dataset = dataset.shuffle(buffer_size=10000)
+ dataset = dataset.repeat(epochs)
+ iterator = dataset.make_one_shot_iterator()
+
+ return iterator, steps_per_epoch
+
+
def check_num_samples(ins,
batch_size=None,
steps=None,
@@ -128,8 +259,8 @@ def standardize_input_data(data,
except KeyError as e:
raise ValueError('No data provided for "' + e.args[0] + '". Need data '
'for each key in: ' + str(names))
- elif isinstance(data, list):
- if isinstance(data[0], list):
+ elif isinstance(data, (list, tuple)):
+ if isinstance(data[0], (list, tuple)):
data = [np.asarray(d) for d in data]
elif len(names) == 1 and isinstance(data[0], (float, int)):
data = [np.asarray(data)]
@@ -482,6 +613,9 @@ def standardize_weights(y,
Raises:
ValueError: In case of invalid user-provided arguments.
"""
+ # Iterator may return sample_weight as 1-tuple
+ if isinstance(sample_weight, tuple):
+ sample_weight = sample_weight[0]
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
raise ValueError('"sample_weight_mode '
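A short usage sketch of the new `convert_to_iterator` helper (the unit tests added below exercise the same paths in more detail): with 10 samples and `batch_size=2`, `steps_per_epoch` is inferred as `ceil(10 / 2) = 5` and each `get_next()` call yields one batch.

    import numpy as np
    from tensorflow.python.keras.engine import training_utils

    x = np.ones((10, 10), dtype='float32')
    y = np.zeros((10, 1), dtype='float32')
    # Returns a one-shot iterator over (x, y) batches plus the inferred
    # number of steps per epoch.
    iterator, steps_per_epoch = training_utils.convert_to_iterator(
        x=x, y=y, batch_size=2)
    assert steps_per_epoch == 5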
diff --git a/tensorflow/python/keras/engine/training_utils_test.py b/tensorflow/python/keras/engine/training_utils_test.py
new file mode 100644
index 0000000000..297a1ae494
--- /dev/null
+++ b/tensorflow/python/keras/engine/training_utils_test.py
@@ -0,0 +1,150 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for training utility functions."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import test_util
+from tensorflow.python.keras.engine import training_utils
+from tensorflow.python.platform import test
+
+
+class TrainingUtilTest(test.TestCase):
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_single_numpy(self):
+ batch_size = 2
+ a = np.ones([10, 10])
+ iterator, steps_per_epoch = training_utils.convert_to_iterator(
+ x=a, batch_size=batch_size)
+ self.assertEquals(steps_per_epoch, 5)
+
+ expected_batch = a[:batch_size, :]
+ actual_batch, = iterator.get_next()
+ self.assertAllEqual(expected_batch, actual_batch)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_single_tensor(self):
+ batch_size = 2
+ a = ops.convert_to_tensor(np.ones([10, 10]))
+ iterator, steps_per_epoch = training_utils.convert_to_iterator(
+ x=a, batch_size=batch_size)
+ self.assertEquals(steps_per_epoch, 5)
+
+ expected_batch = a[:batch_size, :]
+ actual_batch, = iterator.get_next()
+ self.assertAllEqual(expected_batch, actual_batch)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_y(self):
+ batch_size = 2
+ a = np.ones([10, 100])
+ b = np.ones([10, 10])
+ iterator, steps_per_epoch = training_utils.convert_to_iterator(
+ x=a, y=b, batch_size=batch_size)
+ self.assertEquals(steps_per_epoch, 5)
+
+ expected_x = a[:batch_size, :]
+ expected_y = b[:batch_size, :]
+ actual_x, actual_y = iterator.get_next()
+ self.assertAllEqual(expected_x, actual_x)
+ self.assertAllEqual(expected_y, actual_y)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_sample_weights(self):
+ batch_size = 2
+ a = ops.convert_to_tensor(np.ones([10, 100]))
+ b = ops.convert_to_tensor(np.ones([10, 10]))
+ sw = ops.convert_to_tensor(np.ones([10]))
+ iterator, steps_per_epoch = training_utils.convert_to_iterator(
+ x=a, y=b, sample_weights=sw, batch_size=batch_size)
+ self.assertEquals(steps_per_epoch, 5)
+
+ expected_x = a[:batch_size, :]
+ expected_y = b[:batch_size, :]
+ expected_sw = sw[:batch_size]
+ actual_x, actual_y, actual_sw = iterator.get_next()
+ self.assertAllEqual(expected_x, actual_x)
+ self.assertAllEqual(expected_y, actual_y)
+ self.assertAllEqual(expected_sw, actual_sw)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_nested(self):
+ batch_size = 2
+ x = {'1': np.ones([10, 100]), '2': [np.zeros([10, 10]), np.ones([10, 20])]}
+ iterator, steps_per_epoch = training_utils.convert_to_iterator(
+ x=x, batch_size=batch_size)
+ self.assertEquals(steps_per_epoch, 5)
+
+ expected_x1 = x['1'][:batch_size, :]
+ expected_x2_0 = x['2'][0][:batch_size, :]
+ expected_x2_1 = x['2'][1][:batch_size, :]
+
+ actual_x, = iterator.get_next()
+ actual_x1 = actual_x['1'][:batch_size, :]
+ actual_x2_0 = actual_x['2'][0][:batch_size, :]
+ actual_x2_1 = actual_x['2'][1][:batch_size, :]
+
+ self.assertAllEqual(expected_x1, actual_x1)
+ self.assertAllEqual(expected_x2_0, actual_x2_0)
+ self.assertAllEqual(expected_x2_1, actual_x2_1)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_epochs(self):
+ batch_size = 2
+ a = np.ones([10, 10])
+ iterator, steps_per_epoch = training_utils.convert_to_iterator(
+ x=a, batch_size=batch_size, epochs=2)
+ self.assertEquals(steps_per_epoch, 5)
+
+ expected_batch = a[:batch_size, :]
+    # Iterate through more than one full epoch to verify the dataset repeats.
+ for _ in range(6):
+ actual_batch, = iterator.get_next()
+ self.assertAllEqual(expected_batch, actual_batch)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_convert_to_iterator_insufficient_info(self):
+ # with batch_size and steps_per_epoch not set
+ with self.assertRaises(ValueError):
+ a = np.ones([10, 10])
+ _ = training_utils.convert_to_iterator(x=a)
+
+ def test_nested_all(self):
+ nested_data = {'a': True, 'b': [True, True, (False, True)]}
+ all_true = training_utils._nested_all(nested_data, lambda x: x)
+ self.assertEquals(all_true, False)
+
+ nested_data = {'a': True, 'b': [True, True, (True, True)]}
+ all_true = training_utils._nested_all(nested_data, lambda x: x)
+ self.assertEquals(all_true, True)
+
+ def test_nested_any(self):
+ nested_data = [False, {'a': False, 'b': (False, True)}]
+ any_true = training_utils._nested_any(nested_data, lambda x: x)
+ self.assertEquals(any_true, True)
+
+ nested_data = [False, {'a': False, 'b': (False, False)}]
+ any_true = training_utils._nested_any(nested_data, lambda x: x)
+ self.assertEquals(any_true, False)
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/python/keras/initializers.py b/tensorflow/python/keras/initializers.py
index 28beb6760d..b9d856efa8 100644
--- a/tensorflow/python/keras/initializers.py
+++ b/tensorflow/python/keras/initializers.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""Keras initializer classes (soon to be replaced with core TF initializers).
+"""Keras initializer serialization / deserialization.
"""
from __future__ import absolute_import
from __future__ import division
@@ -22,107 +22,27 @@ import six
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
+
+# These imports are brought in so that keras.initializers.deserialize
+# has them available in module_objects.
from tensorflow.python.ops.init_ops import Constant
from tensorflow.python.ops.init_ops import glorot_normal_initializer
from tensorflow.python.ops.init_ops import glorot_uniform_initializer
-
+from tensorflow.python.ops.init_ops import he_normal # pylint: disable=unused-import
+from tensorflow.python.ops.init_ops import he_uniform # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import Identity
from tensorflow.python.ops.init_ops import Initializer # pylint: disable=unused-import
+from tensorflow.python.ops.init_ops import lecun_normal # pylint: disable=unused-import
+from tensorflow.python.ops.init_ops import lecun_uniform # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import Ones
from tensorflow.python.ops.init_ops import Orthogonal
from tensorflow.python.ops.init_ops import RandomNormal
from tensorflow.python.ops.init_ops import RandomUniform
from tensorflow.python.ops.init_ops import TruncatedNormal
-from tensorflow.python.ops.init_ops import VarianceScaling
+from tensorflow.python.ops.init_ops import VarianceScaling # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import Zeros
-from tensorflow.python.util.tf_export import tf_export
-
-
-@tf_export('keras.initializers.lecun_normal')
-def lecun_normal(seed=None):
- """LeCun normal initializer.
-
- It draws samples from a truncated normal distribution centered on 0
- with `stddev = sqrt(1 / fan_in)`
- where `fan_in` is the number of input units in the weight tensor.
-
- Arguments:
- seed: A Python integer. Used to seed the random generator.
-
- Returns:
- An initializer.
-
- References:
- - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
- - [Efficient
- Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
- """
- return VarianceScaling(
- scale=1., mode='fan_in', distribution='normal', seed=seed)
-
-
-@tf_export('keras.initializers.lecun_uniform')
-def lecun_uniform(seed=None):
- """LeCun uniform initializer.
-
- It draws samples from a uniform distribution within [-limit, limit]
- where `limit` is `sqrt(3 / fan_in)`
- where `fan_in` is the number of input units in the weight tensor.
-
- Arguments:
- seed: A Python integer. Used to seed the random generator.
-
- Returns:
- An initializer.
- References:
- LeCun 98, Efficient Backprop,
- http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
- """
- return VarianceScaling(
- scale=1., mode='fan_in', distribution='uniform', seed=seed)
-
-
-@tf_export('keras.initializers.he_normal')
-def he_normal(seed=None):
- """He normal initializer.
-
- It draws samples from a truncated normal distribution centered on 0
- with `stddev = sqrt(2 / fan_in)`
- where `fan_in` is the number of input units in the weight tensor.
-
- Arguments:
- seed: A Python integer. Used to seed the random generator.
-
- Returns:
- An initializer.
-
- References:
- He et al., http://arxiv.org/abs/1502.01852
- """
- return VarianceScaling(
- scale=2., mode='fan_in', distribution='normal', seed=seed)
-
-
-@tf_export('keras.initializers.he_uniform')
-def he_uniform(seed=None):
- """He uniform variance scaling initializer.
-
- It draws samples from a uniform distribution within [-limit, limit]
- where `limit` is `sqrt(6 / fan_in)`
- where `fan_in` is the number of input units in the weight tensor.
-
- Arguments:
- seed: A Python integer. Used to seed the random generator.
-
- Returns:
- An initializer.
-
- References:
- He et al., http://arxiv.org/abs/1502.01852
- """
- return VarianceScaling(
- scale=2., mode='fan_in', distribution='uniform', seed=seed)
+from tensorflow.python.util.tf_export import tf_export
# Compatibility aliases
diff --git a/tensorflow/python/keras/initializers_test.py b/tensorflow/python/keras/initializers_test.py
index c519e194bd..51725e03f2 100644
--- a/tensorflow/python/keras/initializers_test.py
+++ b/tensorflow/python/keras/initializers_test.py
@@ -31,16 +31,6 @@ class KerasInitializersTest(test.TestCase):
target_max=None, target_min=None):
variable = keras.backend.variable(init(shape))
output = keras.backend.get_value(variable)
- lim = 3e-2
- if target_std is not None:
- self.assertGreater(lim, abs(output.std() - target_std))
- if target_mean is not None:
- self.assertGreater(lim, abs(output.mean() - target_mean))
- if target_max is not None:
- self.assertGreater(lim, abs(output.max() - target_max))
- if target_min is not None:
- self.assertGreater(lim, abs(output.min() - target_min))
-
# Test serialization (assumes deterministic behavior).
config = init.get_config()
reconstructed_init = init.__class__.from_config(config)
diff --git a/tensorflow/python/keras/layers/advanced_activations.py b/tensorflow/python/keras/layers/advanced_activations.py
index eba10da6f3..61ab69c16f 100644
--- a/tensorflow/python/keras/layers/advanced_activations.py
+++ b/tensorflow/python/keras/layers/advanced_activations.py
@@ -284,6 +284,13 @@ class Softmax(Layer):
class ReLU(Layer):
"""Rectified Linear Unit activation function.
+ With default values, it returns element-wise `max(x, 0)`.
+
+ Otherwise, it follows:
+ `f(x) = max_value` for `x >= max_value`,
+ `f(x) = x` for `threshold <= x < max_value`,
+ `f(x) = negative_slope * (x - threshold)` otherwise.
+
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
@@ -294,21 +301,39 @@ class ReLU(Layer):
Arguments:
max_value: float >= 0. Maximum activation value.
+ negative_slope: float >= 0. Negative slope coefficient.
+ threshold: float. Threshold value for thresholded activation.
"""
- def __init__(self, max_value=None, **kwargs):
+ def __init__(self, max_value=None, negative_slope=0, threshold=0, **kwargs):
super(ReLU, self).__init__(**kwargs)
- self.support_masking = True
- self.max_value = K.cast_to_floatx(max_value)
- if self.max_value < 0.:
+ if max_value is not None and max_value < 0.:
raise ValueError('max_value of Relu layer '
'cannot be negative value: ' + str(max_value))
+ if negative_slope < 0.:
+ raise ValueError('negative_slope of Relu layer '
+ 'cannot be negative value: ' + str(negative_slope))
+
+ self.support_masking = True
+ self.max_value = K.cast_to_floatx(max_value)
+ self.negative_slope = K.cast_to_floatx(negative_slope)
+ self.threshold = K.cast_to_floatx(threshold)
def call(self, inputs):
- return activations.relu(inputs, max_value=self.max_value)
+ # alpha is used for leaky relu slope in activations instead of
+ # negative_slope.
+ return activations.relu(
+ inputs,
+ alpha=self.negative_slope,
+ max_value=self.max_value,
+ threshold=self.threshold)
def get_config(self):
- config = {'max_value': self.max_value}
+ config = {
+ 'max_value': self.max_value,
+ 'negative_slope': self.negative_slope,
+ 'threshold': self.threshold
+ }
base_config = super(ReLU, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
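The piecewise definition documented for the updated `ReLU` layer can be checked against a small NumPy reference (assumed values, not part of the patch):

    import numpy as np

    def relu_reference(x, max_value=None, negative_slope=0.0, threshold=0.0):
      # f(x) = negative_slope * (x - threshold) below the threshold,
      # f(x) = x between threshold and max_value, clipped at max_value.
      out = np.where(x >= threshold, x, negative_slope * (x - threshold))
      if max_value is not None:
        out = np.minimum(out, max_value)
      return out

    x = np.array([-2.0, -0.5, 0.5, 3.0, 10.0])
    # With max_value=6, negative_slope=0.2, threshold=0:
    # [-0.4, -0.1, 0.5, 3.0, 6.0]
    print(relu_reference(x, max_value=6.0, negative_slope=0.2))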
diff --git a/tensorflow/python/keras/layers/advanced_activations_test.py b/tensorflow/python/keras/layers/advanced_activations_test.py
index 9e1f15b1bc..53c1baa2bb 100644
--- a/tensorflow/python/keras/layers/advanced_activations_test.py
+++ b/tensorflow/python/keras/layers/advanced_activations_test.py
@@ -75,6 +75,14 @@ class AdvancedActivationsTest(test.TestCase):
testing_utils.layer_test(keras.layers.ReLU,
kwargs={'max_value': -10},
input_shape=(2, 3, 4))
+ with self.assertRaisesRegexp(
+ ValueError,
+ 'negative_slope of Relu layer cannot be negative value: -2'):
+ with self.test_session():
+ testing_utils.layer_test(
+ keras.layers.ReLU,
+ kwargs={'negative_slope': -2},
+ input_shape=(2, 3, 4))
if __name__ == '__main__':
diff --git a/tensorflow/python/keras/layers/convolutional_recurrent.py b/tensorflow/python/keras/layers/convolutional_recurrent.py
index 84d794cada..e61dd3043d 100644
--- a/tensorflow/python/keras/layers/convolutional_recurrent.py
+++ b/tensorflow/python/keras/layers/convolutional_recurrent.py
@@ -788,7 +788,7 @@ class ConvLSTM2D(ConvRNN2D):
Arguments:
filters: Integer, the dimensionality of the output space
- (i.e. the number output of filters in the convolution).
+ (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
dimensions of the convolution window.
strides: An integer or tuple/list of n integers,
diff --git a/tensorflow/python/keras/layers/cudnn_recurrent_test.py b/tensorflow/python/keras/layers/cudnn_recurrent_test.py
index 8fd970239f..2ed0aa8f26 100644
--- a/tensorflow/python/keras/layers/cudnn_recurrent_test.py
+++ b/tensorflow/python/keras/layers/cudnn_recurrent_test.py
@@ -220,7 +220,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
self.assertNotEqual(out4.max(), out5.max())
@parameterized.named_parameters(
- *testing_utils.generate_combinations_with_testcase_name(
+ *test_util.generate_combinations_with_testcase_name(
rnn_type=['LSTM', 'GRU'], to_cudnn=[True, False],
bidirectional=[True, False], implementation=[1, 2],
model_nest_level=[1, 2], model_type=['seq', 'func']))
@@ -301,7 +301,7 @@ class CuDNNTest(test.TestCase, parameterized.TestCase):
os.remove(fname)
@parameterized.named_parameters(
- *testing_utils.generate_combinations_with_testcase_name(
+ *test_util.generate_combinations_with_testcase_name(
rnn_type=['LSTM', 'GRU'], to_cudnn=[True, False]))
def test_load_weights_between_noncudnn_rnn_time_distributed(self, rnn_type,
to_cudnn):
diff --git a/tensorflow/python/keras/layers/normalization.py b/tensorflow/python/keras/layers/normalization.py
index 58c8a8a66d..a7835bc0a2 100644
--- a/tensorflow/python/keras/layers/normalization.py
+++ b/tensorflow/python/keras/layers/normalization.py
@@ -370,7 +370,7 @@ class BatchNormalization(Layer):
decay = ops.convert_to_tensor(1.0 - momentum, name='decay')
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
- update_delta = (variable - value) * decay
+ update_delta = (variable - math_ops.cast(value, variable.dtype)) * decay
return state_ops.assign_sub(variable, update_delta, name=scope)
def _fused_batch_norm(self, inputs, training):
@@ -619,6 +619,10 @@ class BatchNormalization(Layer):
else:
mean, variance = self.moving_mean, self.moving_variance
+ mean = math_ops.cast(mean, inputs.dtype)
+ variance = math_ops.cast(variance, inputs.dtype)
+ if offset is not None:
+ offset = math_ops.cast(offset, inputs.dtype)
outputs = nn.batch_normalization(inputs,
_broadcast(mean),
_broadcast(variance),
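These casts let a float16 (or bfloat16) activation flow through a `BatchNormalization` layer whose moving statistics are kept in float32. A minimal sketch of the dtype handling (assumed shapes and epsilon, not part of the patch):

    import numpy as np

    inputs = np.random.randn(4, 10).astype(np.float16)
    moving_mean = np.zeros(10, dtype=np.float32)
    moving_variance = np.ones(10, dtype=np.float32)

    # Mirrors the added casts: statistics are brought to the input dtype
    # before the normalization math so all operands agree.
    mean = moving_mean.astype(inputs.dtype)
    variance = moving_variance.astype(inputs.dtype)
    outputs = (inputs - mean) / np.sqrt(variance + 1e-3)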
diff --git a/tensorflow/python/keras/layers/normalization_test.py b/tensorflow/python/keras/layers/normalization_test.py
index b22f3bd152..a97b4cac46 100644
--- a/tensorflow/python/keras/layers/normalization_test.py
+++ b/tensorflow/python/keras/layers/normalization_test.py
@@ -95,6 +95,24 @@ class NormalizationLayersTest(test.TestCase):
np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
+ def test_batchnorm_mixed_precision(self):
+ with self.test_session():
+ model = keras.models.Sequential()
+ norm = keras.layers.BatchNormalization(input_shape=(10,), momentum=0.8)
+ model.add(norm)
+ model.compile(loss='mse', optimizer='sgd')
+
+ # centered on 5.0, variance 10.0
+ x = np.random.normal(
+ loc=5.0, scale=10.0, size=(1000, 10)).astype(np.float16)
+ model.fit(x, x, epochs=4, verbose=0)
+ out = model.predict(x)
+ out -= keras.backend.eval(norm.beta)
+ out /= keras.backend.eval(norm.gamma)
+
+ np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)
+ np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)
+
def test_batchnorm_convnet(self):
if test.is_gpu_available(cuda_only=True):
with self.test_session(use_gpu=True):
diff --git a/tensorflow/python/keras/layers/recurrent.py b/tensorflow/python/keras/layers/recurrent.py
index 61775da47b..534c0eca08 100644
--- a/tensorflow/python/keras/layers/recurrent.py
+++ b/tensorflow/python/keras/layers/recurrent.py
@@ -37,6 +37,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util.tf_export import tf_export
@@ -403,6 +404,8 @@ class RNN(Layer):
'one integer per RNN state).')
super(RNN, self).__init__(**kwargs)
self.cell = cell
+ if isinstance(cell, checkpointable.CheckpointableBase):
+ self._track_checkpointable(self.cell, name='cell')
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
diff --git a/tensorflow/python/keras/layers/recurrent_test.py b/tensorflow/python/keras/layers/recurrent_test.py
index 802374d2d2..fefb92826b 100644
--- a/tensorflow/python/keras/layers/recurrent_test.py
+++ b/tensorflow/python/keras/layers/recurrent_test.py
@@ -28,6 +28,7 @@ from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
+from tensorflow.python.training.checkpointable import util as checkpointable_util
class RNNTest(test.TestCase):
@@ -556,5 +557,22 @@ class RNNTest(test.TestCase):
[tuple(o.as_list()) for o in output_shape],
expected_output_shape)
+ def test_checkpointable_dependencies(self):
+ rnn = keras.layers.SimpleRNN
+ with self.test_session():
+ x = np.random.random((2, 2, 2))
+ y = np.random.random((2, 2))
+ model = keras.models.Sequential()
+ model.add(rnn(2))
+ model.compile(optimizer='rmsprop', loss='mse')
+ model.fit(x, y, epochs=1, batch_size=1)
+
+ # check whether the model variables are present in the
+ # checkpointable list of objects
+ checkpointed_objects = set(checkpointable_util.list_objects(model))
+ for v in model.variables:
+ self.assertIn(v, checkpointed_objects)
+
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/python/keras/layers/wrappers.py b/tensorflow/python/keras/layers/wrappers.py
index f651e03874..f0c1e76156 100644
--- a/tensorflow/python/keras/layers/wrappers.py
+++ b/tensorflow/python/keras/layers/wrappers.py
@@ -47,7 +47,6 @@ class Wrapper(Layer):
def __init__(self, layer, **kwargs):
assert isinstance(layer, Layer)
self.layer = layer
- self._track_checkpointable(layer, name='layer')
# Tracks mapping of Wrapper inputs to inner layer inputs. Useful when
# the inner layer has update ops that depend on its inputs (as opposed
# to the inputs to the Wrapper layer).
@@ -168,6 +167,7 @@ class TimeDistributed(Wrapper):
'`Layer` instance. You passed: {input}'.format(input=layer))
super(TimeDistributed, self).__init__(layer, **kwargs)
self.supports_masking = True
+ self._track_checkpointable(layer, name='layer')
def _get_shape_tuple(self, init_tuple, tensor, start_idx, int_shape=None):
"""Finds non-specific dimensions in the static shapes.
@@ -417,6 +417,8 @@ class Bidirectional(Wrapper):
self._num_constants = None
super(Bidirectional, self).__init__(layer, **kwargs)
self.input_spec = layer.input_spec
+ self._track_checkpointable(self.forward_layer, name='forward_layer')
+ self._track_checkpointable(self.backward_layer, name='backward_layer')
@property
def trainable(self):
@@ -526,7 +528,8 @@ class Bidirectional(Wrapper):
else:
return super(Bidirectional, self).__call__(inputs, **kwargs)
- def call(self, inputs,
+ def call(self,
+ inputs,
training=None,
mask=None,
initial_state=None,
diff --git a/tensorflow/python/keras/layers/wrappers_test.py b/tensorflow/python/keras/layers/wrappers_test.py
index 3f268acf5c..0cd774ef0f 100644
--- a/tensorflow/python/keras/layers/wrappers_test.py
+++ b/tensorflow/python/keras/layers/wrappers_test.py
@@ -87,6 +87,8 @@ class TimeDistributedTest(test.TestCase):
# test config
model.get_config()
+ # check whether the model variables are present in the
+ # checkpointable list of objects
checkpointed_objects = set(checkpointable_util.list_objects(model))
for v in model.variables:
self.assertIn(v, checkpointed_objects)
@@ -278,6 +280,12 @@ class BidirectionalTest(test.TestCase):
model.compile(optimizer=RMSPropOptimizer(0.01), loss='mse')
model.fit(x, y, epochs=1, batch_size=1)
+ # check whether the model variables are present in the
+ # checkpointable list of objects
+ checkpointed_objects = set(checkpointable_util.list_objects(model))
+ for v in model.variables:
+ self.assertIn(v, checkpointed_objects)
+
# test compute output shape
ref_shape = model.layers[-1].output.get_shape()
shape = model.layers[-1].compute_output_shape(
diff --git a/tensorflow/python/keras/metrics.py b/tensorflow/python/keras/metrics.py
index e03d7dfe93..7d8b1fec45 100644
--- a/tensorflow/python/keras/metrics.py
+++ b/tensorflow/python/keras/metrics.py
@@ -19,9 +19,18 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+from abc import ABCMeta
+from abc import abstractmethod
+
+import types
import six
+from tensorflow.python.eager import context
+from tensorflow.python.eager import function
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
+from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.losses import binary_crossentropy
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.losses import cosine_proximity
@@ -37,14 +46,471 @@ from tensorflow.python.keras.losses import sparse_categorical_crossentropy
from tensorflow.python.keras.losses import squared_hinge
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import confusion_matrix
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variable_scope as vs
+from tensorflow.python.ops import weights_broadcast_ops
+from tensorflow.python.training import distribute as distribute_lib
+from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import tf_export
+def check_is_tensor_or_operation(x, name):
+ """Raises type error if the given input is not a tensor or operation."""
+ if not (isinstance(x, ops.Tensor) or isinstance(x, ops.Operation)):
+ raise TypeError('{0} must be a Tensor or Operation, given: {1}'.format(
+ name, x))
+
+
+def update_state_wrapper(update_state_fn):
+ """Decorator to wrap metric `update_state()` with `defun()`, `add_update()`.
+
+ Args:
+ update_state_fn: function that accumulates metric statistics.
+
+ Returns:
+    Decorated `update_state_fn`. In graph execution, the decorated function
+    returns an update op that should be executed to update the metric state
+    with the given inputs; in eager execution it returns None.
+ """
+
+ def decorated(metric_obj, *args, **kwargs):
+ """Decorated function with `defun()` and `add_update()`."""
+
+ # Converting update_state_fn() into a graph function, so that
+ # we can return a single op that performs all of the variable updates.
+ # Assigning to a different method name to avoid reference cycle.
+ defuned_update_state_fn = function.defun(update_state_fn)
+ update_op = defuned_update_state_fn(*args, **kwargs)
+ if update_op is not None: # update_op will be None in eager execution.
+ metric_obj.add_update(update_op, inputs=True)
+ check_is_tensor_or_operation(
+ update_op, 'Metric {0}\'s update'.format(metric_obj.name))
+ return update_op
+
+ return tf_decorator.make_decorator(update_state_fn, decorated)
+
+
+def result_wrapper(result_fn):
+ """Decorator to wrap metric `result()` function in `merge_call()`.
+
+ Result computation is an idempotent operation that simply calculates the
+ metric value using the state variables.
+
+  If metric state variables are distributed across towers/devices and
+  `result()` is requested from the context of one device, this function wraps
+  `result()` in a distribution strategy `merge_call()` so that the state
+  variables are aggregated across devices.
+
+ Args:
+ result_fn: function that computes the metric result.
+
+  Returns:
+    Decorated function that wraps `result_fn()` and returns the metric
+    result tensor.
+ """
+
+ def decorated(metric_obj, *args):
+ """Decorated function with merge_call."""
+ tower_context = distribute_lib.get_tower_context()
+ if tower_context is None: # if in cross tower context already
+ result_t = result_fn(*args)
+ else:
+ # TODO(psv): Test distribution of metrics using different distribution
+ # strategies.
+
+ # Creating a wrapper for merge_fn. merge_call invokes the given merge_fn
+ # with distribution object as the first parameter. We create a wrapper
+ # here so that the result function need not have that parameter.
+ def merge_fn_wrapper(distribution, merge_fn, *args):
+ # We will get `PerDevice` merge function. Taking the first one as all
+ # are identical copies of the function that we had passed below.
+ return distribution.unwrap(merge_fn)[0](*args)
+
+ # Wrapping result in merge_call. merge_call is used when we want to leave
+ # tower mode and compute a value in cross tower mode.
+ result_t = tower_context.merge_call(merge_fn_wrapper, result_fn, *args)
+ check_is_tensor_or_operation(result_t,
+ 'Metric {0}\'s result'.format(metric_obj.name))
+ return result_t
+
+ return tf_decorator.make_decorator(result_fn, decorated)
+
+
+def _safe_div(numerator, denominator):
+ """Divides two tensors element-wise, returning 0 if the denominator is <= 0.
+
+ Args:
+ numerator: A `Tensor`.
+ denominator: A `Tensor`, with dtype matching `numerator`.
+
+ Returns:
+ 0 if `denominator` <= 0, else `numerator` / `denominator`
+ """
+ t = math_ops.truediv(numerator, denominator)
+ zero = array_ops.zeros_like(t, dtype=denominator.dtype)
+ condition = math_ops.greater(denominator, zero)
+ zero = math_ops.cast(zero, t.dtype)
+ return array_ops.where(condition, t, zero)
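As a rough illustration of the semantics above, a NumPy stand-in with the same behavior (an illustrative sketch, not the TensorFlow implementation) might look like this:

```python
import numpy as np

def safe_div(numerator, denominator):
  # Elementwise division that returns 0 wherever the denominator is <= 0.
  out = np.zeros_like(numerator, dtype=np.float64)
  np.divide(numerator, denominator, out=out, where=denominator > 0)
  return out

print(safe_div(np.array([1., 4.]), np.array([2., 0.])))  # [0.5 0. ]
```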
+
+
+def _squeeze_or_expand_dimensions(y_pred, y_true, sample_weight):
+ """Squeeze or expand last dimension if needed.
+
+ 1. Squeezes last dim of `y_pred` or `y_true` if their rank differs by 1
+ (using `confusion_matrix.remove_squeezable_dimensions`).
+ 2. Squeezes or expands last dim of `sample_weight` if its rank differs by 1
+ from the new rank of `y_pred`.
+ If `sample_weight` is scalar, it is kept scalar.
+
+ This will use static shape if available. Otherwise, it will add graph
+ operations, which could result in a performance hit.
+
+ Args:
+ y_pred: Predicted values, a `Tensor` of arbitrary dimensions.
+ y_true: Optional label `Tensor` whose dimensions match `y_pred`.
+ sample_weight: Optional weight scalar or `Tensor` whose dimensions match
+ `y_pred`.
+
+ Returns:
+    Tuple of `y_pred`, `y_true` and `sample_weight`. Each of them may have
+    had its last dimension squeezed, and `sample_weight` may have been
+    expanded by one dimension.
+ """
+ if y_true is not None:
+ # squeeze last dim of `y_pred` or `y_true` if their rank differs by 1
+ y_true, y_pred = confusion_matrix.remove_squeezable_dimensions(
+ y_true, y_pred)
+ y_pred.get_shape().assert_is_compatible_with(y_true.get_shape())
+
+ if sample_weight is None:
+ return y_pred, y_true, None
+
+ sample_weight = ops.convert_to_tensor(sample_weight)
+ weights_shape = sample_weight.get_shape()
+ weights_rank = weights_shape.ndims
+ if weights_rank == 0: # If weights is scalar, do nothing.
+ return y_pred, y_true, sample_weight
+
+ y_pred_shape = y_pred.get_shape()
+ y_pred_rank = y_pred_shape.ndims
+ if (y_pred_rank is not None) and (weights_rank is not None):
+ # Use static rank.
+ if weights_rank - y_pred_rank == 1:
+ sample_weight = array_ops.squeeze(sample_weight, [-1])
+ elif y_pred_rank - weights_rank == 1:
+ sample_weight = array_ops.expand_dims(sample_weight, [-1])
+ return y_pred, y_true, sample_weight
+
+ # Use dynamic rank.
+ weights_rank_tensor = array_ops.rank(sample_weight)
+ rank_diff = weights_rank_tensor - array_ops.rank(y_pred)
+ maybe_squeeze_weights = lambda: array_ops.squeeze(sample_weight, [-1])
+
+ def _maybe_expand_weights():
+    return control_flow_ops.cond(
+        math_ops.equal(rank_diff, -1),
+        lambda: array_ops.expand_dims(sample_weight, [-1]),
+        lambda: sample_weight)
+
+ def _maybe_adjust_weights():
+ return control_flow_ops.cond(
+ math_ops.equal(rank_diff, 1), maybe_squeeze_weights,
+ _maybe_expand_weights)
+
+ # squeeze or expand last dim of `sample_weight` if its rank differs by 1
+ # from the new rank of `y_pred`.
+ sample_weight = control_flow_ops.cond(
+ math_ops.equal(weights_rank_tensor, 0), lambda: sample_weight,
+ _maybe_adjust_weights)
+ return y_pred, y_true, sample_weight
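To make the rank adjustments concrete, here is a NumPy sketch of the static-rank branch (shapes only; the names are illustrative and not part of the change):

```python
import numpy as np

def match_weight_rank(y_pred, sample_weight):
  # Static-rank branch: squeeze or expand the last dim of the weights so that
  # their rank matches y_pred; scalar weights are left untouched.
  if sample_weight.ndim == 0:
    return sample_weight
  if sample_weight.ndim - y_pred.ndim == 1:
    return np.squeeze(sample_weight, axis=-1)
  if y_pred.ndim - sample_weight.ndim == 1:
    return np.expand_dims(sample_weight, axis=-1)
  return sample_weight

print(match_weight_rank(np.zeros((8, 1)), np.zeros((8,))).shape)  # (8, 1)
print(match_weight_rank(np.zeros((8,)), np.zeros((8, 1))).shape)  # (8,)
```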
+
+
+class Metric(Layer):
+ """Encapsulates metric logic and state.
+
+ Usage with eager execution:
+
+ ```python
+ m = SomeMetric(...)
+ for input in ...:
+ m.update_state(input)
+ print('Final result: ', m.result().numpy())
+ ```
+
+ Usage with graph execution:
+
+ ```python
+ m = SomeMetric(...)
+ init_op = tf.global_variables_initializer() # Initialize variables
+ with tf.Session() as sess:
+ sess.run(init_op)
+ for input in ...:
+ update_op = m.update_state(input)
+ sess.run(update_op)
+ print('Final result: ', sess.run(m.result()))
+ ```
+
+ To be implemented by subclasses:
+ * `__init__()`: All state variables should be created in this method by
+ calling `self.add_weight()` like: `self.var = self.add_weight(...)`
+ * `update_state()`: Has all updates to the state variables like:
+ self.var.assign_add(...).
+ * `result()`: Computes and returns a value for the metric
+ from the state variables.
+
+ Example subclass implementation:
+
+  ```python
+ class BinaryTruePositives(Metric):
+ def __init__(self, name='binary-true-positives', dtype=None):
+ super(BinaryTruePositives, self).__init__(name=name, dtype=dtype)
+ self.true_positives = self.add_weight(
+ 'true_positives', initializer=init_ops.zeros_initializer)
+
+ def update_state(self, y_true, y_pred, sample_weight=None):
+ y_true = math_ops.cast(y_true, dtypes.bool)
+ y_pred = math_ops.cast(y_pred, dtypes.bool)
+ y_pred, y_true, sample_weight = _squeeze_or_expand_dimensions(
+ y_pred, y_true, sample_weight)
+
+ values = math_ops.logical_and(
+ math_ops.equal(y_true, True), math_ops.equal(y_pred, True))
+ values = math_ops.cast(values, self._dtype)
+ if sample_weight is not None:
+ sample_weight = math_ops.cast(sample_weight, self._dtype)
+ values = math_ops.multiply(values, sample_weight)
+ state_ops.assign_add(self.true_positives, math_ops.reduce_sum(values))
+
+ def result(self):
+ return array_ops.identity(self.true_positives)
+ ```
+ """
+ __metaclass__ = ABCMeta
+
+ def __init__(self, name=None, dtype=None):
+ super(Metric, self).__init__(name=name, dtype=dtype)
+ self.stateful = True # All metric layers are stateful.
+ self.built = True
+ self._dtype = K.floatx() if dtype is None else dtypes.as_dtype(dtype).name
+
+ def __new__(cls, *args, **kwargs):
+ obj = super(Metric, cls).__new__(cls, *args, **kwargs)
+ obj.update_state = types.MethodType(
+ update_state_wrapper(obj.update_state), obj)
+ obj.result = types.MethodType(result_wrapper(obj.result), obj)
+ return obj
+
+ def __call__(self, *args, **kwargs):
+ """Accumulates statistics and then computes metric result value.
+
+    Args:
+      *args: A mini-batch of inputs to the Metric, passed on to
+        `update_state()`.
+      **kwargs: Keyword inputs to the Metric, passed on to `update_state()`.
+
+ Returns:
+ The metric value tensor.
+ """
+ update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable
+ with ops.control_dependencies([update_op]):
+ return self.result() # pylint: disable=not-callable
+
+ def reset_states(self):
+ """Resets all of the metric state variables.
+
+ This function is called between epochs/steps,
+ when a metric is evaluated during training.
+ """
+ for v in self.variables:
+ K.set_value(v, 0)
+
+ @abstractmethod
+ def update_state(self, *args, **kwargs):
+ """Accumulates statistics for the metric.
+
+    Note: This function is executed as a graph function in graph mode, and the
+    resulting update op is added to the metric layer.
+    This means:
+      a) Operations on the same resource are executed in textual order.
+         This should make it easier to do things like add the updated
+         value of a variable to another, for example.
+      b) You don't need to worry about collecting the update ops to execute.
+         All update ops added to the graph by this function will be executed.
+      As a result, code should generally work the same way with graph or
+      eager execution.
+
+    Args:
+      *args: A mini-batch of inputs to the Metric.
+      **kwargs: Keyword inputs to the Metric.
+ """
+    raise NotImplementedError('Must be implemented in subclasses.')
+
+ @abstractmethod
+ def result(self):
+ """Computes and returns the metric value tensor.
+
+ Result computation is an idempotent operation that simply calculates the
+ metric value using the state variables.
+ """
+    raise NotImplementedError('Must be implemented in subclasses.')
+
+ ### For use by subclasses ###
+ def add_weight(self,
+ name,
+ shape=(),
+ aggregation=vs.VariableAggregation.SUM,
+ synchronization=vs.VariableSynchronization.ON_READ,
+ initializer=None):
+ """Adds state variable. Only for use by subclasses."""
+ return super(Metric, self).add_weight(
+ name=name,
+ shape=shape,
+ dtype=self._dtype,
+ trainable=False,
+ initializer=initializer,
+ synchronization=synchronization,
+ aggregation=aggregation)
+
+ ### End: For use by subclasses ###
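A framework-free sketch of the per-instance method rebinding that `Metric.__new__` performs via `update_state_wrapper`/`result_wrapper` (the names below are illustrative, not TensorFlow APIs):

```python
import types

def count_calls(fn):
  """Illustrative stand-in for update_state_wrapper/result_wrapper."""
  def decorated(self, *args, **kwargs):
    self.calls += 1
    return fn(self, *args, **kwargs)
  return decorated

class Counter(object):

  def __new__(cls, *args, **kwargs):
    obj = super(Counter, cls).__new__(cls)
    # Rebind the method on the instance, so overrides in any subclass are
    # wrapped as well, mirroring what Metric.__new__ does above.
    obj.update_state = types.MethodType(count_calls(cls.update_state), obj)
    return obj

  def __init__(self):
    self.calls = 0
    self.total = 0

  def update_state(self, value):
    self.total += value

c = Counter()
c.update_state(3)
c.update_state(4)
print(c.calls, c.total)  # 2 7
```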
+
+
+class Mean(Metric):
+ """Computes the (weighted) mean of the given values.
+
+ This metric creates two variables, `total` and `count` that are used to
+  compute the average of `values`. This average is ultimately returned as
+  `mean`: an idempotent operation that simply divides `total` by `count`.
+
+ If `sample_weight` is `None`, weights default to 1.
+ Use `sample_weight` of 0 to mask values.
+ """
+
+ def __init__(self, name='mean', dtype=None):
+ """Creates a `Mean` instance.
+
+ Args:
+ name: (Optional) string name of the metric instance.
+ dtype: (Optional) data type of the metric result.
+ """
+ super(Mean, self).__init__(name=name, dtype=dtype)
+ # Create new state variables
+ self.total = self.add_weight(
+ 'total', initializer=init_ops.zeros_initializer)
+ self.count = self.add_weight(
+ 'count', initializer=init_ops.zeros_initializer)
+
+ def update_state(self, values, sample_weight=None):
+ """Accumulates statistics for computing the mean.
+
+ For example, if `values` is [1, 3, 5, 7] then the mean is 4. If
+ the `sample_weight` is specified as [1, 1, 0, 0] then the mean would be 2.
+
+ Args:
+ values: Per-example value.
+ sample_weight: Optional weighting of each example. Defaults to 1.
+ """
+ values = math_ops.cast(values, self._dtype)
+ if sample_weight is None:
+ num_values = math_ops.cast(array_ops.size(values), self._dtype)
+ else:
+ sample_weight = math_ops.cast(sample_weight, self._dtype)
+
+ # Update dimensions of weights to match with values.
+ values, _, sample_weight = _squeeze_or_expand_dimensions(
+ values, None, sample_weight)
+ sample_weight = weights_broadcast_ops.broadcast_weights(
+ sample_weight, values)
+ num_values = math_ops.reduce_sum(sample_weight)
+ values = math_ops.multiply(values, sample_weight)
+ values = math_ops.reduce_sum(values)
+
+ # Update state variables
+ state_ops.assign_add(self.total, values)
+ state_ops.assign_add(self.count, num_values)
+
+ def result(self):
+ return _safe_div(self.total, self.count)
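For illustration, the intended eager-mode usage of `Mean` (a minimal sketch that assumes eager execution is enabled; the numbers mirror the docstring example):

```python
# Assumes eager execution is enabled; `Mean` is the class defined above.
m = Mean()
m.update_state([1, 3, 5, 7])                    # total=16, count=4
print(m.result().numpy())                       # 4.0
m.update_state([1, 5], sample_weight=[1, 0.2])  # total=18, count=5.2
print(m.result().numpy())                       # ~3.46
```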
+
+
+class MeanMetricWrapper(Mean):
+ """Wraps a stateless metric function with the Mean metric."""
+
+ def __init__(self, fn, name=None, dtype=None, **kwargs):
+ """Creates a `MeanMetricWrapper` instance.
+
+ Args:
+ fn: The metric function to wrap, with signature
+ `fn(y_true, y_pred, **kwargs)`.
+ name: (Optional) string name of the metric instance.
+ dtype: (Optional) data type of the metric result.
+ **kwargs: The keyword arguments that are passed on to `fn`.
+ """
+ super(MeanMetricWrapper, self).__init__(name=name, dtype=dtype)
+ self._fn = fn
+ self._fn_kwargs = kwargs
+
+ def update_state(self, y_true, y_pred, sample_weight=None):
+ """Accumulates metric statistics.
+
+ `y_true` and `y_pred` should have the same shape.
+
+ Args:
+ y_true: The ground truth values.
+ y_pred: The predicted values.
+ sample_weight: Optional weighting of each example. Defaults to 1. Can be
+ a `Tensor` whose rank is either 0, or the same rank as `y_true`,
+ and must be broadcastable to `y_true`.
+ """
+ y_true = math_ops.cast(y_true, self._dtype)
+ y_pred = math_ops.cast(y_pred, self._dtype)
+ y_pred, y_true, sample_weight = _squeeze_or_expand_dimensions(
+ y_pred, y_true, sample_weight)
+
+ matches = self._fn(y_true, y_pred, **self._fn_kwargs)
+ super(MeanMetricWrapper, self).update_state(
+ matches, sample_weight=sample_weight)
+
+ def get_config(self):
+ config = self._fn_kwargs
+ base_config = super(MeanMetricWrapper, self).get_config()
+ return dict(list(base_config.items()) + list(config.items()))
+
+
+class BinaryAccuracy(MeanMetricWrapper):
+ """Calculates how often predictions matches labels.
+
+ This metric creates two local variables, `total` and `count` that are used to
+ compute the frequency with which `y_pred` matches `y_true`. This frequency is
+ ultimately returned as `binary accuracy`: an idempotent operation that simply
+ divides `total` by `count`.
+
+ If `sample_weight` is `None`, weights default to 1.
+ Use `sample_weight` of 0 to mask values.
+ """
+
+ def __init__(self, name='binary-accuracy', dtype=None, threshold=0.5):
+ """Creates a `BinaryAccuracy` instance.
+
+ Args:
+ name: (Optional) string name of the metric instance.
+ dtype: (Optional) data type of the metric result.
+ threshold: (Optional) Float representing the threshold for deciding
+ whether prediction values are 1 or 0.
+ """
+ super(BinaryAccuracy, self).__init__(
+ binary_accuracy, name, dtype=dtype, threshold=threshold)
+
+
@tf_export('keras.metrics.binary_accuracy')
-def binary_accuracy(y_true, y_pred):
- return K.mean(math_ops.equal(y_true, math_ops.round(y_pred)), axis=-1)
+def binary_accuracy(y_true, y_pred, threshold=0.5):
+ threshold = math_ops.cast(threshold, y_pred.dtype)
+ y_pred = math_ops.cast(y_pred > threshold, y_pred.dtype)
+ return K.mean(math_ops.equal(y_true, y_pred), axis=-1)
@tf_export('keras.metrics.categorical_accuracy')
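The effect of the new `threshold` argument can be reproduced with plain NumPy; the values below mirror the `BinaryAccuracy(threshold=0.7)` test added later in this change (an illustrative sketch, not the Keras backend path):

```python
import numpy as np

y_true = np.array([[1.], [1.], [0.], [0.]])
y_pred = np.array([[0.9], [0.6], [0.4], [0.8]])
threshold = 0.7
# Binarize predictions against the threshold, as binary_accuracy now does.
binarized = (y_pred > threshold).astype(y_pred.dtype)   # [[1.], [0.], [0.], [1.]]
per_sample = np.mean(np.equal(y_true, binarized), axis=-1)
print(per_sample.mean())                                # 0.5
```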
diff --git a/tensorflow/python/keras/metrics_test.py b/tensorflow/python/keras/metrics_test.py
index 15e793f5fc..d583379708 100644
--- a/tensorflow/python/keras/metrics_test.py
+++ b/tensorflow/python/keras/metrics_test.py
@@ -18,67 +18,72 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import os
import numpy as np
-from tensorflow.python import keras
+from tensorflow.python.eager import context
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import test_util
+from tensorflow.python.keras import backend as K
+from tensorflow.python.keras import layers
+from tensorflow.python.keras import metrics
+from tensorflow.python.keras.engine.training import Model
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variables
from tensorflow.python.platform import test
+from tensorflow.python.training.checkpointable import util as checkpointable_utils
class KerasMetricsTest(test.TestCase):
def test_metrics(self):
with self.test_session():
- y_a = keras.backend.variable(np.random.random((6, 7)))
- y_b = keras.backend.variable(np.random.random((6, 7)))
- for metric in [keras.metrics.binary_accuracy,
- keras.metrics.categorical_accuracy]:
+ y_a = K.variable(np.random.random((6, 7)))
+ y_b = K.variable(np.random.random((6, 7)))
+ for metric in [metrics.binary_accuracy, metrics.categorical_accuracy]:
output = metric(y_a, y_b)
- self.assertEqual(keras.backend.eval(output).shape, (6,))
+ self.assertEqual(K.eval(output).shape, (6,))
def test_sparse_categorical_accuracy(self):
with self.test_session():
- metric = keras.metrics.sparse_categorical_accuracy
- y_a = keras.backend.variable(np.random.randint(0, 7, (6,)))
- y_b = keras.backend.variable(np.random.random((6, 7)))
- self.assertEqual(keras.backend.eval(metric(y_a, y_b)).shape, (6,))
+ metric = metrics.sparse_categorical_accuracy
+ y_a = K.variable(np.random.randint(0, 7, (6,)))
+ y_b = K.variable(np.random.random((6, 7)))
+ self.assertEqual(K.eval(metric(y_a, y_b)).shape, (6,))
def test_sparse_top_k_categorical_accuracy(self):
with self.test_session():
- y_pred = keras.backend.variable(np.array([[0.3, 0.2, 0.1],
- [0.1, 0.2, 0.7]]))
- y_true = keras.backend.variable(np.array([[1], [0]]))
- result = keras.backend.eval(
- keras.metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3))
+ y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
+ y_true = K.variable(np.array([[1], [0]]))
+ result = K.eval(
+ metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3))
self.assertEqual(result, 1)
- result = keras.backend.eval(
- keras.metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2))
+ result = K.eval(
+ metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2))
self.assertEqual(result, 0.5)
- result = keras.backend.eval(
- keras.metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1))
+ result = K.eval(
+ metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1))
self.assertEqual(result, 0.)
def test_top_k_categorical_accuracy(self):
with self.test_session():
- y_pred = keras.backend.variable(np.array([[0.3, 0.2, 0.1],
- [0.1, 0.2, 0.7]]))
- y_true = keras.backend.variable(np.array([[0, 1, 0], [1, 0, 0]]))
- result = keras.backend.eval(
- keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3))
+ y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
+ y_true = K.variable(np.array([[0, 1, 0], [1, 0, 0]]))
+ result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=3))
self.assertEqual(result, 1)
- result = keras.backend.eval(
- keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=2))
+ result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=2))
self.assertEqual(result, 0.5)
- result = keras.backend.eval(
- keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=1))
+ result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=1))
self.assertEqual(result, 0.)
def test_stateful_metrics(self):
with self.test_session():
np.random.seed(1334)
- class BinaryTruePositives(keras.layers.Layer):
+ class BinaryTruePositives(layers.Layer):
"""Stateful Metric to count the total true positives over all batches.
Assumes predictions and targets of shape `(samples, 1)`.
@@ -91,11 +96,11 @@ class KerasMetricsTest(test.TestCase):
def __init__(self, name='true_positives', **kwargs):
super(BinaryTruePositives, self).__init__(name=name, **kwargs)
- self.true_positives = keras.backend.variable(value=0, dtype='int32')
+ self.true_positives = K.variable(value=0, dtype='int32')
self.stateful = True
def reset_states(self):
- keras.backend.set_value(self.true_positives, 0)
+ K.set_value(self.true_positives, 0)
def __call__(self, y_true, y_pred):
"""Computes the number of true positives in a batch.
@@ -120,14 +125,14 @@ class KerasMetricsTest(test.TestCase):
return current_true_pos + true_pos
metric_fn = BinaryTruePositives()
- config = keras.metrics.serialize(metric_fn)
- metric_fn = keras.metrics.deserialize(
+ config = metrics.serialize(metric_fn)
+ metric_fn = metrics.deserialize(
config, custom_objects={'BinaryTruePositives': BinaryTruePositives})
# Test on simple model
- inputs = keras.Input(shape=(2,))
- outputs = keras.layers.Dense(1, activation='sigmoid')(inputs)
- model = keras.Model(inputs, outputs)
+ inputs = layers.Input(shape=(2,))
+ outputs = layers.Dense(1, activation='sigmoid')(inputs)
+ model = Model(inputs, outputs)
model.compile(optimizer='sgd',
loss='binary_crossentropy',
metrics=['acc', metric_fn])
@@ -184,6 +189,214 @@ class KerasMetricsTest(test.TestCase):
self.assertAllClose(
val_outs[2], history.history['val_true_positives'][-1], atol=1e-5)
+ @test_util.run_in_graph_and_eager_modes
+ def test_mean(self):
+ m = metrics.Mean(name='my_mean')
+
+ # check config
+ self.assertEqual(m.name, 'my_mean')
+ self.assertTrue(m.stateful)
+ self.assertEqual(m.dtype, dtypes.float32)
+ self.assertEqual(len(m.variables), 2)
+ self.evaluate(variables.global_variables_initializer())
+
+ # check initial state
+ self.assertEqual(self.evaluate(m.total), 0)
+ self.assertEqual(self.evaluate(m.count), 0)
+
+ # check __call__()
+ self.assertEqual(self.evaluate(m(100)), 100)
+ self.assertEqual(self.evaluate(m.total), 100)
+ self.assertEqual(self.evaluate(m.count), 1)
+
+ # check update_state() and result() + state accumulation + tensor input
+ update_op = m.update_state(ops.convert_n_to_tensor([1, 5]))
+ self.evaluate(update_op)
+ self.assertAlmostEqual(self.evaluate(m.result()), 106 / 3, 2)
+ self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5
+ self.assertEqual(self.evaluate(m.count), 3)
+
+ # check reset_states()
+ m.reset_states()
+ self.assertEqual(self.evaluate(m.total), 0)
+ self.assertEqual(self.evaluate(m.count), 0)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_mean_with_sample_weight(self):
+ m = metrics.Mean(dtype=dtypes.float64)
+ self.assertEqual(m.dtype, dtypes.float64)
+ self.evaluate(variables.global_variables_initializer())
+
+ # check scalar weight
+ result_t = m(100, sample_weight=0.5)
+ self.assertEqual(self.evaluate(result_t), 50 / 0.5)
+ self.assertEqual(self.evaluate(m.total), 50)
+ self.assertEqual(self.evaluate(m.count), 0.5)
+
+ # check weights not scalar and weights rank matches values rank
+ result_t = m([1, 5], sample_weight=[1, 0.2])
+ result = self.evaluate(result_t)
+ self.assertAlmostEqual(result, 52 / 1.7, 2)
+ self.assertAlmostEqual(self.evaluate(m.total), 52, 2) # 50 + 1 + 5 * 0.2
+ self.assertAlmostEqual(self.evaluate(m.count), 1.7, 2) # 0.5 + 1.2
+
+ # check weights broadcast
+ result_t = m([1, 2], sample_weight=0.5)
+ self.assertAlmostEqual(self.evaluate(result_t), 53.5 / 2.7, 2)
+ self.assertAlmostEqual(self.evaluate(m.total), 53.5, 2) # 52 + 0.5 + 1
+ self.assertAlmostEqual(self.evaluate(m.count), 2.7, 2) # 1.7 + 0.5 + 0.5
+
+ # check weights squeeze
+ result_t = m([1, 5], sample_weight=[[1], [0.2]])
+ self.assertAlmostEqual(self.evaluate(result_t), 55.5 / 3.9, 2)
+ self.assertAlmostEqual(self.evaluate(m.total), 55.5, 2) # 53.5 + 1 + 1
+ self.assertAlmostEqual(self.evaluate(m.count), 3.9, 2) # 2.7 + 1.2
+
+ # check weights expand
+ result_t = m([[1], [5]], sample_weight=[1, 0.2])
+ self.assertAlmostEqual(self.evaluate(result_t), 57.5 / 5.1, 2)
+ self.assertAlmostEqual(self.evaluate(m.total), 57.5, 2) # 55.5 + 1 + 1
+ self.assertAlmostEqual(self.evaluate(m.count), 5.1, 2) # 3.9 + 1.2
+
+ def test_mean_graph_with_placeholder(self):
+ with context.graph_mode(), self.test_session() as sess:
+ m = metrics.Mean()
+ v = array_ops.placeholder(dtypes.float32)
+ w = array_ops.placeholder(dtypes.float32)
+ sess.run(variables.global_variables_initializer())
+
+ # check __call__()
+ result_t = m(v, sample_weight=w)
+ result = sess.run(result_t, feed_dict=({v: 100, w: 0.5}))
+ self.assertEqual(sess.run(m.total), 50)
+ self.assertEqual(sess.run(m.count), 0.5)
+ self.assertEqual(result, 50 / 0.5)
+
+ # check update_state() and result()
+ result = sess.run(result_t, feed_dict=({v: [1, 5], w: [1, 0.2]}))
+ self.assertAlmostEqual(sess.run(m.total), 52, 2) # 50 + 1 + 5 * 0.2
+ self.assertAlmostEqual(sess.run(m.count), 1.7, 2) # 0.5 + 1.2
+ self.assertAlmostEqual(result, 52 / 1.7, 2)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_save_restore(self):
+ checkpoint_directory = self.get_temp_dir()
+ checkpoint_prefix = os.path.join(checkpoint_directory, 'ckpt')
+ m = metrics.Mean()
+ checkpoint = checkpointable_utils.Checkpoint(mean=m)
+ self.evaluate(variables.global_variables_initializer())
+
+ # update state
+ self.evaluate(m(100.))
+ self.evaluate(m(200.))
+
+ # save checkpoint and then add an update
+ save_path = checkpoint.save(checkpoint_prefix)
+ self.evaluate(m(1000.))
+
+ # restore to the same checkpoint mean object
+ checkpoint.restore(save_path).assert_consumed().run_restore_ops()
+ self.evaluate(m(300.))
+ self.assertEqual(200., self.evaluate(m.result()))
+
+ # restore to a different checkpoint mean object
+ restore_mean = metrics.Mean()
+ restore_checkpoint = checkpointable_utils.Checkpoint(mean=restore_mean)
+ status = restore_checkpoint.restore(save_path)
+ restore_update = restore_mean(300.)
+ status.assert_consumed().run_restore_ops()
+ self.evaluate(restore_update)
+ self.assertEqual(200., self.evaluate(restore_mean.result()))
+ self.assertEqual(3, self.evaluate(restore_mean.count))
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_binary_accuracy(self):
+ acc_obj = metrics.BinaryAccuracy(name='my acc')
+
+ # check config
+ self.assertEqual(acc_obj.name, 'my acc')
+ self.assertTrue(acc_obj.stateful)
+ self.assertEqual(len(acc_obj.variables), 2)
+ self.assertEqual(acc_obj.dtype, dtypes.float32)
+ self.evaluate(variables.global_variables_initializer())
+
+ # verify that correct value is returned
+ update_op = acc_obj.update_state([[1], [0]], [[1], [0]])
+ self.evaluate(update_op)
+ result = self.evaluate(acc_obj.result())
+ self.assertEqual(result, 1) # 2/2
+
+ # check y_pred squeeze
+ update_op = acc_obj.update_state([[1], [1]], [[[1]], [[0]]])
+ self.evaluate(update_op)
+ result = self.evaluate(acc_obj.result())
+ self.assertAlmostEqual(result, 0.75, 2) # 3/4
+
+ # check y_true squeeze
+ result_t = acc_obj([[[1]], [[1]]], [[1], [0]])
+ result = self.evaluate(result_t)
+ self.assertAlmostEqual(result, 0.67, 2) # 4/6
+
+ # check with sample_weight
+ result_t = acc_obj([[1], [1]], [[1], [0]], [[0.5], [0.2]])
+ result = self.evaluate(result_t)
+ self.assertAlmostEqual(result, 0.67, 2) # 4.5/6.7
+
+ # check incompatible shapes
+ with self.assertRaisesRegexp(ValueError,
+ r'Shapes \(1,\) and \(2,\) are incompatible'):
+ acc_obj.update_state([1, 1], [1])
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_binary_accuracy_threshold(self):
+ acc_obj = metrics.BinaryAccuracy(threshold=0.7)
+ self.evaluate(variables.global_variables_initializer())
+ result_t = acc_obj([[1], [1], [0], [0]], [[0.9], [0.6], [0.4], [0.8]])
+ result = self.evaluate(result_t)
+ self.assertAlmostEqual(result, 0.5, 2)
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_invalid_result(self):
+
+ class InvalidResult(metrics.Metric):
+
+ def __init__(self, name='invalid-result', dtype=dtypes.float64):
+ super(InvalidResult, self).__init__(name=name, dtype=dtype)
+
+ def update_state(self, *args, **kwargs):
+ pass
+
+ def result(self):
+ return 1
+
+ invalid_result_obj = InvalidResult()
+ with self.assertRaisesRegexp(
+ TypeError,
+ 'Metric invalid-result\'s result must be a Tensor or Operation, given:'
+ ):
+ invalid_result_obj.result()
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_invalid_update(self):
+
+ class InvalidUpdate(metrics.Metric):
+
+ def __init__(self, name='invalid-update', dtype=dtypes.float64):
+ super(InvalidUpdate, self).__init__(name=name, dtype=dtype)
+
+ def update_state(self, *args, **kwargs):
+ return [1]
+
+ def result(self):
+ pass
+
+ invalid_update_obj = InvalidUpdate()
+ with self.assertRaisesRegexp(
+ TypeError,
+ 'Metric invalid-update\'s update must be a Tensor or Operation, given:'
+ ):
+ invalid_update_obj.update_state()
+
if __name__ == '__main__':
test.main()
diff --git a/tensorflow/python/keras/model_subclassing_test.py b/tensorflow/python/keras/model_subclassing_test.py
index 3ac4852eff..5fbc191e78 100644
--- a/tensorflow/python/keras/model_subclassing_test.py
+++ b/tensorflow/python/keras/model_subclassing_test.py
@@ -29,6 +29,8 @@ from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import embedding_ops
+from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
from tensorflow.python.training.checkpointable import data_structures
@@ -65,6 +67,22 @@ class SimpleTestModel(keras.Model):
return self.dense2(x)
+class SimpleConvTestModel(keras.Model):
+
+ def __init__(self, num_classes=10):
+ super(SimpleConvTestModel, self).__init__(name='test_model')
+ self.num_classes = num_classes
+
+ self.conv1 = keras.layers.Conv2D(32, (3, 3), activation='relu')
+ self.flatten = keras.layers.Flatten()
+ self.dense1 = keras.layers.Dense(num_classes, activation='softmax')
+
+ def call(self, x):
+ x = self.conv1(x)
+ x = self.flatten(x)
+ return self.dense1(x)
+
+
class MultiIOTestModel(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=(2, 3)):
@@ -174,6 +192,213 @@ def get_nested_model_3(input_dim, num_classes):
class ModelSubclassingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
+ def test_invalid_input_shape_build(self):
+ num_classes = 2
+ input_dim = 50
+
+ model = SimpleTestModel(num_classes=num_classes,
+ use_dp=True,
+ use_bn=True)
+
+ self.assertFalse(model.built, 'Model should not have been built')
+ self.assertFalse(model.weights, ('Model should have no weights since it '
+ 'has not been built.'))
+ with self.assertRaisesRegexp(
+ ValueError, 'input shape is not one of the valid types'):
+ model.build(input_shape=tensor_shape.Dimension(input_dim))
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_embed_dtype_with_subclass_build(self):
+ class Embedding(keras.layers.Layer):
+ """An Embedding layer."""
+
+ def __init__(self, vocab_size, embedding_dim, **kwargs):
+ super(Embedding, self).__init__(**kwargs)
+ self.vocab_size = vocab_size
+ self.embedding_dim = embedding_dim
+
+ def build(self, _):
+ self.embedding = self.add_variable(
+ 'embedding_kernel',
+ shape=[self.vocab_size, self.embedding_dim],
+ dtype=np.float32,
+ initializer=init_ops.random_uniform_initializer(-0.1, 0.1),
+ trainable=True)
+
+ def call(self, x):
+ return embedding_ops.embedding_lookup(self.embedding, x)
+
+ class EmbedModel(keras.Model):
+
+ def __init__(self, vocab_size, embed_size):
+ super(EmbedModel, self).__init__()
+ self.embed1 = Embedding(vocab_size, embed_size)
+
+ def call(self, inputs):
+ return self.embed1(inputs)
+
+ model = EmbedModel(100, 20)
+ self.assertFalse(model.built, 'Model should not have been built')
+ self.assertFalse(model.weights, ('Model should have no weights since it '
+ 'has not been built.'))
+ with self.assertRaisesRegexp(
+ ValueError, 'if your layers do not support float type inputs'):
+ model.build(input_shape=(35, 20))
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_single_time_step_rnn_build(self):
+ dim = 4
+ timesteps = 1
+ batch_input_shape = (None, timesteps, dim)
+ units = 3
+
+ class SimpleRNNModel(keras.Model):
+
+ def __init__(self):
+ super(SimpleRNNModel, self).__init__()
+ self.lstm = keras.layers.LSTM(units)
+
+ def call(self, inputs):
+ return self.lstm(inputs)
+
+ model = SimpleRNNModel()
+ self.assertFalse(model.built, 'Model should not have been built')
+ self.assertFalse(model.weights, ('Model should have no weights since it '
+ 'has not been built.'))
+ model.build(batch_input_shape)
+ self.assertTrue(model.weights, ('Model should have weights now that it '
+ 'has been properly built.'))
+ self.assertTrue(model.built, 'Model should be built after calling `build`.')
+ model(array_ops.ones((32, timesteps, dim)))
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_single_io_subclass_build(self):
+ num_classes = 2
+ input_dim = 50
+ batch_size = None
+
+ model = SimpleTestModel(num_classes=num_classes,
+ use_dp=True,
+ use_bn=True)
+
+ self.assertFalse(model.built, 'Model should not have been built')
+ self.assertFalse(model.weights, ('Model should have no weights since it '
+ 'has not been built.'))
+ model.build(input_shape=(batch_size, input_dim))
+ self.assertTrue(model.weights, ('Model should have weights now that it '
+ 'has been properly built.'))
+ self.assertTrue(model.built, 'Model should be built after calling `build`.')
+ model(array_ops.ones((32, input_dim)))
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_single_io_dimension_subclass_build(self):
+ num_classes = 2
+ input_dim = tensor_shape.Dimension(50)
+ batch_size = tensor_shape.Dimension(None)
+
+ model = SimpleTestModel(num_classes=num_classes,
+ use_dp=True,
+ use_bn=True)
+
+ self.assertFalse(model.built, 'Model should not have been built')
+ self.assertFalse(model.weights, ('Model should have no weights since it '
+ 'has not been built.'))
+ model.build(input_shape=(batch_size, input_dim))
+ self.assertTrue(model.weights, ('Model should have weights now that it '
+ 'has been properly built.'))
+ self.assertTrue(model.built, 'Model should be built after calling `build`.')
+ model(array_ops.ones((32, input_dim)))
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_multidim_io_subclass_build(self):
+ num_classes = 10
+ # Input size, e.g. image
+ batch_size = 32
+ input_shape = (32, 32, 3)
+
+ model = SimpleConvTestModel(num_classes)
+ self.assertFalse(model.built, 'Model should not have been built')
+ self.assertFalse(model.weights, ('Model should have no weights since it '
+ 'has not been built.'))
+ batch_input_shape = (batch_size,) + input_shape
+ model.build(input_shape=batch_input_shape)
+ self.assertTrue(model.weights, ('Model should have weights now that it '
+ 'has been properly built.'))
+ self.assertTrue(model.built, 'Model should be built after calling `build`.')
+
+ model(array_ops.ones(batch_input_shape))
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_tensorshape_io_subclass_build(self):
+ num_classes = 10
+ # Input size, e.g. image
+ batch_size = None
+ input_shape = (32, 32, 3)
+
+ model = SimpleConvTestModel(num_classes)
+ self.assertFalse(model.built, 'Model should not have been built')
+ self.assertFalse(model.weights, ('Model should have no weights since it '
+ 'has not been built.'))
+ model.build(
+ input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
+ self.assertTrue(model.weights, ('Model should have weights now that it '
+ 'has been properly built.'))
+ self.assertTrue(model.built, 'Model should be built after calling `build`.')
+
+ model(array_ops.ones((32,) + input_shape))
+
+ def test_subclass_save_model(self):
+ num_classes = 10
+ # Input size, e.g. image
+ batch_size = None
+ input_shape = (32, 32, 3)
+
+ model = SimpleConvTestModel(num_classes)
+ self.assertFalse(model.built, 'Model should not have been built')
+ self.assertFalse(model.weights, ('Model should have no weights since it '
+ 'has not been built.'))
+ model.build(
+ input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
+ self.assertTrue(model.weights, ('Model should have weights now that it '
+ 'has been properly built.'))
+ self.assertTrue(model.built, 'Model should be built after calling `build`.')
+ weights = model.get_weights()
+
+ tf_format_name = os.path.join(self.get_temp_dir(), 'ckpt')
+ model.save_weights(tf_format_name)
+ if h5py is not None:
+ hdf5_format_name = os.path.join(self.get_temp_dir(), 'weights.h5')
+ model.save_weights(hdf5_format_name)
+
+ model = SimpleConvTestModel(num_classes)
+ model.build(
+ input_shape=tensor_shape.TensorShape((batch_size,) + input_shape))
+ if h5py is not None:
+ model.load_weights(hdf5_format_name)
+ self.assertAllClose(weights, model.get_weights())
+ model.load_weights(tf_format_name)
+ self.assertAllClose(weights, model.get_weights())
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_multi_io_subclass_build(self):
+ batch_size = None
+ num_samples = 1000
+ input_dim = 50
+ model = MultiIOTestModel()
+ self.assertFalse(model.built, 'Model should not have been built')
+ self.assertFalse(model.weights, ('Model should have no weights since it '
+ 'has not been built.'))
+ batch_input_shape = tensor_shape.TensorShape((batch_size, input_dim))
+ model.build(
+ input_shape=[batch_input_shape, batch_input_shape])
+ self.assertTrue(model.weights, ('Model should have weights now that it '
+ 'has been properly built.'))
+ self.assertTrue(model.built, 'Model should be built after calling `build`.')
+ x1 = array_ops.ones((num_samples, input_dim))
+ x2 = array_ops.ones((num_samples, input_dim))
+ model([x1, x2])
+
+ @test_util.run_in_graph_and_eager_modes
def test_single_io_workflow_with_np_arrays(self):
num_classes = 2
num_samples = 100
@@ -750,6 +975,16 @@ class CustomCallModel(keras.Model):
return combined
+class TrainingNoDefaultModel(keras.Model):
+
+ def __init__(self):
+ super(TrainingNoDefaultModel, self).__init__()
+ self.dense1 = keras.layers.Dense(1)
+
+ def call(self, x, training):
+ return self.dense1(x)
+
+
class CustomCallSignatureTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@@ -767,6 +1002,32 @@ class CustomCallSignatureTests(test.TestCase):
self.assertAllClose(expected_output, self.evaluate(output))
@test_util.run_in_graph_and_eager_modes
+ def test_training_args_call_build(self):
+ input_dim = 2
+
+ model = TrainingNoDefaultModel()
+ self.assertFalse(model.built, 'Model should not have been built')
+ self.assertFalse(model.weights, ('Model should have no weights since it '
+ 'has not been built.'))
+ model.build((None, input_dim))
+ self.assertTrue(model.weights, ('Model should have weights now that it '
+ 'has been properly built.'))
+ self.assertTrue(model.built, 'Model should be built after calling `build`.')
+
+ @test_util.run_in_graph_and_eager_modes
+ def test_custom_call_kwargs_and_build(self):
+ first_input_shape = (2, 3)
+ second_input_shape = (2, 5)
+
+ model = CustomCallModel()
+ self.assertFalse(model.built, 'Model should not have been built')
+ self.assertFalse(model.weights, ('Model should have no weights since it '
+ 'has not been built.'))
+ with self.assertRaisesRegexp(
+ ValueError, 'cannot build your model if it has positional'):
+ model.build(input_shape=[first_input_shape, second_input_shape])
+
+ @test_util.run_in_graph_and_eager_modes
def test_inputs_in_signature(self):
class HasInputsAndOtherPositional(keras.Model):
@@ -829,14 +1090,9 @@ class CustomCallSignatureTests(test.TestCase):
def test_training_no_default(self):
- class TrainingNoDefault(keras.Model):
-
- def call(self, x, training):
- return x
-
with context.graph_mode():
- model = TrainingNoDefault()
- arg = array_ops.ones([])
+ model = TrainingNoDefaultModel()
+ arg = array_ops.ones([1, 1])
model(arg, True)
six.assertCountEqual(self, [arg], model.inputs)
diff --git a/tensorflow/python/keras/testing_utils.py b/tensorflow/python/keras/testing_utils.py
index 17aba7d86c..6e8ee06ff5 100644
--- a/tensorflow/python/keras/testing_utils.py
+++ b/tensorflow/python/keras/testing_utils.py
@@ -18,7 +18,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from collections import OrderedDict
import numpy as np
from tensorflow.python import keras
@@ -185,75 +184,3 @@ def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
# for further checks in the caller function
return actual_output
-
-def _combine_named_parameters(**kwargs):
- """Generate combinations based on its keyword arguments.
-
- Two sets of returned combinations can be concatenated using +. Their product
- can be computed using `times()`.
-
- Args:
- **kwargs: keyword arguments of form `option=[possibilities, ...]`
- or `option=the_only_possibility`.
-
- Returns:
- a list of dictionaries for each combination. Keys in the dictionaries are
- the keyword argument names. Each key has one value - one of the
- corresponding keyword argument values.
- """
- if not kwargs:
- return [OrderedDict()]
-
- sort_by_key = lambda k: k[0][0]
- kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
- first = list(kwargs.items())[0]
-
- rest = dict(list(kwargs.items())[1:])
- rest_combined = _combine_named_parameters(**rest)
-
- key = first[0]
- values = first[1]
- if not isinstance(values, list):
- values = [values]
-
- combinations = [
- OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
- for v in values
- for combined in rest_combined
- ]
- return combinations
-
-
-def generate_combinations_with_testcase_name(**kwargs):
- """Generate combinations based on its keyword arguments using combine().
-
- This function calls combine() and appends a testcase name to the list of
- dictionaries returned. The 'testcase_name' key is a required for named
- parameterized tests.
-
- Args:
- **kwargs: keyword arguments of form `option=[possibilities, ...]`
- or `option=the_only_possibility`.
-
- Returns:
- a list of dictionaries for each combination. Keys in the dictionaries are
- the keyword argument names. Each key has one value - one of the
- corresponding keyword argument values.
- """
- combinations = _combine_named_parameters(**kwargs)
- named_combinations = []
- for combination in combinations:
- assert isinstance(combination, OrderedDict)
- name = ''.join([
- '_{}_{}'.format(
- ''.join(filter(str.isalnum, key)),
- ''.join(filter(str.isalnum, str(value))))
- for key, value in combination.items()
- ])
- named_combinations.append(
- OrderedDict(
- list(combination.items()) + [('testcase_name',
- '_test{}'.format(name))]))
-
- return named_combinations
-
diff --git a/tensorflow/python/keras/utils/np_utils.py b/tensorflow/python/keras/utils/np_utils.py
index 9d9c72b162..c24e87308b 100644
--- a/tensorflow/python/keras/utils/np_utils.py
+++ b/tensorflow/python/keras/utils/np_utils.py
@@ -33,7 +33,8 @@ def to_categorical(y, num_classes=None):
num_classes: total number of classes.
Returns:
- A binary matrix representation of the input.
+ A binary matrix representation of the input. The classes axis is placed
+ last.
"""
y = np.array(y, dtype='int')
input_shape = y.shape
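A small NumPy sketch of what "the classes axis is placed last" means for the returned matrix (illustrative only):

```python
import numpy as np

y = np.array([0, 2, 3])           # integer class labels, shape (3,)
num_classes = 4
one_hot = np.eye(num_classes)[y]  # shape (3, 4): the classes axis is last
print(one_hot.shape)              # (3, 4)
```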
diff --git a/tensorflow/python/kernel_tests/BUILD b/tensorflow/python/kernel_tests/BUILD
index 838cf836f1..adf97569ab 100644
--- a/tensorflow/python/kernel_tests/BUILD
+++ b/tensorflow/python/kernel_tests/BUILD
@@ -2845,7 +2845,6 @@ cuda_py_test(
"//tensorflow/python:math_ops",
],
shard_count = 20,
- tags = ["nomsan"], # TODO(b/110990716) reenable
)
cuda_py_test(
@@ -3095,7 +3094,7 @@ tf_py_test(
tf_py_test(
name = "cond_v2_test",
- size = "small",
+ size = "medium",
srcs = ["cond_v2_test.py"],
additional_deps = [
"//tensorflow/python:array_ops",
@@ -3110,4 +3109,5 @@ tf_py_test(
"//tensorflow/python:training",
],
grpc_enabled = True,
+ tags = ["no_gpu"], # TODO(b/111656070)
)
diff --git a/tensorflow/python/kernel_tests/argmax_op_test.py b/tensorflow/python/kernel_tests/argmax_op_test.py
index ce06769902..1202c463e8 100644
--- a/tensorflow/python/kernel_tests/argmax_op_test.py
+++ b/tensorflow/python/kernel_tests/argmax_op_test.py
@@ -20,6 +20,7 @@ from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@@ -115,6 +116,12 @@ class ArgMaxTest(test.TestCase):
ans = op([1]).eval()
self.assertAllEqual(ans, 0)
+ def testOutputEmpty(self):
+ with self.test_session():
+ for op in math_ops.argmin, math_ops.argmax:
+ ret = op(array_ops.zeros(shape=[1, 0, 2]), axis=-1).eval()
+ self.assertEqual(ret.shape, (1, 0))
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/kernel_tests/bitcast_op_test.py b/tensorflow/python/kernel_tests/bitcast_op_test.py
index a535468b05..a2c6b54273 100644
--- a/tensorflow/python/kernel_tests/bitcast_op_test.py
+++ b/tensorflow/python/kernel_tests/bitcast_op_test.py
@@ -76,12 +76,18 @@ class BitcastTest(test.TestCase):
datatype = dtypes.int8
array_ops.bitcast(x, datatype, None)
- def testQuantizeType(self):
+ def testQuantizedType(self):
shape = [3, 4]
x = np.zeros(shape, np.uint16)
datatype = dtypes.quint16
self._testBitcast(x, datatype, shape)
+ def testUnsignedType(self):
+ shape = [3, 4]
+ x = np.zeros(shape, np.int64)
+ datatype = dtypes.uint64
+ self._testBitcast(x, datatype, shape)
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/kernel_tests/cond_v2_test.py b/tensorflow/python/kernel_tests/cond_v2_test.py
index 759db5d5f4..97ce245fc8 100644
--- a/tensorflow/python/kernel_tests/cond_v2_test.py
+++ b/tensorflow/python/kernel_tests/cond_v2_test.py
@@ -22,6 +22,7 @@ from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2
@@ -35,10 +36,12 @@ from tensorflow.python.training import saver
from tensorflow.python.util import compat
-class NewCondTest(test.TestCase):
+class CondV2Test(test.TestCase):
- def _testCond(self, true_fn, false_fn, train_vals):
- with self.test_session() as sess:
+ def _testCond(self, true_fn, false_fn, train_vals, feed_dict=None):
+ if not feed_dict:
+ feed_dict = {}
+ with self.test_session(graph=ops.get_default_graph()) as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
expected = control_flow_ops.cond(pred, true_fn, false_fn, name="expected")
@@ -47,13 +50,17 @@ class NewCondTest(test.TestCase):
expected_grad = gradients_impl.gradients(expected, train_vals)
actual_grad = gradients_impl.gradients(actual, train_vals)
+ sess_run_args = {pred: True}
+ sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
- (expected, actual, expected_grad, actual_grad), {pred: True})
+ (expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
+ sess_run_args = {pred: False}
+ sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
- (expected, actual, expected_grad, actual_grad), {pred: False})
+ (expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
@@ -131,6 +138,349 @@ class NewCondTest(test.TestCase):
self.assertIn("foo_cond_1_true", ops.get_default_graph()._functions)
self.assertIn("foo_cond_1_false", ops.get_default_graph()._functions)
+ def testDefunInCond(self):
+ x = constant_op.constant(1.0, name="x")
+ y = constant_op.constant(2.0, name="y")
+
+ def true_fn():
+
+ @function.Defun()
+ def fn():
+ return x * y * 2.0
+
+ return fn()
+
+ def false_fn():
+ return 2.0
+
+ self._testCond(true_fn, false_fn, [x])
+ self._testCond(true_fn, false_fn, [x, y])
+ self._testCond(true_fn, false_fn, [y])
+
+ def testNestedDefunInCond(self):
+ x = constant_op.constant(1.0, name="x")
+ y = constant_op.constant(2.0, name="y")
+
+ def true_fn():
+ return 2.0
+
+ def false_fn():
+
+ @function.Defun()
+ def fn():
+
+ @function.Defun()
+ def nested_fn():
+ return x * y * 2.0
+
+ return nested_fn()
+
+ return fn()
+
+ self._testCond(true_fn, false_fn, [x])
+ self._testCond(true_fn, false_fn, [x, y])
+ self._testCond(true_fn, false_fn, [y])
+
+ def testDoubleNestedDefunInCond(self):
+ x = constant_op.constant(1.0, name="x")
+ y = constant_op.constant(2.0, name="y")
+
+ def true_fn():
+
+ @function.Defun()
+ def fn():
+
+ @function.Defun()
+ def nested_fn():
+
+ @function.Defun()
+ def nested_nested_fn():
+ return x * y * 2.0
+
+ return nested_nested_fn()
+
+ return nested_fn()
+
+ return fn()
+
+ def false_fn():
+ return 2.0
+
+ self._testCond(true_fn, false_fn, [x])
+ self._testCond(true_fn, false_fn, [x, y])
+ self._testCond(true_fn, false_fn, [y])
+
+ def testNestedCond(self):
+
+ def run_test(pred_value):
+
+ def build_graph():
+ pred = array_ops.placeholder(dtypes.bool, name="pred")
+ x = constant_op.constant(1.0, name="x")
+ y = constant_op.constant(2.0, name="y")
+
+ def true_fn():
+ return 2.0
+
+ def false_fn():
+
+ def false_true_fn():
+ return x * y * 2.0
+
+ def false_false_fn():
+ return x * 5.0
+
+ return _cond(pred, false_true_fn, false_false_fn, "inside_false_fn")
+
+ return x, y, pred, true_fn, false_fn
+
+ with ops.Graph().as_default():
+ x, y, pred, true_fn, false_fn = build_graph()
+ self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
+ self._testCond(true_fn, false_fn, [x], {pred: pred_value})
+ self._testCond(true_fn, false_fn, [y], {pred: pred_value})
+
+ run_test(True)
+ run_test(False)
+
+ def testDoubleNestedCond(self):
+
+ def run_test(pred1_value, pred2_value):
+
+ def build_graph():
+ pred1 = array_ops.placeholder(dtypes.bool, name="pred1")
+ pred2 = array_ops.placeholder(dtypes.bool, name="pred2")
+ x = constant_op.constant(1.0, name="x")
+ y = constant_op.constant(2.0, name="y")
+
+ def true_fn():
+ return 2.0
+
+ def false_fn():
+
+ def false_true_fn():
+
+ def false_true_true_fn():
+ return x * y * 2.0
+
+ def false_true_false_fn():
+ return x * 10.0
+
+ return _cond(
+ pred1,
+ false_true_true_fn,
+ false_true_false_fn,
+ name="inside_false_true_fn")
+
+ def false_false_fn():
+ return x * 5.0
+
+ return _cond(
+ pred2, false_true_fn, false_false_fn, name="inside_false_fn")
+
+ return x, y, pred1, pred2, true_fn, false_fn
+
+ with ops.Graph().as_default():
+ x, y, pred1, pred2, true_fn, false_fn = build_graph()
+ self._testCond(true_fn, false_fn, [x, y], {
+ pred1: pred1_value,
+ pred2: pred2_value
+ })
+ x, y, pred1, pred2, true_fn, false_fn = build_graph()
+ self._testCond(true_fn, false_fn, [x], {
+ pred1: pred1_value,
+ pred2: pred2_value
+ })
+ x, y, pred1, pred2, true_fn, false_fn = build_graph()
+ self._testCond(true_fn, false_fn, [y], {
+ pred1: pred1_value,
+ pred2: pred2_value
+ })
+
+ run_test(True, True)
+ run_test(True, False)
+ run_test(False, False)
+ run_test(False, True)
+
+ def testGradientFromInsideDefun(self):
+
+ def build_graph():
+ pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
+ pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
+ x = constant_op.constant(1.0, name="x")
+ y = constant_op.constant(2.0, name="y")
+
+ def true_fn():
+ return 2.0
+
+ def false_fn():
+
+ def inner_true_fn():
+ return x * y * 2.0
+
+ def inner_false_fn():
+ return x * 5.0
+
+ return cond_v2.cond_v2(
+ pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
+
+ cond_outer = cond_v2.cond_v2(
+ pred_outer, true_fn, false_fn, name="outer_cond")
+
+ # Compute grads inside a Defun.
+ @function.Defun()
+ def nesting_fn():
+ return gradients_impl.gradients(cond_outer, [x, y])
+
+ grads = nesting_fn()
+
+ return grads, pred_outer, pred_inner
+
+ with ops.Graph().as_default():
+ grads, pred_outer, pred_inner = build_graph()
+ with self.test_session(graph=ops.get_default_graph()) as sess:
+ self.assertSequenceEqual(
+ sess.run(grads, {
+ pred_outer: True,
+ pred_inner: True
+ }), [0., 0.])
+ self.assertSequenceEqual(
+ sess.run(grads, {
+ pred_outer: True,
+ pred_inner: False
+ }), [0., 0.])
+ self.assertSequenceEqual(
+ sess.run(grads, {
+ pred_outer: False,
+ pred_inner: True
+ }), [4., 2.])
+ self.assertSequenceEqual(
+ sess.run(grads, {
+ pred_outer: False,
+ pred_inner: False
+ }), [5., 0.])
+
+ def testGradientFromInsideNestedDefun(self):
+
+ def build_graph():
+ pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
+ pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
+ x = constant_op.constant(1.0, name="x")
+ y = constant_op.constant(2.0, name="y")
+
+ def true_fn():
+ return 2.0
+
+ def false_fn():
+
+ def inner_true_fn():
+ return x * y * 2.0
+
+ def inner_false_fn():
+ return x * 5.0
+
+ return cond_v2.cond_v2(
+ pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
+
+ cond_outer = cond_v2.cond_v2(
+ pred_outer, true_fn, false_fn, name="outer_cond")
+
+ # Compute grads inside a Defun.
+ @function.Defun()
+ def nesting_fn():
+
+ @function.Defun()
+ def inner_nesting_fn():
+ return gradients_impl.gradients(cond_outer, [x, y])
+
+ return inner_nesting_fn()
+
+ grads = nesting_fn()
+
+ return grads, pred_outer, pred_inner
+
+ with ops.Graph().as_default():
+ grads, pred_outer, pred_inner = build_graph()
+ with self.test_session(graph=ops.get_default_graph()) as sess:
+ self.assertSequenceEqual(
+ sess.run(grads, {
+ pred_outer: True,
+ pred_inner: True
+ }), [0., 0.])
+ self.assertSequenceEqual(
+ sess.run(grads, {
+ pred_outer: True,
+ pred_inner: False
+ }), [0., 0.])
+ self.assertSequenceEqual(
+ sess.run(grads, {
+ pred_outer: False,
+ pred_inner: True
+ }), [4., 2.])
+ self.assertSequenceEqual(
+ sess.run(grads, {
+ pred_outer: False,
+ pred_inner: False
+ }), [5., 0.])
+
+ def testBuildCondAndGradientInsideDefun(self):
+
+ def build_graph():
+ pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
+ pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
+ x = constant_op.constant(1.0, name="x")
+ y = constant_op.constant(2.0, name="y")
+
+ # Build cond and its gradient inside a Defun.
+ @function.Defun()
+ def fn():
+
+ def true_fn():
+ return 2.0
+
+ def false_fn():
+
+ def inner_true_fn():
+ return x * y * 2.0
+
+ def inner_false_fn():
+ return x * 5.0
+
+ return cond_v2.cond_v2(
+ pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
+
+ cond_outer = cond_v2.cond_v2(
+ pred_outer, true_fn, false_fn, name="outer_cond")
+ return gradients_impl.gradients(cond_outer, [x, y])
+
+ grads = fn()
+
+ return grads, pred_outer, pred_inner
+
+ with ops.Graph().as_default():
+ grads, pred_outer, pred_inner = build_graph()
+ with self.test_session(graph=ops.get_default_graph()) as sess:
+ self.assertSequenceEqual(
+ sess.run(grads, {
+ pred_outer: True,
+ pred_inner: True
+ }), [0., 0.])
+ self.assertSequenceEqual(
+ sess.run(grads, {
+ pred_outer: True,
+ pred_inner: False
+ }), [0., 0.])
+ self.assertSequenceEqual(
+ sess.run(grads, {
+ pred_outer: False,
+ pred_inner: True
+ }), [4., 2.])
+ self.assertSequenceEqual(
+ sess.run(grads, {
+ pred_outer: False,
+ pred_inner: False
+ }), [5., 0.])
+
def testSecondDerivative(self):
with self.test_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
@@ -532,5 +882,17 @@ class CondV2ColocationGroupAndDeviceTest(test.TestCase):
self.assertTrue(len(run_metadata.partition_graphs) >= 2)
+def _cond(pred, true_fn, false_fn, name):
+ if _is_old_cond():
+ return control_flow_ops.cond(pred, true_fn, false_fn, name=name)
+ else:
+ return cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
+
+
+def _is_old_cond():
+ return isinstance(ops.get_default_graph()._get_control_flow_context(),
+ control_flow_ops.CondContext)
+
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/kernel_tests/distributions/util_test.py b/tensorflow/python/kernel_tests/distributions/util_test.py
index 9d38ffcb4a..61faa8466e 100644
--- a/tensorflow/python/kernel_tests/distributions/util_test.py
+++ b/tensorflow/python/kernel_tests/distributions/util_test.py
@@ -311,8 +311,10 @@ class EmbedCheckCategoricalEventShapeTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testUnsupportedDtype(self):
with self.test_session():
+ param = ops.convert_to_tensor(
+ np.ones([2**11 + 1]).astype(dtypes.qint16.as_numpy_dtype),
+ dtype=dtypes.qint16)
with self.assertRaises(TypeError):
- param = array_ops.ones([int(2**11+1)], dtype=dtypes.qint16)
du.embed_check_categorical_event_shape(param)
diff --git a/tensorflow/python/kernel_tests/gather_nd_op_test.py b/tensorflow/python/kernel_tests/gather_nd_op_test.py
index 58e2a8ac2a..c0b419e1d1 100644
--- a/tensorflow/python/kernel_tests/gather_nd_op_test.py
+++ b/tensorflow/python/kernel_tests/gather_nd_op_test.py
@@ -203,8 +203,7 @@ class GatherNdTest(test.TestCase):
indices = [[[0], [7]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
- r"flat indices\[1, :\] = \[7\] does not index into param "
- r"\(shape: \[3\]\)"):
+ r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"):
gather_nd.eval()
def _disabledTestBadIndicesGPU(self):
@@ -217,8 +216,7 @@ class GatherNdTest(test.TestCase):
indices = [[[0], [7]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
- r"flat indices\[1, :\] = \[7\] does not index into param "
- r"\(shape: \[3\]\)"):
+ r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"):
gather_nd.eval()
def testBadIndicesWithSlicesCPU(self):
@@ -227,8 +225,7 @@ class GatherNdTest(test.TestCase):
indices = [[[0], [0], [1]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
- r"flat indices\[2, :\] = \[1\] does not index into param "
- r"\(shape: \[1,3\]\)"):
+ r"indices\[0,2\] = \[1\] does not index into param shape \[1,3\]"):
gather_nd.eval()
def _disabledTestBadIndicesWithSlicesGPU(self):
@@ -241,8 +238,7 @@ class GatherNdTest(test.TestCase):
indices = [[[0], [0], [1]]] # Make this one higher rank
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
- r"flat indices\[2, :\] = \[1\] does not index into param "
- r"\(shape: \[1,3\]\)"):
+ r"indices\[0,2\] = \[1\] does not index into param shape \[1,3\]"):
gather_nd.eval()
def testGradientsRank2Elements(self):
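
The new error text that these assertions expect can be reproduced with a small graph-mode snippet; a minimal sketch (assumes the public `tf.gather_nd` endpoint and a TF 1.x-style session, illustrative only):

```python
import tensorflow as tf

params = [0.0, 1.0, 2.0]    # param shape [3]
indices = [[[0], [7]]]      # index 7 is out of range
gathered = tf.gather_nd(params, indices)
with tf.Session() as sess:
  try:
    sess.run(gathered)
  except tf.errors.InvalidArgumentError as e:
    # Expected to match: indices[0,1] = [7] does not index into param shape [3]
    print(e.message)
```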
diff --git a/tensorflow/python/kernel_tests/linalg/BUILD b/tensorflow/python/kernel_tests/linalg/BUILD
index 69d3aa4017..f4ec3e3996 100644
--- a/tensorflow/python/kernel_tests/linalg/BUILD
+++ b/tensorflow/python/kernel_tests/linalg/BUILD
@@ -197,7 +197,7 @@ cuda_py_test(
cuda_py_test(
name = "linear_operator_low_rank_update_test",
- size = "medium",
+ size = "large",
srcs = ["linear_operator_low_rank_update_test.py"],
additional_deps = [
"//tensorflow/python/ops/linalg",
@@ -234,3 +234,21 @@ cuda_py_test(
"optonly",
],
)
+
+cuda_py_test(
+ name = "linear_operator_zeros_test",
+ size = "medium",
+ srcs = ["linear_operator_zeros_test.py"],
+ additional_deps = [
+ "//tensorflow/python/ops/linalg",
+ "//tensorflow/python:array_ops",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework",
+ "//tensorflow/python:framework_test_lib",
+ "//tensorflow/python:linalg_ops",
+ "//tensorflow/python:platform_test",
+ "//tensorflow/python:random_ops",
+ ],
+ shard_count = 5,
+ tags = ["optonly"], # Test is flaky without optimization.
+)
diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py
new file mode 100644
index 0000000000..8f60b55e0a
--- /dev/null
+++ b/tensorflow/python/kernel_tests/linalg/linear_operator_zeros_test.py
@@ -0,0 +1,192 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import random_seed
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops.linalg import linalg as linalg_lib
+from tensorflow.python.ops.linalg import linear_operator_test_util
+from tensorflow.python.platform import test
+
+
+random_seed.set_random_seed(23)
+rng = np.random.RandomState(2016)
+
+
+class LinearOperatorZerosTest(
+ linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
+ """Most tests done in the base class LinearOperatorDerivedClassTest."""
+
+ @property
+ def _tests_to_skip(self):
+ return ["log_abs_det", "solve", "solve_with_broadcast"]
+
+ @property
+ def _operator_build_infos(self):
+ build_info = linear_operator_test_util.OperatorBuildInfo
+ return [
+ build_info((1, 1)),
+ build_info((1, 3, 3)),
+ build_info((3, 4, 4)),
+ build_info((2, 1, 4, 4))]
+
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
+ del use_placeholder
+ shape = list(build_info.shape)
+ assert shape[-1] == shape[-2]
+
+ batch_shape = shape[:-2]
+ num_rows = shape[-1]
+
+ operator = linalg_lib.LinearOperatorZeros(
+ num_rows, batch_shape=batch_shape, dtype=dtype)
+ matrix = array_ops.zeros(shape=shape, dtype=dtype)
+
+ return operator, matrix
+
+ def test_assert_positive_definite(self):
+ operator = linalg_lib.LinearOperatorZeros(num_rows=2)
+ with self.assertRaisesOpError("non-positive definite"):
+ operator.assert_positive_definite()
+
+ def test_assert_non_singular(self):
+ with self.assertRaisesOpError("non-invertible"):
+ operator = linalg_lib.LinearOperatorZeros(num_rows=2)
+ operator.assert_non_singular()
+
+ def test_assert_self_adjoint(self):
+ with self.test_session():
+ operator = linalg_lib.LinearOperatorZeros(num_rows=2)
+ operator.assert_self_adjoint().run() # Should not fail
+
+ def test_non_scalar_num_rows_raises_static(self):
+ with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
+ linalg_lib.LinearOperatorZeros(num_rows=[2])
+ with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
+ linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=[2])
+
+ def test_non_integer_num_rows_raises_static(self):
+ with self.assertRaisesRegexp(TypeError, "must be integer"):
+ linalg_lib.LinearOperatorZeros(num_rows=2.)
+ with self.assertRaisesRegexp(TypeError, "must be integer"):
+ linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=2.)
+
+ def test_negative_num_rows_raises_static(self):
+ with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+ linalg_lib.LinearOperatorZeros(num_rows=-2)
+ with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+ linalg_lib.LinearOperatorZeros(num_rows=2, num_columns=-2)
+
+ def test_non_1d_batch_shape_raises_static(self):
+ with self.assertRaisesRegexp(ValueError, "must be a 1-D"):
+ linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=2)
+
+ def test_non_integer_batch_shape_raises_static(self):
+ with self.assertRaisesRegexp(TypeError, "must be integer"):
+ linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=[2.])
+
+ def test_negative_batch_shape_raises_static(self):
+ with self.assertRaisesRegexp(ValueError, "must be non-negative"):
+ linalg_lib.LinearOperatorZeros(num_rows=2, batch_shape=[-2])
+
+ def test_non_scalar_num_rows_raises_dynamic(self):
+ with self.test_session():
+ num_rows = array_ops.placeholder(dtypes.int32)
+ operator = linalg_lib.LinearOperatorZeros(
+ num_rows, assert_proper_shapes=True)
+ with self.assertRaisesOpError("must be a 0-D Tensor"):
+ operator.to_dense().eval(feed_dict={num_rows: [2]})
+
+ def test_negative_num_rows_raises_dynamic(self):
+ with self.test_session():
+ n = array_ops.placeholder(dtypes.int32)
+ operator = linalg_lib.LinearOperatorZeros(
+ num_rows=n, assert_proper_shapes=True)
+ with self.assertRaisesOpError("must be non-negative"):
+ operator.to_dense().eval(feed_dict={n: -2})
+
+ operator = linalg_lib.LinearOperatorZeros(
+ num_rows=2, num_columns=n, assert_proper_shapes=True)
+ with self.assertRaisesOpError("must be non-negative"):
+ operator.to_dense().eval(feed_dict={n: -2})
+
+ def test_non_1d_batch_shape_raises_dynamic(self):
+ with self.test_session():
+ batch_shape = array_ops.placeholder(dtypes.int32)
+ operator = linalg_lib.LinearOperatorZeros(
+ num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
+ with self.assertRaisesOpError("must be a 1-D"):
+ operator.to_dense().eval(feed_dict={batch_shape: 2})
+
+ def test_negative_batch_shape_raises_dynamic(self):
+ with self.test_session():
+ batch_shape = array_ops.placeholder(dtypes.int32)
+ operator = linalg_lib.LinearOperatorZeros(
+ num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
+ with self.assertRaisesOpError("must be non-negative"):
+ operator.to_dense().eval(feed_dict={batch_shape: [-2]})
+
+ def test_wrong_matrix_dimensions_raises_static(self):
+ operator = linalg_lib.LinearOperatorZeros(num_rows=2)
+ x = rng.randn(3, 3).astype(np.float32)
+ with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
+ operator.matmul(x)
+
+ def test_wrong_matrix_dimensions_raises_dynamic(self):
+ num_rows = array_ops.placeholder(dtypes.int32)
+ x = array_ops.placeholder(dtypes.float32)
+
+ with self.test_session():
+ operator = linalg_lib.LinearOperatorZeros(
+ num_rows, assert_proper_shapes=True)
+ y = operator.matmul(x)
+ with self.assertRaisesOpError("Incompatible.*dimensions"):
+ y.eval(feed_dict={num_rows: 2, x: rng.rand(3, 3)})
+
+ def test_is_x_flags(self):
+ # For a zero operator, the defaults are: not positive-definite, not
+ # non-singular, and self-adjoint.
+ operator = linalg_lib.LinearOperatorZeros(num_rows=2)
+ self.assertFalse(operator.is_positive_definite)
+ self.assertFalse(operator.is_non_singular)
+ self.assertTrue(operator.is_self_adjoint)
+
+
+class LinearOperatorZerosNotSquareTest(
+ linear_operator_test_util.NonSquareLinearOperatorDerivedClassTest):
+
+ def _operator_and_matrix(self, build_info, dtype, use_placeholder):
+ del use_placeholder
+ shape = list(build_info.shape)
+
+ batch_shape = shape[:-2]
+ num_rows = shape[-2]
+ num_columns = shape[-1]
+
+ operator = linalg_lib.LinearOperatorZeros(
+ num_rows, num_columns, is_square=False, is_self_adjoint=False,
+ batch_shape=batch_shape, dtype=dtype)
+ matrix = array_ops.zeros(shape=shape, dtype=dtype)
+
+ return operator, matrix
+
+
+if __name__ == "__main__":
+ test.main()
diff --git a/tensorflow/python/kernel_tests/resource_variable_ops_test.py b/tensorflow/python/kernel_tests/resource_variable_ops_test.py
index e358293a90..c739cd2c0d 100644
--- a/tensorflow/python/kernel_tests/resource_variable_ops_test.py
+++ b/tensorflow/python/kernel_tests/resource_variable_ops_test.py
@@ -246,6 +246,15 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
read = resource_variable_ops.read_variable_op(handle, dtype=dtypes.int32)
self.assertEqual(self.evaluate(read), [[2]])
+ def testUseResource(self):
+ v = variables.Variable(1.0, use_resource=True)
+ self.assertTrue(isinstance(v, resource_variable_ops.ResourceVariable))
+
+ def testEagerNoUseResource(self):
+ with context.eager_mode():
+ v = variables.Variable(1.0)
+ self.assertTrue(isinstance(v, resource_variable_ops.ResourceVariable))
+
@test_util.run_in_graph_and_eager_modes
def testScatterMin(self):
with ops.device("cpu:0"):
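
The two new tests above cover the `use_resource` path; a minimal usage sketch (assumes the public `tf.Variable` endpoint in TF 1.x graph mode, illustrative only):

```python
import tensorflow as tf

# With use_resource=True, graph mode yields a resource variable instead of a
# reference variable; in eager mode all variables are resource variables.
v = tf.Variable(1.0, use_resource=True)
print(type(v).__name__)  # expected: ResourceVariable
```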
diff --git a/tensorflow/python/kernel_tests/scatter_nd_ops_test.py b/tensorflow/python/kernel_tests/scatter_nd_ops_test.py
index f9b9c77bbf..f2f3023469 100644
--- a/tensorflow/python/kernel_tests/scatter_nd_ops_test.py
+++ b/tensorflow/python/kernel_tests/scatter_nd_ops_test.py
@@ -268,12 +268,12 @@ class StatefulScatterNdTest(test.TestCase):
# Test some out of range errors.
indices = np.array([[-1], [0], [5]])
with self.assertRaisesOpError(
- r"Invalid indices: \[0,0\] = \[-1\] does not index into \[6\]"):
+ r"indices\[0\] = \[-1\] does not index into shape \[6\]"):
op(ref, indices, updates).eval()
indices = np.array([[2], [0], [6]])
with self.assertRaisesOpError(
- r"Invalid indices: \[2,0\] = \[6\] does not index into \[6\]"):
+ r"indices\[2\] = \[6\] does not index into shape \[6\]"):
op(ref, indices, updates).eval()
def testRank3ValidShape(self):
@@ -370,6 +370,29 @@ class ScatterNdTest(test.TestCase):
return array_ops.scatter_nd(indices, updates, shape)
@test_util.run_in_graph_and_eager_modes
+ def testBool(self):
+ indices = constant_op.constant(
+ [[4], [3], [1], [7]], dtype=dtypes.int32)
+ updates = constant_op.constant(
+ [False, True, False, True], dtype=dtypes.bool)
+ expected = np.array(
+ [False, False, False, True, False, False, False, True])
+ scatter = self.scatter_nd(indices, updates, shape=(8,))
+ result = self.evaluate(scatter)
+ self.assertAllEqual(expected, result)
+
+ # Same index is updated twice with the same value.
+ indices = constant_op.constant(
+ [[4], [3], [3], [7]], dtype=dtypes.int32)
+ updates = constant_op.constant(
+ [False, True, True, True], dtype=dtypes.bool)
+ expected = np.array([
+ False, False, False, True, False, False, False, True])
+ scatter = self.scatter_nd(indices, updates, shape=(8,))
+ result = self.evaluate(scatter)
+ self.assertAllEqual(expected, result)
+
+ @test_util.run_in_graph_and_eager_modes
def testInvalidShape(self):
# TODO(apassos) figure out how to unify these errors
with self.assertRaises(errors.InvalidArgumentError
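
The new `testBool` case exercises boolean `scatter_nd` updates; a minimal sketch of the same behavior through the public endpoint (`tf.scatter_nd` assumed, illustrative only):

```python
import tensorflow as tf

indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([False, True, False, True])
scattered = tf.scatter_nd(indices, updates, shape=[8])
# Expected value once evaluated:
# [False, False, False, True, False, False, False, True]
```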
diff --git a/tensorflow/python/lib/core/numpy.h b/tensorflow/python/lib/core/numpy.h
index d4621d61ee..0098d938a0 100644
--- a/tensorflow/python/lib/core/numpy.h
+++ b/tensorflow/python/lib/core/numpy.h
@@ -30,9 +30,10 @@ limitations under the License.
#endif
// Place `<locale>` before <Python.h> to avoid build failure in macOS.
-#include <Python.h>
#include <locale>
+#include <Python.h>
+
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
diff --git a/tensorflow/python/lib/core/py_util.cc b/tensorflow/python/lib/core/py_util.cc
index 6b6c82015f..2ee898ea1d 100644
--- a/tensorflow/python/lib/core/py_util.cc
+++ b/tensorflow/python/lib/core/py_util.cc
@@ -16,9 +16,10 @@ limitations under the License.
#include "tensorflow/python/lib/core/py_util.h"
// Place `<locale>` before <Python.h> to avoid build failure in macOS.
-#include <Python.h>
#include <locale>
+#include <Python.h>
+
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/strcat.h"
diff --git a/tensorflow/python/ops/array_grad.py b/tensorflow/python/ops/array_grad.py
index fe459a96b9..a2b5f77f91 100644
--- a/tensorflow/python/ops/array_grad.py
+++ b/tensorflow/python/ops/array_grad.py
@@ -790,7 +790,7 @@ def _ExtractImagePatchesGrad(op, grad):
sp_mat = sparse_tensor.SparseTensor(
array_ops.constant(idx, dtype=ops.dtypes.int64),
- array_ops.ones((len(idx),), dtype=ops.dtypes.float32), sp_shape)
+ array_ops.ones((len(idx),), dtype=grad.dtype), sp_shape)
jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat)
diff --git a/tensorflow/python/ops/array_ops.py b/tensorflow/python/ops/array_ops.py
index 361667ec49..ec6488ea63 100644
--- a/tensorflow/python/ops/array_ops.py
+++ b/tensorflow/python/ops/array_ops.py
@@ -636,10 +636,10 @@ def strided_slice(input_,
`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
If the ith bit of `shrink_axis_mask` is set, it implies that the ith
- specification shrinks the dimensionality by 1. `begin[i]`, `end[i]` and
- `strides[i]` must imply a slice of size 1 in the dimension. For example in
- Python one might do `foo[:, 3, :]` which would result in
- `shrink_axis_mask` equal to 2.
+ specification shrinks the dimensionality by 1, taking on the value at index
+ `begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example in
+ Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask`
+ equal to 2.
NOTE: `begin` and `end` are zero-indexed.
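
The revised wording can be illustrated with a short example; a minimal sketch (public `tf.zeros` and Python slicing assumed, illustrative only):

```python
import tensorflow as tf

foo = tf.zeros([5, 6, 7])
sliced = foo[:, 3, :]   # shrink_axis_mask == 2: the second dimension is removed
print(sliced.shape)     # (5, 7); the value at index begin[1] == 3 is taken
```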
diff --git a/tensorflow/python/ops/cond_v2_impl.py b/tensorflow/python/ops/cond_v2_impl.py
index d310f83dca..5cd0cb34de 100644
--- a/tensorflow/python/ops/cond_v2_impl.py
+++ b/tensorflow/python/ops/cond_v2_impl.py
@@ -135,6 +135,10 @@ def cond_v2(pred, true_fn, false_fn, name="cond"):
def _IfGrad(op, *grads): # pylint: disable=invalid-name
"""The gradient of an If op produced by cond_v2."""
true_graph, false_graph = _get_func_graphs(op)
+ # Note: op.graph != ops.get_default_graph() when we are computing the gradient
+ # of a nested cond.
+ assert true_graph._outer_graph == op.graph
+ assert false_graph._outer_graph == op.graph
# Create grad functions that compute the gradient of the true/false forward
# graphs. These functions will capture tensors from the forward pass
@@ -147,15 +151,16 @@ def _IfGrad(op, *grads): # pylint: disable=invalid-name
assert ([t.dtype for t in true_grad_graph.outputs] ==
[t.dtype for t in false_grad_graph.outputs])
- # Match up the captured grad function inputs with outputs of 'op' and other
- # external tensors.
- true_grad_inputs = _get_grad_inputs(op, true_graph, true_grad_graph)
- false_grad_inputs = _get_grad_inputs(op, false_graph, false_grad_graph)
+ # Resolve references to forward graph tensors in grad graphs and ensure
+ # they are in-scope, i.e., belong to one of outer graphs of the grad graph.
+ true_grad_extra_inputs = _resolve_grad_inputs(true_graph, true_grad_graph)
+ false_grad_extra_inputs = _resolve_grad_inputs(false_graph, false_grad_graph)
# Make the inputs to true_grad_graph and false_grad_graph match. Note that
# this modifies true_grad_graph and false_grad_graph.
grad_inputs = _make_inputs_match(true_grad_graph, false_grad_graph,
- true_grad_inputs, false_grad_inputs)
+ true_grad_extra_inputs,
+ false_grad_extra_inputs)
# Add all intermediate tensors as function outputs so they're available for
# higher-order gradient computations.
@@ -199,11 +204,20 @@ def _get_func_graphs(if_op):
input_shapes = [t.shape for t in extra_inputs]
func_name = if_op.get_attr(branch_name).name
fdef = if_op.graph._get_function(func_name).definition
- func_graph = _function_def_to_graph.function_def_to_graph(
- fdef, input_shapes)
+ # `if_op.graph` may not be the same as `ops.get_default_graph()` e.g.
+ # in the case of nested if ops or when the gradient is being computed
+ # from inside a Defun. We build the `func_graph` with `if_op.graph` as its
+ # `outer_graph`. This resembles how the `_FuncGraph` was built in the
+ # forward pass. We need this so that we can resolve references to tensors
+ # in `func_graph` from its gradient graph in `_resolve_grad_inputs`.
+ with if_op.graph.as_default():
+ func_graph = _function_def_to_graph.function_def_to_graph(
+ fdef, input_shapes)
func_graph.extra_inputs = extra_inputs
func_graph.extra_args = func_graph.inputs
func_graph._captured = dict(zip(extra_inputs, func_graph.inputs))
+ # Set the if op so that the gradient code can use it.
+ func_graph._if = if_op
return func_graph
return (_get_func_graph_for_branch("then_branch"),
@@ -240,7 +254,7 @@ def _grad_fn(func_graph, grads):
# Build the gradient graph. Note that this builds the gradient computation of
# func_graph in the current graph, which requires capturing tensors from
# func_graph. The captured func_graph tensors are resolved to external tensors
- # in _get_grad_inputs.
+ # in _resolve_grad_inputs.
result = _gradients_impl._GradientsHelper(
ys, func_graph.inputs, grad_ys=grad_ys,
src_graph=func_graph)
@@ -261,43 +275,49 @@ def _create_grad_func(func_graph, grads, name):
[], [], name)
-def _get_grad_inputs(if_op, cond_graph, grad_graph):
- """Returns the tensors we should pass to grad_graph.
+def _resolve_grad_inputs(cond_graph, grad_graph):
+ """Returns the tensors to pass as `extra_inputs` to `grad_graph`.
- This method handles tensors captured from cond_graph in grad_graph. It
- converts these to suitable input tensors from the outer graph.
+ The `grad_graph` may have external references to
+ 1. Its outer graph containing the input gradients. These references are kept
+ as is.
+ 2. Tensors in the forward pass graph. These tensors may not be "live"
+ when the gradient is being computed. We replace such references by their
+ corresponding tensor in the least common ancestor graph of `grad_graph` and
+ `cond_graph`. Since we export intermediate tensors for all branch
+ functions, this is always possible.
Args:
- if_op: Operation. The forward-pass If op that uses cond_graph.
cond_graph: function._FuncGraph. The forward-pass function.
grad_graph: function._FuncGraph. The gradients function.
Returns:
A list of inputs tensors to be passed to grad_graph.
"""
- inputs = []
-
- # Maps placeholders in cond_graph -> input tensor in outer graph.
- forward_input_map = {v: k for k, v in cond_graph._captured.items()}
+ new_extra_inputs = []
for t in grad_graph.extra_inputs:
- if t.graph == ops.get_default_graph():
- # t is in the outer graph (e.g. one of the input gradients).
- inputs.append(t)
- elif t in forward_input_map:
- # t is an input placeholder in cond_graph. Get the corresponding input
- # tensor in the outer graph.
- assert t.graph == cond_graph
- assert forward_input_map[t].graph == ops.get_default_graph()
- inputs.append(forward_input_map[t])
- else:
- # t is an intermediate value in cond_graph. Get the corresponding output
- # of 'if_op' (note that all intermediate values are outputs).
- assert t.graph == cond_graph
- output_idx = cond_graph.outputs.index(t)
- inputs.append(if_op.outputs[output_idx])
-
- return inputs
+ if t.graph != grad_graph._outer_graph:
+ # `t` is a tensor in `cond_graph` or one of its ancestors. We bubble this
+ # tensor to the least common ancestor of the `cond_graph` and
+ # `grad_graph` so that it is "in-scope" for `grad_graph`.
+ # TODO(srbs): `_is_ancestor` calls may be expensive. Compute the least
+ # common ancestor once and re-use.
+ assert _is_ancestor(cond_graph, t.graph)
+ while not _is_ancestor(grad_graph, t.graph):
+ assert isinstance(t.graph, _function._FuncGraph)
+ if t in t.graph.extra_args:
+ # TODO(srbs): Consider building a map of extra_args -> extra_inputs
+ # instead of searching for `t` twice.
+ t = t.graph.extra_inputs[t.graph.extra_args.index(t)]
+ else:
+ # Note: All intermediate tensors are output by the If op.
+ # TODO(srbs): .index() calls may be expensive. Optimize.
+ t = t.graph._if.outputs[t.graph.outputs.index(t)]
+ assert _is_ancestor(grad_graph, t.graph)
+ new_extra_inputs.append(t)
+
+ return new_extra_inputs
def _create_new_tf_function(func_graph):
@@ -326,7 +346,8 @@ def _create_new_tf_function(func_graph):
# a new TF_Function that we add to the graph.
fdef = _function.function_def_from_tf_function(c_func)
defined_func = _function._from_definition(fdef)
- defined_func.add_to_graph(ops.get_default_graph())
+ defined_func._sub_functions = func_graph._functions
+ defined_func.add_to_graph(func_graph._outer_graph)
return func_graph.name
@@ -389,7 +410,8 @@ def _pad_params(true_graph, false_graph, true_params, false_params):
return new_true_params, new_false_inputs
-def _make_inputs_match(true_graph, false_graph, true_inputs, false_inputs):
+def _make_inputs_match(true_graph, false_graph, true_extra_inputs,
+ false_extra_inputs):
"""Modifies true_graph and false_graph so they have the same input signature.
This method reorders and/or adds parameters to true_graph and false_graph so
@@ -400,9 +422,9 @@ def _make_inputs_match(true_graph, false_graph, true_inputs, false_inputs):
Args:
true_graph: function._FuncGraph
false_graph: function._FuncGraph
- true_inputs: a list of Tensors in the outer graph. The inputs for
+ true_extra_inputs: a list of Tensors in the outer graph. The inputs for
true_graph.
- false_inputs: a list of Tensors in the outer graph. The inputs for
+ false_extra_inputs: a list of Tensors in the outer graph. The inputs for
false_graph.
Returns:
@@ -411,12 +433,12 @@ def _make_inputs_match(true_graph, false_graph, true_inputs, false_inputs):
false_inputs.
"""
shared_inputs, true_only_inputs, false_only_inputs = _separate_unique_inputs(
- true_inputs, false_inputs)
+ true_extra_inputs, false_extra_inputs)
new_inputs = shared_inputs + true_only_inputs + false_only_inputs
- true_input_to_param = dict(zip(true_inputs, true_graph.inputs))
- false_input_to_param = dict(zip(false_inputs, false_graph.inputs))
+ true_input_to_param = dict(zip(true_extra_inputs, true_graph.inputs))
+ false_input_to_param = dict(zip(false_extra_inputs, false_graph.inputs))
true_graph.inputs = (
[true_input_to_param[t] for t in shared_inputs] +
@@ -432,6 +454,9 @@ def _make_inputs_match(true_graph, false_graph, true_inputs, false_inputs):
true_graph.extra_inputs = new_inputs
false_graph.extra_inputs = new_inputs
+ true_graph.extra_args = true_graph.inputs
+ false_graph.extra_args = false_graph.inputs
+
true_graph._captured = dict(zip(new_inputs, true_graph.inputs))
false_graph._captured = dict(zip(new_inputs, false_graph.inputs))
@@ -454,14 +479,30 @@ def _create_dummy_params(func_graph, template_tensors):
def _get_grad_fn_name(func_graph):
- """Returns a unique name to use for the grad function of `func_graph`."""
+ """Returns a unique name to use for the grad function of `func_graph`.
+
+ Ensures this name is unique in the entire hierarchy.
+
+ Args:
+ func_graph: The _FuncGraph.
+
+ Returns:
+ A string, the name to use for the gradient function.
+ """
name = "%s_grad" % func_graph.name
base_name = name
counter = 1
- if ops.get_default_graph()._is_function(name):
- name = "%s_%s" % (base_name, counter)
- counter += 1
+ has_conflict = True
+ while has_conflict:
+ curr_graph = func_graph._outer_graph
+ has_conflict = curr_graph._is_function(name)
+ while not has_conflict and isinstance(curr_graph, _function._FuncGraph):
+ curr_graph = curr_graph._outer_graph
+ has_conflict = curr_graph._is_function(name)
+ if has_conflict:
+ name = "%s_%s" % (base_name, counter)
+ counter += 1
return name
@@ -477,3 +518,11 @@ def _check_same_outputs(true_graph, false_graph):
"arguments, got:\n"
" true_fn: %s\n"
" false_fn: %s" % (true_output_types, false_output_types))
+
+
+def _is_ancestor(graph, maybe_ancestor):
+ if maybe_ancestor == graph:
+ return True
+ if isinstance(graph, _function._FuncGraph):
+ return _is_ancestor(graph._outer_graph, maybe_ancestor)
+ return False
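
The gradient changes above are what make nested `cond_v2` differentiable across function-graph boundaries; a minimal sketch mirroring the test earlier in this diff (internal module paths as used by that test are assumed, graph mode only, illustrative):

```python
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import gradients_impl

with ops.Graph().as_default():
  pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
  pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
  x = constant_op.constant(1.0, name="x")
  y = constant_op.constant(2.0, name="y")

  def false_fn():
    # Nested cond: its gradient graph references tensors from the forward
    # graphs, which _resolve_grad_inputs bubbles up to an in-scope graph.
    return cond_v2.cond_v2(pred_inner,
                           lambda: x * y * 2.0,
                           lambda: x * 5.0,
                           name="inner_cond")

  outer = cond_v2.cond_v2(pred_outer, lambda: 2.0, false_fn, name="outer_cond")
  grads = gradients_impl.gradients(outer, [x, y])
```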
diff --git a/tensorflow/python/ops/control_flow_ops.py b/tensorflow/python/ops/control_flow_ops.py
index 04545cceb7..888075ba2e 100644
--- a/tensorflow/python/ops/control_flow_ops.py
+++ b/tensorflow/python/ops/control_flow_ops.py
@@ -3146,7 +3146,7 @@ def while_loop(cond,
happen is that the thread updating `x` can never get ahead of the
counter thread because the thread incrementing `x` depends on the value
of the counter.
-
+
```python
import tensorflow as tf
diff --git a/tensorflow/python/ops/conv2d_benchmark.py b/tensorflow/python/ops/conv2d_benchmark.py
index aacdaa7ad0..28111c2730 100644
--- a/tensorflow/python/ops/conv2d_benchmark.py
+++ b/tensorflow/python/ops/conv2d_benchmark.py
@@ -175,7 +175,8 @@ class Conv2DBenchmark(test.Benchmark):
data_types = [dtypes.float32, dtypes.float16]
data_formats = ["NHWC", "NCHW"]
- in_channels = list(range(3, 16))
+ in_channels = list(range(1, 10)) + list(range(10, 20, 2)) + list(
+ range(20, 33, 4))
out_channels = [4, 16, 32]
hw_strides = [[2, 2]]
paddings = ["VALID", "SAME"]
diff --git a/tensorflow/python/ops/histogram_ops_test.py b/tensorflow/python/ops/histogram_ops_test.py
index a226ac81bb..2e57ae8a2d 100644
--- a/tensorflow/python/ops/histogram_ops_test.py
+++ b/tensorflow/python/ops/histogram_ops_test.py
@@ -84,6 +84,23 @@ class HistogramFixedWidthTest(test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
+ def test_with_invalid_value_range(self):
+ values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
+ with self.assertRaisesRegexp(
+ ValueError, "Shape must be rank 1 but is rank 0"):
+ histogram_ops.histogram_fixed_width(values, 1.0)
+ with self.assertRaisesRegexp(ValueError, "Dimension must be 2 but is 3"):
+ histogram_ops.histogram_fixed_width(values, [1.0, 2.0, 3.0])
+
+ def test_with_invalid_nbins(self):
+ values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
+ with self.assertRaisesRegexp(
+ ValueError, "Shape must be rank 0 but is rank 1"):
+ histogram_ops.histogram_fixed_width(values, [1.0, 5.0], nbins=[1, 2])
+ with self.assertRaisesRegexp(
+ ValueError, "Requires nbins > 0"):
+ histogram_ops.histogram_fixed_width(values, [1.0, 5.0], nbins=-5)
+
def test_empty_input_gives_all_zero_counts(self):
# Bins will be:
# (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
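
The new checks exercise the static validation of `value_range` and `nbins`; for contrast, a valid call looks like the following sketch (public `tf.histogram_fixed_width` endpoint assumed, illustrative only):

```python
import tensorflow as tf

values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15.0]
# value_range must be a rank-1 [min, max] pair and nbins a positive scalar.
hist = tf.histogram_fixed_width(values, value_range=[0.0, 5.0], nbins=5)
# Bins: [0,1), [1,2), [2,3), [3,4), [4,5]; out-of-range values fall into the
# edge bins, so the evaluated counts are [2, 1, 1, 0, 2].
```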
diff --git a/tensorflow/python/ops/image_ops_impl.py b/tensorflow/python/ops/image_ops_impl.py
index 5b384fd596..9440bab9ee 100644
--- a/tensorflow/python/ops/image_ops_impl.py
+++ b/tensorflow/python/ops/image_ops_impl.py
@@ -1753,6 +1753,22 @@ def is_jpeg(contents, name=None):
return math_ops.equal(substr, b'\xff\xd8\xff', name=name)
+def _is_png(contents, name=None):
+ r"""Convenience function to check if the 'contents' encodes a PNG image.
+
+ Args:
+ contents: 0-D `string`. The encoded image bytes.
+ name: A name for the operation (optional)
+
+ Returns:
+ A scalar boolean tensor indicating if 'contents' may be a PNG image.
+ is_png is susceptible to false positives.
+ """
+ with ops.name_scope(name, 'is_png'):
+ substr = string_ops.substr(contents, 0, 3)
+ return math_ops.equal(substr, b'\211PN', name=name)
+
+
@tf_export('image.decode_image')
def decode_image(contents, channels=None, dtype=dtypes.uint8, name=None):
"""Convenience function for `decode_bmp`, `decode_gif`, `decode_jpeg`,
@@ -1830,8 +1846,8 @@ def decode_image(contents, channels=None, dtype=dtypes.uint8, name=None):
def check_png():
"""Checks if an image is PNG."""
- is_png = math_ops.equal(substr, b'\211PN', name='is_png')
- return control_flow_ops.cond(is_png, _png, check_gif, name='cond_png')
+ return control_flow_ops.cond(
+ _is_png(contents), _png, check_gif, name='cond_png')
def _jpeg():
"""Decodes a jpeg image."""
diff --git a/tensorflow/python/ops/init_ops.py b/tensorflow/python/ops/init_ops.py
index 3132f7467f..c315722b6b 100644
--- a/tensorflow/python/ops/init_ops.py
+++ b/tensorflow/python/ops/init_ops.py
@@ -1136,7 +1136,8 @@ convolutional_orthogonal_3d = ConvolutionOrthogonal3D
# pylint: enable=invalid-name
-@tf_export("glorot_uniform_initializer", "keras.initializers.glorot_uniform")
+@tf_export("glorot_uniform_initializer", "keras.initializers.glorot_uniform",
+ "initializers.glorot_uniform")
def glorot_uniform_initializer(seed=None, dtype=dtypes.float32):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
@@ -1160,7 +1161,8 @@ def glorot_uniform_initializer(seed=None, dtype=dtypes.float32):
scale=1.0, mode="fan_avg", distribution="uniform", seed=seed, dtype=dtype)
-@tf_export("glorot_normal_initializer", "keras.initializers.glorot_normal")
+@tf_export("glorot_normal_initializer", "keras.initializers.glorot_normal",
+ "initializers.glorot_normal")
def glorot_normal_initializer(seed=None, dtype=dtypes.float32):
"""The Glorot normal initializer, also called Xavier normal initializer.
@@ -1181,7 +1183,98 @@ def glorot_normal_initializer(seed=None, dtype=dtypes.float32):
An initializer.
"""
return variance_scaling_initializer(
- scale=1.0, mode="fan_avg", distribution="normal", seed=seed, dtype=dtype)
+ scale=1.0,
+ mode="fan_avg",
+ distribution="truncated_normal",
+ seed=seed,
+ dtype=dtype)
+
+
+@tf_export("keras.initializers.lecun_normal", "initializers.lecun_normal")
+def lecun_normal(seed=None):
+ """LeCun normal initializer.
+
+ It draws samples from a truncated normal distribution centered on 0
+ with `stddev = sqrt(1 / fan_in)`
+ where `fan_in` is the number of input units in the weight tensor.
+
+ Arguments:
+ seed: A Python integer. Used to seed the random generator.
+
+ Returns:
+ An initializer.
+
+ References:
+ - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
+ - [Efficient
+ Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
+ """
+ return VarianceScaling(
+ scale=1., mode="fan_in", distribution="truncated_normal", seed=seed)
+
+
+@tf_export("keras.initializers.lecun_uniform", "initializers.lecun_uniform")
+def lecun_uniform(seed=None):
+ """LeCun uniform initializer.
+
+ It draws samples from a uniform distribution within [-limit, limit]
+ where `limit` is `sqrt(3 / fan_in)`
+ where `fan_in` is the number of input units in the weight tensor.
+
+ Arguments:
+ seed: A Python integer. Used to seed the random generator.
+
+ Returns:
+ An initializer.
+
+ References:
+ LeCun 98, Efficient Backprop,
+ http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
+ """
+ return VarianceScaling(
+ scale=1., mode="fan_in", distribution="uniform", seed=seed)
+
+
+@tf_export("keras.initializers.he_normal", "initializers.he_normal")
+def he_normal(seed=None):
+ """He normal initializer.
+
+ It draws samples from a truncated normal distribution centered on 0
+ with `stddev = sqrt(2 / fan_in)`
+ where `fan_in` is the number of input units in the weight tensor.
+
+ Arguments:
+ seed: A Python integer. Used to seed the random generator.
+
+ Returns:
+ An initializer.
+
+ References:
+ He et al., http://arxiv.org/abs/1502.01852
+ """
+ return VarianceScaling(
+ scale=2., mode="fan_in", distribution="truncated_normal", seed=seed)
+
+
+@tf_export("keras.initializers.he_uniform", "initializers.he_uniform")
+def he_uniform(seed=None):
+ """He uniform variance scaling initializer.
+
+ It draws samples from a uniform distribution within [-limit, limit]
+ where `limit` is `sqrt(6 / fan_in)`
+ where `fan_in` is the number of input units in the weight tensor.
+
+ Arguments:
+ seed: A Python integer. Used to seed the random generator.
+
+ Returns:
+ An initializer.
+
+ References:
+ He et al., http://arxiv.org/abs/1502.01852
+ """
+ return VarianceScaling(
+ scale=2., mode="fan_in", distribution="uniform", seed=seed)
# Utility functions.
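
The new initializers are thin wrappers around `VarianceScaling`; the scales they imply can be checked with a small NumPy sketch (the `fan_in` value here is a hypothetical example, illustrative only):

```python
import numpy as np

fan_in = 256                               # hypothetical number of input units
lecun_scale = np.sqrt(1.0 / fan_in)        # lecun_normal / lecun_uniform
he_scale = np.sqrt(2.0 / fan_in)           # he_normal
he_uniform_limit = np.sqrt(6.0 / fan_in)   # uniform [-limit, limit] for he_uniform
```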
diff --git a/tensorflow/python/ops/init_ops_test.py b/tensorflow/python/ops/init_ops_test.py
new file mode 100644
index 0000000000..f6fffa9079
--- /dev/null
+++ b/tensorflow/python/ops/init_ops_test.py
@@ -0,0 +1,196 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for initializers in init_ops."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.python.eager import context
+from tensorflow.python.framework import ops
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import resource_variable_ops
+from tensorflow.python.platform import test
+
+
+class InitializersTest(test.TestCase):
+
+ def _runner(self,
+ init,
+ shape,
+ target_mean=None,
+ target_std=None,
+ target_max=None,
+ target_min=None):
+ variable = resource_variable_ops.ResourceVariable(init(shape))
+ if context.executing_eagerly():
+ output = variable.numpy()
+ else:
+ sess = ops.get_default_session()
+ sess.run(variable.initializer)
+ output = sess.run(variable)
+ lim = 3e-2
+ if target_std is not None:
+ self.assertGreater(lim, abs(output.std() - target_std))
+ if target_mean is not None:
+ self.assertGreater(lim, abs(output.mean() - target_mean))
+ if target_max is not None:
+ self.assertGreater(lim, abs(output.max() - target_max))
+ if target_min is not None:
+ self.assertGreater(lim, abs(output.min() - target_min))
+
+ def test_uniform(self):
+ tensor_shape = (9, 6, 7)
+ with self.test_session():
+ self._runner(
+ init_ops.RandomUniform(minval=-1, maxval=1, seed=124),
+ tensor_shape,
+ target_mean=0.,
+ target_max=1,
+ target_min=-1)
+
+ def test_normal(self):
+ tensor_shape = (8, 12, 99)
+ with self.test_session():
+ self._runner(
+ init_ops.RandomNormal(mean=0, stddev=1, seed=153),
+ tensor_shape,
+ target_mean=0.,
+ target_std=1)
+
+ def test_truncated_normal(self):
+ tensor_shape = (12, 99, 7)
+ with self.test_session():
+ self._runner(
+ init_ops.TruncatedNormal(mean=0, stddev=1, seed=126),
+ tensor_shape,
+ target_mean=0.,
+ target_max=2,
+ target_min=-2)
+
+ def test_constant(self):
+ tensor_shape = (5, 6, 4)
+ with self.test_session():
+ self._runner(
+ init_ops.Constant(2),
+ tensor_shape,
+ target_mean=2,
+ target_max=2,
+ target_min=2)
+
+ def test_lecun_uniform(self):
+ tensor_shape = (5, 6, 4, 2)
+ with self.test_session():
+ fan_in, _ = init_ops._compute_fans(tensor_shape)
+ std = np.sqrt(1. / fan_in)
+ self._runner(
+ init_ops.lecun_uniform(seed=123),
+ tensor_shape,
+ target_mean=0.,
+ target_std=std)
+
+ def test_glorot_uniform_initializer(self):
+ tensor_shape = (5, 6, 4, 2)
+ with self.test_session():
+ fan_in, fan_out = init_ops._compute_fans(tensor_shape)
+ std = np.sqrt(2. / (fan_in + fan_out))
+ self._runner(
+ init_ops.glorot_uniform_initializer(seed=123),
+ tensor_shape,
+ target_mean=0.,
+ target_std=std)
+
+ def test_he_uniform(self):
+ tensor_shape = (5, 6, 4, 2)
+ with self.test_session():
+ fan_in, _ = init_ops._compute_fans(tensor_shape)
+ std = np.sqrt(2. / fan_in)
+ self._runner(
+ init_ops.he_uniform(seed=123),
+ tensor_shape,
+ target_mean=0.,
+ target_std=std)
+
+ def test_lecun_normal(self):
+ tensor_shape = (5, 6, 4, 2)
+ with self.test_session():
+ fan_in, _ = init_ops._compute_fans(tensor_shape)
+ std = np.sqrt(1. / fan_in)
+ self._runner(
+ init_ops.lecun_normal(seed=123),
+ tensor_shape,
+ target_mean=0.,
+ target_std=std)
+
+ def test_glorot_normal_initializer(self):
+ tensor_shape = (5, 6, 4, 2)
+ with self.test_session():
+ fan_in, fan_out = init_ops._compute_fans(tensor_shape)
+ std = np.sqrt(2. / (fan_in + fan_out))
+ self._runner(
+ init_ops.glorot_normal_initializer(seed=123),
+ tensor_shape,
+ target_mean=0.,
+ target_std=std)
+
+ def test_he_normal(self):
+ tensor_shape = (5, 6, 4, 2)
+ with self.test_session():
+ fan_in, _ = init_ops._compute_fans(tensor_shape)
+ std = np.sqrt(2. / fan_in)
+ self._runner(
+ init_ops.he_normal(seed=123),
+ tensor_shape,
+ target_mean=0.,
+ target_std=std)
+
+ def test_Orthogonal(self):
+ tensor_shape = (20, 20)
+ with self.test_session():
+ self._runner(init_ops.Orthogonal(seed=123), tensor_shape, target_mean=0.)
+
+ def test_Identity(self):
+ with self.test_session():
+ tensor_shape = (3, 4, 5)
+ with self.assertRaises(ValueError):
+ self._runner(
+ init_ops.Identity(),
+ tensor_shape,
+ target_mean=1. / tensor_shape[0],
+ target_max=1.)
+
+ tensor_shape = (3, 3)
+ self._runner(
+ init_ops.Identity(),
+ tensor_shape,
+ target_mean=1. / tensor_shape[0],
+ target_max=1.)
+
+ def test_Zeros(self):
+ tensor_shape = (4, 5)
+ with self.test_session():
+ self._runner(
+ init_ops.Zeros(), tensor_shape, target_mean=0., target_max=0.)
+
+ def test_Ones(self):
+ tensor_shape = (4, 5)
+ with self.test_session():
+ self._runner(init_ops.Ones(), tensor_shape, target_mean=1., target_max=1.)
+
+
+if __name__ == '__main__':
+ test.main()
diff --git a/tensorflow/python/ops/linalg/linalg.py b/tensorflow/python/ops/linalg/linalg.py
index a7ba0bbe9c..c29b5033bb 100644
--- a/tensorflow/python/ops/linalg/linalg.py
+++ b/tensorflow/python/ops/linalg/linalg.py
@@ -31,6 +31,7 @@ from tensorflow.python.ops.linalg.linear_operator_identity import *
from tensorflow.python.ops.linalg.linear_operator_kronecker import *
from tensorflow.python.ops.linalg.linear_operator_low_rank_update import *
from tensorflow.python.ops.linalg.linear_operator_lower_triangular import *
+from tensorflow.python.ops.linalg.linear_operator_zeros import *
# pylint: enable=wildcard-import
# Seal API.
diff --git a/tensorflow/python/ops/linalg/linear_operator_zeros.py b/tensorflow/python/ops/linalg/linear_operator_zeros.py
new file mode 100644
index 0000000000..b8a79c065b
--- /dev/null
+++ b/tensorflow/python/ops/linalg/linear_operator_zeros.py
@@ -0,0 +1,452 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""`LinearOperator` acting like a zero matrix."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework import errors
+from tensorflow.python.framework import ops
+from tensorflow.python.framework import tensor_shape
+from tensorflow.python.framework import tensor_util
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import check_ops
+from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops.linalg import linalg_impl as linalg
+from tensorflow.python.ops.linalg import linear_operator
+from tensorflow.python.ops.linalg import linear_operator_util
+from tensorflow.python.util.tf_export import tf_export
+
+__all__ = [
+ "LinearOperatorZeros",
+]
+
+
+@tf_export("linalg.LinearOperatorZeros")
+class LinearOperatorZeros(linear_operator.LinearOperator):
+ """`LinearOperator` acting like a [batch] zero matrix.
+
+ This operator acts like a [batch] zero matrix `A` with shape
+ `[B1,...,Bb, N, M]` for some `b >= 0`. The first `b` indices index a
+ batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
+ an `N x M` matrix. This matrix `A` is not materialized, but for
+ purposes of broadcasting this shape will be relevant.
+
+ `LinearOperatorZeros` is initialized with `num_rows`, and optionally
+ `num_columns`, `batch_shape`, and `dtype` arguments. If `num_columns` is
+ `None`, then this operator will be initialized as a square matrix. If
+ `batch_shape` is `None`, this operator efficiently passes through all
+ arguments. If `batch_shape` is provided, broadcasting may occur, which will
+ require making copies.
+
+ ```python
+ # Create a 2 x 2 zero matrix.
+ operator = LinearOperatorZeros(num_rows=2, dtype=tf.float32)
+
+ operator.to_dense()
+ ==> [[0., 0.]
+ [0., 0.]]
+
+ operator.shape
+ ==> [2, 2]
+
+ operator.determinant()
+ ==> 0.
+
+ x = ... Shape [2, 4] Tensor
+ operator.matmul(x)
+ ==> Shape [2, 4] Tensor of zeros.
+
+ # Create a 2-batch of 2x2 zero matrices
+ operator = LinearOperatorZeros(num_rows=2, batch_shape=[2])
+ operator.to_dense()
+ ==> [[[0., 0.]
+ [0., 0.]],
+ [[0., 0.]
+ [0., 0.]]]
+
+ # Here, even though the operator has a batch shape, the output has the same
+ # shape as the input, so x does not need to be broadcast (no copy). The
+ # operator is able to detect that no broadcast is necessary because both x
+ # and the operator have statically defined shape.
+ x = ... Shape [2, 2, 3]
+ operator.matmul(x)
+ ==> Shape [2, 2, 3] Tensor, same as tf.zeros_like(x)
+
+ # Here the operator and x have different batch_shape, and are broadcast.
+ # This requires a copy, since the output is different size than the input.
+ x = ... Shape [1, 2, 3]
+ operator.matmul(x)
+ ==> Shape [2, 2, 3] Tensor of zeros (x broadcast against the batch shape).
+ ```
+
+ ### Shape compatibility
+
+ This operator acts on [batch] matrices with compatible shape.
+ `x` is a batch matrix with compatible shape for `matmul` and `solve` if
+
+ ```
+ operator.shape = [B1,...,Bb] + [N, M], with b >= 0
+ x.shape = [C1,...,Cc] + [M, R],
+ and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
+ ```
+
+ #### Matrix property hints
+
+ This `LinearOperator` is initialized with boolean flags of the form `is_X`,
+ for `X = non_singular, self_adjoint, positive_definite, square`.
+ These have the following meaning:
+
+ * If `is_X == True`, callers should expect the operator to have the
+ property `X`. This is a promise that should be fulfilled, but is *not* a
+ runtime assert. For example, finite floating point precision may result
+ in these promises being violated.
+ * If `is_X == False`, callers should expect the operator to not have `X`.
+ * If `is_X == None` (the default), callers should have no expectation either
+ way.
+ """
+
+ def __init__(self,
+ num_rows,
+ num_columns=None,
+ batch_shape=None,
+ dtype=None,
+ is_non_singular=False,
+ is_self_adjoint=True,
+ is_positive_definite=False,
+ is_square=True,
+ assert_proper_shapes=False,
+ name="LinearOperatorZeros"):
+ r"""Initialize a `LinearOperatorZeros`.
+
+ The `LinearOperatorZeros` is initialized with arguments defining `dtype`
+ and shape.
+
+ This operator is able to broadcast the leading (batch) dimensions, which
+ sometimes requires copying data. If `batch_shape` is `None`, the operator
+ can take arguments of any batch shape without copying. See examples.
+
+ Args:
+ num_rows: Scalar non-negative integer `Tensor`. Number of rows in the
+ corresponding zero matrix.
+ num_columns: Scalar non-negative integer `Tensor`. Number of columns in
+ the corresponding zero matrix. If `None`, defaults to the value of
+ `num_rows`.
+ batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading
+ dimensions. If `None`, this operator has no leading dimensions.
+ dtype: Data type of the matrix that this operator represents.
+ is_non_singular: Expect that this operator is non-singular.
+ is_self_adjoint: Expect that this operator is equal to its hermitian
+ transpose.
+ is_positive_definite: Expect that this operator is positive definite,
+ meaning the quadratic form `x^H A x` has positive real part for all
+ nonzero `x`. Note that we do not require the operator to be
+ self-adjoint to be positive-definite. See:
+ https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
+ is_square: Expect that this operator acts like square [batch] matrices.
+ assert_proper_shapes: Python `bool`. If `False`, only perform static
+ checks that initialization and method arguments have proper shape.
+ If `True`, and static checks are inconclusive, add asserts to the graph.
+ name: A name for this `LinearOperator`.
+
+ Raises:
+ ValueError: If `num_rows` is determined statically to be non-scalar, or
+ negative.
+ ValueError: If `num_columns` is determined statically to be non-scalar,
+ or negative.
+ ValueError: If `batch_shape` is determined statically to not be 1-D, or
+ negative.
+ ValueError: If any of the following is not `True`:
+ `{is_self_adjoint, is_non_singular, is_positive_definite}`.
+ """
+ dtype = dtype or dtypes.float32
+ self._assert_proper_shapes = assert_proper_shapes
+
+ with ops.name_scope(name):
+ dtype = dtypes.as_dtype(dtype)
+ if not is_self_adjoint and is_square:
+ raise ValueError("A zero operator is always self adjoint.")
+ if is_non_singular:
+ raise ValueError("A zero operator is always singular.")
+ if is_positive_definite:
+ raise ValueError("A zero operator is always not positive-definite.")
+
+ super(LinearOperatorZeros, self).__init__(
+ dtype=dtype,
+ is_non_singular=is_non_singular,
+ is_self_adjoint=is_self_adjoint,
+ is_positive_definite=is_positive_definite,
+ is_square=is_square,
+ name=name)
+
+ self._num_rows = linear_operator_util.shape_tensor(
+ num_rows, name="num_rows")
+ self._num_rows_static = tensor_util.constant_value(self._num_rows)
+
+ if num_columns is None:
+ num_columns = num_rows
+
+ self._num_columns = linear_operator_util.shape_tensor(
+ num_columns, name="num_columns")
+ self._num_columns_static = tensor_util.constant_value(self._num_columns)
+
+ self._check_domain_range_possibly_add_asserts()
+
+ if (self._num_rows_static is not None and
+ self._num_columns_static is not None):
+ if is_square and self._num_rows_static != self._num_columns_static:
+ raise ValueError(
+ "LinearOperatorZeros initialized as is_square=True, but got "
+ "num_rows({}) != num_columns({})".format(
+ self._num_rows_static,
+ self._num_columns_static))
+
+ if batch_shape is None:
+ self._batch_shape_arg = None
+ else:
+ self._batch_shape_arg = linear_operator_util.shape_tensor(
+ batch_shape, name="batch_shape_arg")
+ self._batch_shape_static = tensor_util.constant_value(
+ self._batch_shape_arg)
+ self._check_batch_shape_possibly_add_asserts()
+
+ def _shape(self):
+ matrix_shape = tensor_shape.TensorShape((self._num_rows_static,
+ self._num_columns_static))
+ if self._batch_shape_arg is None:
+ return matrix_shape
+
+ batch_shape = tensor_shape.TensorShape(self._batch_shape_static)
+ return batch_shape.concatenate(matrix_shape)
+
+ def _shape_tensor(self):
+ matrix_shape = array_ops.stack((self._num_rows, self._num_columns), axis=0)
+ if self._batch_shape_arg is None:
+ return matrix_shape
+
+ return array_ops.concat((self._batch_shape_arg, matrix_shape), 0)
+
+ def _assert_non_singular(self):
+ raise errors.InvalidArgumentError(
+ node_def=None, op=None, message="Zero operators are always "
+ "non-invertible.")
+
+ def _assert_positive_definite(self):
+ raise errors.InvalidArgumentError(
+ node_def=None, op=None, message="Zero operators are always "
+ "non-positive definite.")
+
+ def _assert_self_adjoint(self):
+ return control_flow_ops.no_op("assert_self_adjoint")
+
+ def _possibly_broadcast_batch_shape(self, x):
+ """Return 'x', possibly after broadcasting the leading dimensions."""
+ # If we have no batch shape, our batch shape broadcasts with everything!
+ if self._batch_shape_arg is None:
+ return x
+
+ # Static attempt:
+ # If we determine that no broadcast is necessary, pass x through
+ # If we need a broadcast, add to an array of zeros.
+ #
+ # special_shape is the shape that, when broadcast with x's shape, will give
+ # the correct broadcast_shape. Note that we have already verified that the
+ # second-to-last dimension of self.shape matches x's shape in
+ # assert_compatible_matrix_dimensions.
+ # Also, the final dimension of 'x' can have any shape.
+ # Therefore, the final two dimensions of special_shape are 1's.
+ special_shape = self.batch_shape.concatenate([1, 1])
+ bshape = array_ops.broadcast_static_shape(x.get_shape(), special_shape)
+ if special_shape.is_fully_defined():
+ # bshape.is_fully_defined iff special_shape.is_fully_defined.
+ if bshape == x.get_shape():
+ return x
+ # Use the built in broadcasting of addition.
+ zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
+ return x + zeros
+
+ # Dynamic broadcast:
+ # Always add to an array of zeros, rather than using a "cond", since a
+ # cond would require copying data from GPU --> CPU.
+ special_shape = array_ops.concat((self.batch_shape_tensor(), [1, 1]), 0)
+ zeros = array_ops.zeros(shape=special_shape, dtype=self.dtype)
+ return x + zeros
+
+ def _matmul(self, x, adjoint=False, adjoint_arg=False):
+ if self._assert_proper_shapes:
+ x = linalg.adjoint(x) if adjoint_arg else x
+ aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
+ x = control_flow_ops.with_dependencies([aps], x)
+ if self.is_square:
+ # Note that adjoint has no effect since this matrix is self-adjoint.
+ if adjoint_arg:
+ output_shape = array_ops.concat([
+ array_ops.shape(x)[:-2],
+ [array_ops.shape(x)[-1], array_ops.shape(x)[-2]]], axis=0)
+ else:
+ output_shape = array_ops.shape(x)
+
+ return self._possibly_broadcast_batch_shape(
+ array_ops.zeros(shape=output_shape, dtype=x.dtype))
+
+ x_shape = array_ops.shape(x)
+ n = self._num_columns if adjoint else self._num_rows
+ m = x_shape[-2] if adjoint_arg else x_shape[-1]
+
+ output_shape = array_ops.concat([x_shape[:-2], [n, m]], axis=0)
+
+ zeros = array_ops.zeros(shape=output_shape, dtype=x.dtype)
+ return self._possibly_broadcast_batch_shape(zeros)
+
+ def _determinant(self):
+ if self.batch_shape.is_fully_defined():
+ return array_ops.zeros(shape=self.batch_shape, dtype=self.dtype)
+ else:
+ return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)
+
+ def _trace(self):
+ # Get Tensor of all zeros of same shape as self.batch_shape.
+ if self.batch_shape.is_fully_defined():
+ return array_ops.zeros(shape=self.batch_shape, dtype=self.dtype)
+ else:
+ return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)
+
+ def _diag_part(self):
+ return self._zeros_diag()
+
+ def add_to_tensor(self, mat, name="add_to_tensor"):
+ """Add matrix represented by this operator to `mat`. Equiv to `I + mat`.
+
+ Args:
+ mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
+ name: A name to give this `Op`.
+
+ Returns:
+ A `Tensor` with broadcast shape and same `dtype` as `self`.
+ """
+ return self._possibly_broadcast_batch_shape(mat)
+
+ def _check_domain_range_possibly_add_asserts(self):
+ """Static check of init arg `num_rows`, possibly add asserts."""
+ # Possibly add asserts.
+ if self._assert_proper_shapes:
+ self._num_rows = control_flow_ops.with_dependencies([
+ check_ops.assert_rank(
+ self._num_rows,
+ 0,
+ message="Argument num_rows must be a 0-D Tensor."),
+ check_ops.assert_non_negative(
+ self._num_rows,
+ message="Argument num_rows must be non-negative."),
+ ], self._num_rows)
+ self._num_columns = control_flow_ops.with_dependencies([
+ check_ops.assert_rank(
+ self._num_columns,
+ 0,
+ message="Argument num_columns must be a 0-D Tensor."),
+ check_ops.assert_non_negative(
+ self._num_columns,
+ message="Argument num_columns must be non-negative."),
+ ], self._num_columns)
+
+ # Static checks.
+ if not self._num_rows.dtype.is_integer:
+ raise TypeError("Argument num_rows must be integer type. Found:"
+ " %s" % self._num_rows)
+
+ if not self._num_columns.dtype.is_integer:
+ raise TypeError("Argument num_columns must be integer type. Found:"
+ " %s" % self._num_columns)
+
+ num_rows_static = self._num_rows_static
+ num_columns_static = self._num_columns_static
+
+ if num_rows_static is not None:
+ if num_rows_static.ndim != 0:
+ raise ValueError("Argument num_rows must be a 0-D Tensor. Found:"
+ " %s" % num_rows_static)
+
+ if num_rows_static < 0:
+ raise ValueError("Argument num_rows must be non-negative. Found:"
+ " %s" % num_rows_static)
+ if num_columns_static is not None:
+ if num_columns_static.ndim != 0:
+ raise ValueError("Argument num_columns must be a 0-D Tensor. Found:"
+ " %s" % num_columns_static)
+
+ if num_columns_static < 0:
+ raise ValueError("Argument num_columns must be non-negative. Found:"
+ " %s" % num_columns_static)
+
+ def _check_batch_shape_possibly_add_asserts(self):
+ """Static check of init arg `batch_shape`, possibly add asserts."""
+ if self._batch_shape_arg is None:
+ return
+
+ # Possibly add asserts
+ if self._assert_proper_shapes:
+ self._batch_shape_arg = control_flow_ops.with_dependencies([
+ check_ops.assert_rank(
+ self._batch_shape_arg,
+ 1,
+ message="Argument batch_shape must be a 1-D Tensor."),
+ check_ops.assert_non_negative(
+ self._batch_shape_arg,
+ message="Argument batch_shape must be non-negative."),
+ ], self._batch_shape_arg)
+
+ # Static checks
+ if not self._batch_shape_arg.dtype.is_integer:
+ raise TypeError("Argument batch_shape must be integer type. Found:"
+ " %s" % self._batch_shape_arg)
+
+ if self._batch_shape_static is None:
+ return # Cannot do any other static checks.
+
+ if self._batch_shape_static.ndim != 1:
+ raise ValueError("Argument batch_shape must be a 1-D Tensor. Found:"
+ " %s" % self._batch_shape_static)
+
+ if np.any(self._batch_shape_static < 0):
+ raise ValueError("Argument batch_shape must be non-negative. Found:"
+ "%s" % self._batch_shape_static)
+
+ def _min_matrix_dim(self):
+ """Minimum of domain/range dimension, if statically available, else None."""
+ domain_dim = self.domain_dimension.value
+ range_dim = self.range_dimension.value
+ if domain_dim is None or range_dim is None:
+ return None
+ return min(domain_dim, range_dim)
+
+ def _min_matrix_dim_tensor(self):
+ """Minimum of domain/range dimension, as a tensor."""
+ return math_ops.reduce_min(self.shape_tensor()[-2:])
+
+ def _zeros_diag(self):
+ """Returns the diagonal of this operator as all zeros."""
+ if self.shape.is_fully_defined():
+ d_shape = self.batch_shape.concatenate([self._min_matrix_dim()])
+ else:
+ d_shape = array_ops.concat(
+ [self.batch_shape_tensor(),
+ [self._min_matrix_dim_tensor()]], axis=0)
+
+ return array_ops.zeros(shape=d_shape, dtype=self.dtype)
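
The broadcast strategy used by `_possibly_broadcast_batch_shape` above (add an all-zeros tensor rather than branching on a cond) can be seen in isolation with NumPy broadcasting (illustrative only):

```python
import numpy as np

x = np.ones([1, 2, 3])
special_zeros = np.zeros([2, 1, 1])   # batch_shape [2] concatenated with [1, 1]
# Addition broadcasts x up to the batched shape without any conditional logic.
print((x + special_zeros).shape)      # (2, 2, 3)
```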
diff --git a/tensorflow/python/ops/linalg_ops.py b/tensorflow/python/ops/linalg_ops.py
index a0dfa543f9..f4a93560be 100644
--- a/tensorflow/python/ops/linalg_ops.py
+++ b/tensorflow/python/ops/linalg_ops.py
@@ -401,7 +401,7 @@ def svd(tensor, full_matrices=False, compute_uv=True, name=None):
import tensorflow as tf
import numpy as np
s, u, v = tf.linalg.svd(a)
- tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_v=True))
+ tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_b=True))
u, s, v_adj = np.linalg.svd(a, full_matrices=False)
np_a_approx = np.dot(u, np.dot(np.diag(s), v_adj))
# tf_a_approx and np_a_approx should be numerically close.
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index c28dca5137..fbe6b62302 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -628,16 +628,17 @@ def cast(x, dtype, name=None):
```
The operation supports data types (for `x` and `dtype`) of
- `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `float16`, `float32`,
- `float64`, `complex64`, `complex128`, `bfloat16`. In case of casting from
- complex types (`complex64`, `complex128`) to real types, only the real part
- of `x` is returned. In case of casting from real types to complex types
- (`complex64`, `complex128`), the imaginary part of the returned value is set
- to `0`. The handling of complex types here matches the behavior of numpy.
+ `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
+ `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
+ In case of casting from complex types (`complex64`, `complex128`) to real
+ types, only the real part of `x` is returned. In case of casting from real
+ types to complex types (`complex64`, `complex128`), the imaginary part of the
+ returned value is set to `0`. The handling of complex types here matches the
+ behavior of numpy.
Args:
x: A `Tensor` or `SparseTensor` of numeric type. It could be
- `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`,
+ `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
`float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
dtype: The destination type. The list of supported dtypes is the same
as `x`.
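
To make the complex-type rules in the docstring above concrete, a small sketch (not part of the patch; behavior as documented):

```python
# Hedged sketch of tf.cast between complex and real types, per the docstring above.
import tensorflow as tf

z = tf.constant(1.5 + 2.5j, dtype=tf.complex64)
real = tf.cast(z, tf.float32)        # keeps only the real part -> 1.5
back = tf.cast(real, tf.complex64)   # imaginary part is set to 0 -> (1.5+0j)

with tf.Session() as sess:
  print(sess.run([real, back]))
```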
diff --git a/tensorflow/python/ops/parallel_for/BUILD b/tensorflow/python/ops/parallel_for/BUILD
index 065c2caedc..6c804a50e7 100644
--- a/tensorflow/python/ops/parallel_for/BUILD
+++ b/tensorflow/python/ops/parallel_for/BUILD
@@ -125,5 +125,4 @@ cuda_py_test(
"//tensorflow/python:random_ops",
"//tensorflow/python/ops/losses",
],
- tags = ["no_gpu"], # TODO(b/80127739): test is flaky
)
diff --git a/tensorflow/python/ops/parallel_for/__init__.py b/tensorflow/python/ops/parallel_for/__init__.py
index b49d865968..dd8bc6d487 100644
--- a/tensorflow/python/ops/parallel_for/__init__.py
+++ b/tensorflow/python/ops/parallel_for/__init__.py
@@ -23,13 +23,3 @@ from tensorflow.python.ops.parallel_for.control_flow_ops import for_loop
from tensorflow.python.ops.parallel_for.control_flow_ops import pfor
from tensorflow.python.ops.parallel_for.gradients import batch_jacobian
from tensorflow.python.ops.parallel_for.gradients import jacobian
-from tensorflow.python.util.all_util import remove_undocumented
-
-_allowed_symbols = [
- 'pfor',
- 'for_loop',
- 'jacobian',
- 'batch_jacobian',
-]
-
-remove_undocumented(__name__, _allowed_symbols)
diff --git a/tensorflow/python/ops/parallel_for/gradients_test.py b/tensorflow/python/ops/parallel_for/gradients_test.py
index 310a2154f7..3a6d9149ad 100644
--- a/tensorflow/python/ops/parallel_for/gradients_test.py
+++ b/tensorflow/python/ops/parallel_for/gradients_test.py
@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import print_function
import functools
+import os
import time
import numpy as np
@@ -444,6 +445,10 @@ class GradientsTest(test.TestCase):
self.run_and_assert_equal(pfor_outputs, while_outputs)
def test_mnist_per_eg_grad(self):
+ # It looks like CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED
+ # configuration of Winograd can cause low precision output resulting in
+ # tests failing. So we disable that here.
+ os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
data_format = ("channels_first"
if test.is_gpu_available() else "channels_last")
     # Note that we are setting training=False here so that dropout produces
@@ -451,8 +456,13 @@ class GradientsTest(test.TestCase):
pfor_outputs, while_outputs = create_mnist_per_eg_grad(
4, data_format, training=False)
self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3)
+ os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)
def test_mnist_per_eg_jacobian(self):
+ # It looks like CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED
+ # configuration of Winograd can cause low precision output resulting in
+ # tests failing. So we disable that here.
+ os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
data_format = ("channels_first"
if test.is_gpu_available() else "channels_last")
     # Note that we are setting training=False here so that dropout produces
@@ -460,6 +470,7 @@ class GradientsTest(test.TestCase):
pfor_outputs, while_outputs = create_mnist_per_eg_jacobian(
2, data_format, training=False)
self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3)
+ os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)
def test_fc_jacobian(self):
jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while = (
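
Setting and popping TF_ENABLE_WINOGRAD_NONFUSED by hand, as the tests above do, leaves the variable set if an assertion fails before the pop. A context-manager helper (a sketch, not part of the patch; `env_var` is a hypothetical name) would restore the environment either way:

```python
# Hedged sketch: temporarily override an environment variable in a test.
import contextlib
import os


@contextlib.contextmanager
def env_var(name, value):
  """Sets os.environ[name] = value for the duration of the block."""
  old = os.environ.get(name)
  os.environ[name] = value
  try:
    yield
  finally:
    if old is None:
      os.environ.pop(name, None)
    else:
      os.environ[name] = old


# Usage in a test:
# with env_var("TF_ENABLE_WINOGRAD_NONFUSED", "0"):
#   ...run the pfor/while comparison...
```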
diff --git a/tensorflow/python/ops/resource_variable_ops.py b/tensorflow/python/ops/resource_variable_ops.py
index 70a89e5ebb..8b259b6b6b 100644
--- a/tensorflow/python/ops/resource_variable_ops.py
+++ b/tensorflow/python/ops/resource_variable_ops.py
@@ -181,7 +181,8 @@ def shape_safe_assign_variable_handle(handle, shape, value, name=None):
name=name)
-class ResourceVariable(variables.Variable):
+# TODO(apassos) make this be variables.Variable
+class ResourceVariable(variables.RefVariable):
"""Variable based on resource handles.
See the @{$variables$Variables How To} for a high level overview.
@@ -195,15 +196,16 @@ class ResourceVariable(variables.Variable):
the variable are fixed. The value can be changed using one of the assign
methods.
- Just like any `Tensor`, variables created with `ResourceVariable()` can be
- used as inputs for other Ops in the graph. Additionally, all the operators
- overloaded for the `Tensor` class are carried over to variables, so you can
- also add nodes to the graph by just doing arithmetic on variables.
+ Just like any `Tensor`, variables created with
+ `tf.Variable(use_resource=True)` can be used as inputs for other Ops in the
+ graph. Additionally, all the operators overloaded for the `Tensor` class are
+ carried over to variables, so you can also add nodes to the graph by just
+ doing arithmetic on variables.
- Unlike tf.Variable, a tf.ResourceVariable has well-defined semantics. Each
+  Unlike ref-based variables, a ResourceVariable has well-defined semantics. Each
usage of a ResourceVariable in a TensorFlow graph adds a read_value operation
- to the graph. The Tensors returned by a read_value operation are guaranteed
- to see all modifications to the value of the variable which happen in any
+ to the graph. The Tensors returned by a read_value operation are guaranteed to
+ see all modifications to the value of the variable which happen in any
   operation on which the read_value depends (either directly, indirectly, or
via a control dependency) and guaranteed to not see any modification to the
value of the variable from operations that depend on the read_value operation.
@@ -217,7 +219,7 @@ class ResourceVariable(variables.Variable):
can cause tf.Variable and tf.ResourceVariable to behave differently:
```python
- a = tf.ResourceVariable(1.0)
+ a = tf.Variable(1.0, use_resource=True)
a.initializer.run()
assign = a.assign(2.0)
@@ -741,8 +743,14 @@ class ResourceVariable(variables.Variable):
def _read_variable_op(self):
if self.trainable:
tape.watch_variable(self)
- return gen_resource_variable_ops.read_variable_op(self._handle,
- self._dtype)
+ result = gen_resource_variable_ops.read_variable_op(self._handle,
+ self._dtype)
+ if not context.executing_eagerly():
+ # Note that if a control flow context is active the input of the read op
+ # might not actually be the handle. This line bypasses it.
+ tape.record_operation(
+ "ReadVariableOp", [result], [self._handle], lambda x: [x])
+ return result
def read_value(self):
"""Constructs an op which reads the value of this variable.
diff --git a/tensorflow/python/ops/rnn.py b/tensorflow/python/ops/rnn.py
index deba133fb9..7096e0dd84 100644
--- a/tensorflow/python/ops/rnn.py
+++ b/tensorflow/python/ops/rnn.py
@@ -417,24 +417,24 @@ def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
# Backward direction
if not time_major:
- time_dim = 1
- batch_dim = 0
+ time_axis = 1
+ batch_axis = 0
else:
- time_dim = 0
- batch_dim = 1
+ time_axis = 0
+ batch_axis = 1
- def _reverse(input_, seq_lengths, seq_dim, batch_dim):
+ def _reverse(input_, seq_lengths, seq_axis, batch_axis):
if seq_lengths is not None:
return array_ops.reverse_sequence(
input=input_, seq_lengths=seq_lengths,
- seq_dim=seq_dim, batch_dim=batch_dim)
+ seq_axis=seq_axis, batch_axis=batch_axis)
else:
- return array_ops.reverse(input_, axis=[seq_dim])
+ return array_ops.reverse(input_, axis=[seq_axis])
with vs.variable_scope("bw") as bw_scope:
inputs_reverse = _reverse(
inputs, seq_lengths=sequence_length,
- seq_dim=time_dim, batch_dim=batch_dim)
+ seq_axis=time_axis, batch_axis=batch_axis)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
initial_state=initial_state_bw, dtype=dtype,
@@ -443,7 +443,7 @@ def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
output_bw = _reverse(
tmp, seq_lengths=sequence_length,
- seq_dim=time_dim, batch_dim=batch_dim)
+ seq_axis=time_axis, batch_axis=batch_axis)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
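
The rename above switches the internal `_reverse` helper to the `seq_axis`/`batch_axis` keyword names of `tf.reverse_sequence`. A small sketch of what that per-example reversal does for a batch-major input (illustrative values, not from the patch):

```python
# Hedged sketch of the per-example time reversal used for the backward RNN above.
import numpy as np
import tensorflow as tf

# Batch-major input: 2 sequences of length 4 (batch_axis=0, seq_axis=1).
inputs = tf.constant(np.arange(8).reshape(2, 4), dtype=tf.int32)
seq_lengths = tf.constant([3, 4])

rev = tf.reverse_sequence(inputs, seq_lengths=seq_lengths,
                          seq_axis=1, batch_axis=0)

with tf.Session() as sess:
  print(sess.run(rev))
  # Row 0 reverses only its first 3 steps: [2 1 0 3]
  # Row 1 reverses all 4 steps:            [7 6 5 4]
```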
diff --git a/tensorflow/python/ops/tensor_array_ops.py b/tensorflow/python/ops/tensor_array_ops.py
index cc92da4fd7..f86dfb3527 100644
--- a/tensorflow/python/ops/tensor_array_ops.py
+++ b/tensorflow/python/ops/tensor_array_ops.py
@@ -554,7 +554,7 @@ class _EagerTensorArray(object):
self._tensor_array.extend([None for _ in range(index - size + 1)])
if not isinstance(value, ops.EagerTensor):
- value = constant_op.constant(value)
+ value = ops.convert_to_tensor(value)
if self._infer_shape:
if self._element_shape is None:
@@ -633,8 +633,8 @@ class _EagerTensorArray(object):
def split(self, value, lengths, name=None):
"""See TensorArray."""
# error checking to match graph-mode errors
- value = constant_op.constant(value)
- lengths = constant_op.constant(lengths)
+ value = ops.convert_to_tensor(value)
+ lengths = ops.convert_to_tensor(lengths)
sum_lengths = math_ops.reduce_sum(lengths)
if lengths.shape.ndims != 1:
raise errors_impl.InvalidArgumentError(
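
The change above from `constant_op.constant` to `ops.convert_to_tensor` matters when the caller already passes a Tensor: `convert_to_tensor` passes it through instead of trying to build a new constant from it. A brief sketch of the difference (illustrative, not from the patch):

```python
# Hedged sketch contrasting tf.constant with tf.convert_to_tensor.
import tensorflow as tf

t = tf.constant([1.0, 2.0])

same = tf.convert_to_tensor(t)              # existing Tensor is returned as-is
wrapped = tf.convert_to_tensor([3.0, 4.0])  # plain Python data still gets wrapped

print(same is t)  # True (no dtype conversion requested, so no new op is added)
```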
diff --git a/tensorflow/python/ops/variable_scope.py b/tensorflow/python/ops/variable_scope.py
index 77f67c18ee..aca44bcd44 100644
--- a/tensorflow/python/ops/variable_scope.py
+++ b/tensorflow/python/ops/variable_scope.py
@@ -191,36 +191,9 @@ class _ReuseMode(enum.Enum):
# REUSE_TRUE = 3
-@tf_export("VariableSynchronization")
-class VariableSynchronization(enum.Enum):
- """Indicates when a distributed variable will be synced."""
-
- # Indicates that the synchronization will be determined by the current
- # `DistributionStrategy` (eg. With `MirroredStrategy` this would be
- # `ON_WRITE`).
- AUTO = 0
-
- # Indicates that there will only be one copy of the variable, so there is no
- # need to sync.
- NONE = 1
-
- # Indicates that the variable will be aggregated across devices
- # every time it is updated.
- ON_WRITE = 2
-
- # Indicates that the variable will be aggregated across devices
- # when it is read (eg. when checkpointing or when evaluating an op that uses
- # the variable).
- ON_READ = 3
-
-
-@tf_export("VariableAggregation")
-class VariableAggregation(enum.Enum):
- """Indicates how a distributed variable will be aggregated."""
- NONE = 0
- SUM = 1
- MEAN = 2
-
+# TODO(apassos) remove these forwarding symbols.
+VariableSynchronization = variables.VariableSynchronization # pylint: disable=invalid-name
+VariableAggregation = variables.VariableAggregation # pylint: disable=invalid-name
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export("AUTO_REUSE").export_constant(__name__, "AUTO_REUSE")
@@ -2376,7 +2349,10 @@ def default_variable_creator(next_creator=None, **kwargs):
validate_shape = kwargs.get("validate_shape", True)
caching_device = kwargs.get("caching_device", None)
name = kwargs.get("name", None)
+ variable_def = kwargs.get("variable_def", None)
dtype = kwargs.get("dtype", None)
+ expected_shape = kwargs.get("expected_shape", None)
+ import_scope = kwargs.get("import_scope", None)
constraint = kwargs.get("constraint", None)
use_resource = kwargs.get("use_resource", None)
@@ -2387,23 +2363,24 @@ def default_variable_creator(next_creator=None, **kwargs):
if use_resource is None:
use_resource = get_variable_scope().use_resource
- if use_resource or (use_resource is None and context.executing_eagerly()):
+ use_resource = use_resource or context.executing_eagerly()
+ if use_resource:
return resource_variable_ops.ResourceVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
- constraint=constraint)
- elif not use_resource and context.executing_eagerly():
- raise RuntimeError(
- "VariableScope should use resource variable when eager execution is"
- " enabled, but use_resource is False."
- )
+ constraint=constraint, variable_def=variable_def,
+ import_scope=import_scope)
else:
- return variables.Variable(
+ return variables.RefVariable(
initial_value=initial_value, trainable=trainable,
collections=collections, validate_shape=validate_shape,
caching_device=caching_device, name=name, dtype=dtype,
- constraint=constraint)
+ constraint=constraint, variable_def=variable_def,
+ expected_shape=expected_shape, import_scope=import_scope)
+
+
+variables.default_variable_creator = default_variable_creator
def _make_getter(captured_getter, captured_previous):
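
With the branch above, the resource path is taken whenever `use_resource` is requested or eager execution is enabled; everything else falls through to `RefVariable`. A hedged usage sketch of what that means for callers (graph mode assumed; not part of the patch):

```python
# Hedged sketch of how the default creator above routes construction.
import tensorflow as tf

ref_var = tf.Variable(1.0)                     # graph-mode default: ref-based
res_var = tf.Variable(1.0, use_resource=True)  # explicitly request a resource variable

print(type(ref_var).__name__)  # e.g. "RefVariable" with this change applied
print(type(res_var).__name__)  # "ResourceVariable"
```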
@@ -2411,36 +2388,8 @@ def _make_getter(captured_getter, captured_previous):
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
-def variable(initial_value=None,
- trainable=None,
- collections=None,
- validate_shape=True,
- caching_device=None,
- name=None,
- dtype=None,
- constraint=None,
- use_resource=None,
- synchronization=VariableSynchronization.AUTO,
- aggregation=VariableAggregation.NONE):
- previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
- for getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access
- previous_getter = _make_getter(getter, previous_getter)
-
- # Reset `aggregation` that is explicitly set as `None` to the enum None value.
- if aggregation is None:
- aggregation = VariableAggregation.NONE
- return previous_getter(
- initial_value=initial_value,
- trainable=trainable,
- collections=collections,
- validate_shape=validate_shape,
- caching_device=caching_device,
- name=name,
- dtype=dtype,
- constraint=constraint,
- use_resource=use_resource,
- synchronization=synchronization,
- aggregation=aggregation)
+# TODO(apassos) remove forwarding symbol
+variable = variables.Variable
@tf_contextlib.contextmanager
diff --git a/tensorflow/python/ops/variables.py b/tensorflow/python/ops/variables.py
index d3b8da6d2a..fc00ce68ae 100644
--- a/tensorflow/python/ops/variables.py
+++ b/tensorflow/python/ops/variables.py
@@ -17,6 +17,10 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import enum # pylint: disable=g-bad-import-order
+
+import six
+
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python.eager import context
@@ -36,8 +40,101 @@ from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
+def default_variable_creator(_, **kwds):
+ del kwds
+ raise NotImplementedError("variable_scope needs to be imported")
+
+
+def _make_getter(captured_getter, captured_previous):
+ """To avoid capturing loop variables."""
+ def getter(**kwargs):
+ return captured_getter(captured_previous, **kwargs)
+ return getter
+
+
+@tf_export("VariableSynchronization")
+class VariableSynchronization(enum.Enum):
+ """Indicates when a distributed variable will be synced."""
+
+ # Indicates that the synchronization will be determined by the current
+ # `DistributionStrategy` (eg. With `MirroredStrategy` this would be
+ # `ON_WRITE`).
+ AUTO = 0
+
+ # Indicates that there will only be one copy of the variable, so there is no
+ # need to sync.
+ NONE = 1
+
+ # Indicates that the variable will be aggregated across devices
+ # every time it is updated.
+ ON_WRITE = 2
+
+ # Indicates that the variable will be aggregated across devices
+ # when it is read (eg. when checkpointing or when evaluating an op that uses
+ # the variable).
+ ON_READ = 3
+
+
+@tf_export("VariableAggregation")
+class VariableAggregation(enum.Enum):
+ """Indicates how a distributed variable will be aggregated."""
+ NONE = 0
+ SUM = 1
+ MEAN = 2
+
+
+class VariableMetaclass(type):
+ """Metaclass to allow construction of tf.Variable to be overridden."""
+
+ def _variable_call(cls,
+ initial_value=None,
+ trainable=None,
+ collections=None,
+ validate_shape=True,
+ caching_device=None,
+ name=None,
+ variable_def=None,
+ dtype=None,
+ expected_shape=None,
+ import_scope=None,
+ constraint=None,
+ use_resource=None,
+ synchronization=VariableSynchronization.AUTO,
+ aggregation=VariableAggregation.NONE):
+ """Call on Variable class. Useful to force the signature."""
+ previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
+ for getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access
+ previous_getter = _make_getter(getter, previous_getter)
+
+ # Reset `aggregation` that is explicitly set as `None` to the enum NONE.
+ if aggregation is None:
+ aggregation = VariableAggregation.NONE
+ return previous_getter(
+ initial_value=initial_value,
+ trainable=trainable,
+ collections=collections,
+ validate_shape=validate_shape,
+ caching_device=caching_device,
+ name=name,
+ variable_def=variable_def,
+ dtype=dtype,
+ expected_shape=expected_shape,
+ import_scope=import_scope,
+ constraint=constraint,
+ use_resource=use_resource,
+ synchronization=synchronization,
+ aggregation=aggregation)
+
+ def __call__(cls, *args, **kwargs):
+ if cls is Variable:
+ return cls._variable_call(*args, **kwargs)
+ else:
+ return super(VariableMetaclass, cls).__call__(*args, **kwargs)
+
+
@tf_export("Variable")
-class Variable(checkpointable.CheckpointableBase):
+class Variable(six.with_metaclass(VariableMetaclass,
+ checkpointable.CheckpointableBase)):
"""See the @{$variables$Variables How To} for a high level overview.
A variable maintains state in the graph across calls to `run()`. You add a
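
`VariableMetaclass` above intercepts construction only when the class being called is `Variable` itself, so subclasses such as `RefVariable` and `ResourceVariable` still construct normally. A stripped-down, generic sketch of that dispatch pattern (names here are invented for illustration, not taken from the patch):

```python
# Hedged sketch of a metaclass that turns the base class into a factory.
import six


class _FactoryMeta(type):

  def __call__(cls, *args, **kwargs):
    if cls is Base:
      # Calls on the base class go through a factory hook...
      return cls._create(*args, **kwargs)
    # ...while subclasses construct normally.
    return super(_FactoryMeta, cls).__call__(*args, **kwargs)


class Base(six.with_metaclass(_FactoryMeta, object)):

  @classmethod
  def _create(cls, *args, **kwargs):
    # Choose a concrete implementation; always Impl in this sketch.
    return Impl(*args, **kwargs)


class Impl(Base):

  def __init__(self, value):
    self.value = value


print(type(Base(42)).__name__)  # "Impl" -- Base() dispatched to the subclass
```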
@@ -123,37 +220,33 @@ class Variable(checkpointable.CheckpointableBase):
various `Optimizer` classes use this collection as the default list of
variables to optimize.
- WARNING: tf.Variable objects have a non-intuitive memory model. A Variable is
- represented internally as a mutable Tensor which can non-deterministically
- alias other Tensors in a graph. The set of operations which consume a Variable
- and can lead to aliasing is undetermined and can change across TensorFlow
- versions. Avoid writing code which relies on the value of a Variable either
- changing or not changing as other operations happen. For example, using
- Variable objects or simple functions thereof as predicates in a `tf.cond` is
- dangerous and error-prone:
+ WARNING: tf.Variable objects by default have a non-intuitive memory model. A
+ Variable is represented internally as a mutable Tensor which can
+ non-deterministically alias other Tensors in a graph. The set of operations
+ which consume a Variable and can lead to aliasing is undetermined and can
+ change across TensorFlow versions. Avoid writing code which relies on the
+ value of a Variable either changing or not changing as other operations
+ happen. For example, using Variable objects or simple functions thereof as
+ predicates in a `tf.cond` is dangerous and error-prone:
```
v = tf.Variable(True)
tf.cond(v, lambda: v.assign(False), my_false_fn) # Note: this is broken.
```
- Here replacing tf.Variable with tf.contrib.eager.Variable will fix any
- nondeterminism issues.
+  Here adding `use_resource=True` when constructing the variable will
+ fix any nondeterminism issues:
+ ```
+ v = tf.Variable(True, use_resource=True)
+ tf.cond(v, lambda: v.assign(False), my_false_fn)
+ ```
To use the replacement for variables which does
not have these issues:
- * Replace `tf.Variable` with `tf.contrib.eager.Variable`;
+ * Add `use_resource=True` when constructing `tf.Variable`;
* Call `tf.get_variable_scope().set_use_resource(True)` inside a
`tf.variable_scope` before the `tf.get_variable()` call.
-
- @compatibility(eager)
- `tf.Variable` is not compatible with eager execution. Use
- `tf.contrib.eager.Variable` instead which is compatible with both eager
- execution and graph construction. See [the TensorFlow Eager Execution
- guide](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/g3doc/guide.md#variables-and-optimizers)
- for details on how variables work in eager execution.
- @end_compatibility
"""
def __init__(self,
@@ -167,7 +260,10 @@ class Variable(checkpointable.CheckpointableBase):
dtype=None,
expected_shape=None,
import_scope=None,
- constraint=None):
+ constraint=None,
+ use_resource=None,
+ synchronization=VariableSynchronization.AUTO,
+ aggregation=VariableAggregation.NONE):
"""Creates a new variable with value `initial_value`.
The new variable is added to the graph collections listed in `collections`,
@@ -219,25 +315,565 @@ class Variable(checkpointable.CheckpointableBase):
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
+ use_resource: if True, a ResourceVariable is created; otherwise an
+ old-style ref-based variable is created. When eager execution is enabled
+ a resource variable is always created.
+    synchronization: Indicates when a distributed variable will be
+ aggregated. Accepted values are constants defined in the class
+ @{tf.VariableSynchronization}. By default the synchronization is set to
+ `AUTO` and the current `DistributionStrategy` chooses
+ when to synchronize. If `synchronization` is set to `ON_READ`,
+ `trainable` must not be set to `True`.
+ aggregation: Indicates how a distributed variable will be aggregated.
+ Accepted values are constants defined in the class
+ @{tf.VariableAggregation}.
Raises:
ValueError: If both `variable_def` and initial_value are specified.
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
RuntimeError: If eager execution is enabled.
+ """
+ raise NotImplementedError
+
+ def __repr__(self):
+ raise NotImplementedError
+
+ def value(self):
+ """Returns the last snapshot of this variable.
+
+ You usually do not need to call this method as all ops that need the value
+ of the variable call it automatically through a `convert_to_tensor()` call.
+
+ Returns a `Tensor` which holds the value of the variable. You can not
+ assign a new value to this tensor as it is not a reference to the variable.
+
+ To avoid copies, if the consumer of the returned value is on the same device
+ as the variable, this actually returns the live value of the variable, not
+ a copy. Updates to the variable are seen by the consumer. If the consumer
+ is on a different device it will get a copy of the variable.
- @compatibility(eager)
- `tf.Variable` is not compatible with eager execution. Use
- `tfe.Variable` instead which is compatible with both eager execution
- and graph construction. See [the TensorFlow Eager Execution
- guide](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/g3doc/guide.md#variables-and-optimizers)
- for details on how variables work in eager execution.
- @end_compatibility
+ Returns:
+ A `Tensor` containing the value of the variable.
+ """
+ raise NotImplementedError
+
+ def read_value(self):
+ """Returns the value of this variable, read in the current context.
+
+ Can be different from value() if it's on another device, with control
+ dependencies, etc.
+
+ Returns:
+ A `Tensor` containing the value of the variable.
+ """
+ raise NotImplementedError
+
+ def set_shape(self, shape):
+ """Overrides the shape for this variable.
+
+ Args:
+ shape: the `TensorShape` representing the overridden shape.
+ """
+ raise NotImplementedError
+
+ @property
+ def trainable(self):
+ raise NotImplementedError
+
+ def eval(self, session=None):
+ """In a session, computes and returns the value of this variable.
+
+ This is not a graph construction method, it does not add ops to the graph.
+
+ This convenience method requires a session where the graph
+ containing this variable has been launched. If no session is
+ passed, the default session is used. See @{tf.Session} for more
+ information on launching a graph and on sessions.
+
+ ```python
+ v = tf.Variable([1, 2])
+ init = tf.global_variables_initializer()
+
+ with tf.Session() as sess:
+ sess.run(init)
+ # Usage passing the session explicitly.
+ print(v.eval(sess))
+ # Usage with the default session. The 'with' block
+ # above makes 'sess' the default session.
+ print(v.eval())
+ ```
+
+ Args:
+ session: The session to use to evaluate this variable. If
+ none, the default session is used.
+
+ Returns:
+ A numpy `ndarray` with a copy of the value of this variable.
+ """
+ raise NotImplementedError
+
+ def initialized_value(self):
+ """Returns the value of the initialized variable.
+
+ You should use this instead of the variable itself to initialize another
+ variable with a value that depends on the value of this variable.
+
+ ```python
+ # Initialize 'v' with a random tensor.
+ v = tf.Variable(tf.truncated_normal([10, 40]))
+ # Use `initialized_value` to guarantee that `v` has been
+ # initialized before its value is used to initialize `w`.
+ # The random values are picked only once.
+ w = tf.Variable(v.initialized_value() * 2.0)
+ ```
+
+ Returns:
+ A `Tensor` holding the value of this variable after its initializer
+ has run.
+ """
+ raise NotImplementedError
+
+ @property
+ def initial_value(self):
+ """Returns the Tensor used as the initial value for the variable.
+
+ Note that this is different from `initialized_value()` which runs
+ the op that initializes the variable before returning its value.
+ This method returns the tensor that is used by the op that initializes
+ the variable.
+
+ Returns:
+ A `Tensor`.
+ """
+ raise NotImplementedError
+
+ @property
+ def constraint(self):
+ """Returns the constraint function associated with this variable.
+
+ Returns:
+ The constraint function that was passed to the variable constructor.
+ Can be `None` if no constraint was passed.
+ """
+ raise NotImplementedError
+
+ def assign(self, value, use_locking=False):
+ """Assigns a new value to the variable.
+
+ This is essentially a shortcut for `assign(self, value)`.
+
+ Args:
+ value: A `Tensor`. The new value for this variable.
+ use_locking: If `True`, use locking during the assignment.
+
+ Returns:
+ A `Tensor` that will hold the new value of this variable after
+ the assignment has completed.
+ """
+ raise NotImplementedError
+
+ def assign_add(self, delta, use_locking=False):
+ """Adds a value to this variable.
+
+ This is essentially a shortcut for `assign_add(self, delta)`.
+
+ Args:
+ delta: A `Tensor`. The value to add to this variable.
+ use_locking: If `True`, use locking during the operation.
+
+ Returns:
+ A `Tensor` that will hold the new value of this variable after
+ the addition has completed.
+ """
+ raise NotImplementedError
+
+ def assign_sub(self, delta, use_locking=False):
+ """Subtracts a value from this variable.
+
+ This is essentially a shortcut for `assign_sub(self, delta)`.
+
+ Args:
+ delta: A `Tensor`. The value to subtract from this variable.
+ use_locking: If `True`, use locking during the operation.
+
+ Returns:
+ A `Tensor` that will hold the new value of this variable after
+ the subtraction has completed.
+ """
+ raise NotImplementedError
+
+ def scatter_sub(self, sparse_delta, use_locking=False):
+ """Subtracts `IndexedSlices` from this variable.
+
+ This is essentially a shortcut for `scatter_sub(self, sparse_delta.indices,
+ sparse_delta.values)`.
+
+ Args:
+ sparse_delta: `IndexedSlices` to be subtracted from this variable.
+ use_locking: If `True`, use locking during the operation.
+
+ Returns:
+ A `Tensor` that will hold the new value of this variable after
+ the scattered subtraction has completed.
+
+ Raises:
+ ValueError: if `sparse_delta` is not an `IndexedSlices`.
+ """
+ raise NotImplementedError
+
+ def count_up_to(self, limit):
+ """Increments this variable until it reaches `limit`.
+
+ When that Op is run it tries to increment the variable by `1`. If
+ incrementing the variable would bring it above `limit` then the Op raises
+ the exception `OutOfRangeError`.
+
+ If no error is raised, the Op outputs the value of the variable before
+ the increment.
+
+ This is essentially a shortcut for `count_up_to(self, limit)`.
+
+ Args:
+ limit: value at which incrementing the variable raises an error.
+
+ Returns:
+ A `Tensor` that will hold the variable value before the increment. If no
+ other Op modifies this variable, the values produced will all be
+ distinct.
+ """
+ raise NotImplementedError
+
+ def load(self, value, session=None):
+ """Load new value into this variable.
+
+ Writes new value to variable's memory. Doesn't add ops to the graph.
+
+ This convenience method requires a session where the graph
+ containing this variable has been launched. If no session is
+ passed, the default session is used. See @{tf.Session} for more
+ information on launching a graph and on sessions.
+
+ ```python
+ v = tf.Variable([1, 2])
+ init = tf.global_variables_initializer()
+
+ with tf.Session() as sess:
+ sess.run(init)
+ # Usage passing the session explicitly.
+ v.load([2, 3], sess)
+ print(v.eval(sess)) # prints [2 3]
+ # Usage with the default session. The 'with' block
+ # above makes 'sess' the default session.
+ v.load([3, 4], sess)
+ print(v.eval()) # prints [3 4]
+ ```
+
+ Args:
+ value: New variable value
+ session: The session to use to evaluate this variable. If
+ none, the default session is used.
+
+ Raises:
+ ValueError: Session is not passed and no default session
+      ValueError: Session is not passed and no default session is available.
+ raise NotImplementedError
+
+ # Conversion to tensor.
+ @staticmethod
+ def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name
+ """Utility function for converting a Variable to a Tensor."""
+ _ = name
+ if dtype and not dtype.is_compatible_with(v.dtype):
+ raise ValueError(
+ "Incompatible type conversion requested to type '%s' for variable "
+ "of type '%s'" % (dtype.name, v.dtype.name))
+ if as_ref:
+ return v._ref() # pylint: disable=protected-access
+ else:
+ return v.value()
+
+ @staticmethod
+ def _OverloadAllOperators(): # pylint: disable=invalid-name
+ """Register overloads for all operators."""
+ for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
+ Variable._OverloadOperator(operator)
+ # For slicing, bind getitem differently than a tensor (use SliceHelperVar
+ # instead)
+ # pylint: disable=protected-access
+ setattr(Variable, "__getitem__", array_ops._SliceHelperVar)
+
+ @staticmethod
+ def _OverloadOperator(operator): # pylint: disable=invalid-name
+ """Defer an operator overload to `ops.Tensor`.
+
+ We pull the operator out of ops.Tensor dynamically to avoid ordering issues.
+
+ Args:
+ operator: string. The operator name.
+ """
+
+ def _run_op(a, *args):
+ # pylint: disable=protected-access
+ return getattr(ops.Tensor, operator)(a._AsTensor(), *args)
+ # Propagate __doc__ to wrapper
+ try:
+ _run_op.__doc__ = getattr(ops.Tensor, operator).__doc__
+ except AttributeError:
+ pass
+
+ setattr(Variable, operator, _run_op)
+
+ # NOTE(mrry): This enables the Variable's overloaded "right" binary
+ # operators to run when the left operand is an ndarray, because it
+ # accords the Variable class higher priority than an ndarray, or a
+ # numpy matrix.
+ # TODO(mrry): Convert this to using numpy's __numpy_ufunc__
+ # mechanism, which allows more control over how Variables interact
+ # with ndarrays.
+ __array_priority__ = 100
+
+ @property
+ def name(self):
+ """The name of this variable."""
+ raise NotImplementedError
+
+ @property
+ def initializer(self):
+ """The initializer operation for this variable."""
+ raise NotImplementedError
+
+ @property
+ def device(self):
+ """The device of this variable."""
+ raise NotImplementedError
+
+ @property
+ def dtype(self):
+ """The `DType` of this variable."""
+ raise NotImplementedError
+
+ @property
+ def op(self):
+ """The `Operation` of this variable."""
+ raise NotImplementedError
+
+ @property
+ def graph(self):
+ """The `Graph` of this variable."""
+ raise NotImplementedError
+
+ @property
+ def shape(self):
+ """The `TensorShape` of this variable.
+
+ Returns:
+ A `TensorShape`.
+ """
+ raise NotImplementedError
+
+ def get_shape(self):
+ """Alias of Variable.shape."""
+ raise NotImplementedError
+
+ def to_proto(self, export_scope=None):
+ """Converts a `Variable` to a `VariableDef` protocol buffer.
+
+ Args:
+ export_scope: Optional `string`. Name scope to remove.
+
+ Returns:
+ A `VariableDef` protocol buffer, or `None` if the `Variable` is not
+ in the specified name scope.
+ """
+ raise NotImplementedError
+
+ @staticmethod
+ def from_proto(variable_def, import_scope=None):
+ """Returns a `Variable` object created from `variable_def`."""
+ return RefVariable(variable_def=variable_def,
+ import_scope=import_scope)
+
+ class SaveSliceInfo(object):
+ """Information on how to save this Variable as a slice.
+
+ Provides internal support for saving variables as slices of a larger
+ variable. This API is not public and is subject to change.
+
+ Available properties:
+
+ * full_name
+ * full_shape
+ * var_offset
+ * var_shape
+ """
+
+ def __init__(self,
+ full_name=None,
+ full_shape=None,
+ var_offset=None,
+ var_shape=None,
+ save_slice_info_def=None,
+ import_scope=None):
+ """Create a `SaveSliceInfo`.
+
+ Args:
+ full_name: Name of the full variable of which this `Variable` is a
+ slice.
+ full_shape: Shape of the full variable, as a list of int.
+ var_offset: Offset of this `Variable` into the full variable, as a
+ list of int.
+ var_shape: Shape of this `Variable`, as a list of int.
+ save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,
+          recreates the SaveSliceInfo object from its contents.
+ `save_slice_info_def` and other arguments are mutually
+ exclusive.
+ import_scope: Optional `string`. Name scope to add. Only used
+ when initializing from protocol buffer.
+ """
+ if save_slice_info_def:
+ assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
+ self.full_name = ops.prepend_name_scope(
+ save_slice_info_def.full_name, import_scope=import_scope)
+ self.full_shape = [i for i in save_slice_info_def.full_shape]
+ self.var_offset = [i for i in save_slice_info_def.var_offset]
+ self.var_shape = [i for i in save_slice_info_def.var_shape]
+ else:
+ self.full_name = full_name
+ self.full_shape = full_shape
+ self.var_offset = var_offset
+ self.var_shape = var_shape
+
+ @property
+ def spec(self):
+ """Computes the spec string used for saving."""
+ full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
+ sl_spec = ":".join([
+ "%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)
+ ])
+ return full_shape_str + sl_spec
+
+ def to_proto(self, export_scope=None):
+ """Returns a SaveSliceInfoDef() proto.
+
+ Args:
+ export_scope: Optional `string`. Name scope to remove.
+
+ Returns:
+ A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not
+ in the specified name scope.
+ """
+ if (export_scope is None or
+ self.full_name.startswith(export_scope)):
+ save_slice_info_def = variable_pb2.SaveSliceInfoDef()
+ save_slice_info_def.full_name = ops.strip_name_scope(
+ self.full_name, export_scope)
+ for i in self.full_shape:
+ save_slice_info_def.full_shape.append(i)
+ for i in self.var_offset:
+ save_slice_info_def.var_offset.append(i)
+ for i in self.var_shape:
+ save_slice_info_def.var_shape.append(i)
+ return save_slice_info_def
+ else:
+ return None
+
+ def __iadd__(self, other):
+ raise NotImplementedError
+
+ def __isub__(self, other):
+ raise NotImplementedError
+
+ def __imul__(self, other):
+ raise NotImplementedError
+
+ def __idiv__(self, other):
+ raise NotImplementedError
+
+ def __itruediv__(self, other):
+ raise NotImplementedError
+
+ def __irealdiv__(self, other):
+ raise NotImplementedError
+
+ def __ipow__(self, other):
+ raise NotImplementedError
+
+
+# TODO(apassos): do not repeat all comments here
+class RefVariable(Variable):
+ """Ref-based implementation of variables."""
+
+ def __init__(self,
+ initial_value=None,
+ trainable=True,
+ collections=None,
+ validate_shape=True,
+ caching_device=None,
+ name=None,
+ variable_def=None,
+ dtype=None,
+ expected_shape=None,
+ import_scope=None,
+ constraint=None):
+ """Creates a new variable with value `initial_value`.
+
+ The new variable is added to the graph collections listed in `collections`,
+ which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
+
+ If `trainable` is `True` the variable is also added to the graph collection
+ `GraphKeys.TRAINABLE_VARIABLES`.
+
+ This constructor creates both a `variable` Op and an `assign` Op to set the
+ variable to its initial value.
+
+ Args:
+ initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
+ which is the initial value for the Variable. The initial value must have
+ a shape specified unless `validate_shape` is set to False. Can also be a
+ callable with no argument that returns the initial value when called. In
+ that case, `dtype` must be specified. (Note that initializer functions
+ from init_ops.py must first be bound to a shape before being used here.)
+ trainable: If `True`, the default, also adds the variable to the graph
+ collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
+ the default list of variables to use by the `Optimizer` classes.
+ collections: List of graph collections keys. The new variable is added to
+ these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
+ validate_shape: If `False`, allows the variable to be initialized with a
+ value of unknown shape. If `True`, the default, the shape of
+ `initial_value` must be known.
+ caching_device: Optional device string describing where the Variable
+ should be cached for reading. Defaults to the Variable's device.
+ If not `None`, caches on another device. Typical use is to cache
+ on the device where the Ops using the Variable reside, to deduplicate
+ copying through `Switch` and other conditional statements.
+ name: Optional name for the variable. Defaults to `'Variable'` and gets
+ uniquified automatically.
+ variable_def: `VariableDef` protocol buffer. If not `None`, recreates
+ the Variable object with its contents, referencing the variable's nodes
+ in the graph, which must already exist. The graph is not changed.
+ `variable_def` and the other arguments are mutually exclusive.
+ dtype: If set, initial_value will be converted to the given type.
+ If `None`, either the datatype will be kept (if `initial_value` is
+ a Tensor), or `convert_to_tensor` will decide.
+ expected_shape: A TensorShape. If set, initial_value is expected
+ to have this shape.
+ import_scope: Optional `string`. Name scope to add to the
+ `Variable.` Only used when initializing from protocol buffer.
+ constraint: An optional projection function to be applied to the variable
+ after being updated by an `Optimizer` (e.g. used to implement norm
+ constraints or value constraints for layer weights). The function must
+ take as input the unprojected Tensor representing the value of the
+ variable and return the Tensor for the projected value
+ (which must have the same shape). Constraints are not safe to
+ use when doing asynchronous distributed training.
+
+ Raises:
+ ValueError: If both `variable_def` and initial_value are specified.
+ ValueError: If the initial value is not specified, or does not have a
+ shape and `validate_shape` is `True`.
+ RuntimeError: If eager execution is enabled.
"""
- if context.executing_eagerly():
- raise RuntimeError(
- "tf.Variable not supported when eager execution is enabled. "
- "Please use tf.contrib.eager.Variable instead")
self._in_graph_mode = True
if variable_def:
# If variable_def is provided, recreates the variable from its fields.
@@ -348,8 +984,7 @@ class Variable(checkpointable.CheckpointableBase):
# Ensure that we weren't lifted into the eager context.
if context.executing_eagerly():
raise RuntimeError(
- "tf.Variable not supported when eager execution is enabled. "
- "Please use tf.contrib.eager.Variable instead")
+ "RefVariable not supported when eager execution is enabled. ")
with ops.name_scope(name, "Variable", [] if init_from_fn else
[initial_value]) as name:
@@ -1068,12 +1703,6 @@ class Variable(checkpointable.CheckpointableBase):
else:
return None
- @staticmethod
- def from_proto(variable_def, import_scope=None):
- """Returns a `Variable` object created from `variable_def`."""
- return Variable(variable_def=variable_def,
- import_scope=import_scope)
-
def __iadd__(self, other):
logging.log_first_n(
logging.WARN,
@@ -1130,90 +1759,6 @@ class Variable(checkpointable.CheckpointableBase):
" if you want a new python Tensor object.", 1)
return self ** other
- class SaveSliceInfo(object):
- """Information on how to save this Variable as a slice.
-
- Provides internal support for saving variables as slices of a larger
- variable. This API is not public and is subject to change.
-
- Available properties:
-
- * full_name
- * full_shape
- * var_offset
- * var_shape
- """
-
- def __init__(self,
- full_name=None,
- full_shape=None,
- var_offset=None,
- var_shape=None,
- save_slice_info_def=None,
- import_scope=None):
- """Create a `SaveSliceInfo`.
-
- Args:
- full_name: Name of the full variable of which this `Variable` is a
- slice.
- full_shape: Shape of the full variable, as a list of int.
- var_offset: Offset of this `Variable` into the full variable, as a
- list of int.
- var_shape: Shape of this `Variable`, as a list of int.
- save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,
- recreates the SaveSliceInfo object its contents.
- `save_slice_info_def` and other arguments are mutually
- exclusive.
- import_scope: Optional `string`. Name scope to add. Only used
- when initializing from protocol buffer.
- """
- if save_slice_info_def:
- assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
- self.full_name = ops.prepend_name_scope(
- save_slice_info_def.full_name, import_scope=import_scope)
- self.full_shape = [i for i in save_slice_info_def.full_shape]
- self.var_offset = [i for i in save_slice_info_def.var_offset]
- self.var_shape = [i for i in save_slice_info_def.var_shape]
- else:
- self.full_name = full_name
- self.full_shape = full_shape
- self.var_offset = var_offset
- self.var_shape = var_shape
-
- @property
- def spec(self):
- """Computes the spec string used for saving."""
- full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
- sl_spec = ":".join([
- "%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)
- ])
- return full_shape_str + sl_spec
-
- def to_proto(self, export_scope=None):
- """Returns a SaveSliceInfoDef() proto.
-
- Args:
- export_scope: Optional `string`. Name scope to remove.
-
- Returns:
- A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not
- in the specified name scope.
- """
- if (export_scope is None or
- self.full_name.startswith(export_scope)):
- save_slice_info_def = variable_pb2.SaveSliceInfoDef()
- save_slice_info_def.full_name = ops.strip_name_scope(
- self.full_name, export_scope)
- for i in self.full_shape:
- save_slice_info_def.full_shape.append(i)
- for i in self.var_offset:
- save_slice_info_def.var_offset.append(i)
- for i in self.var_shape:
- save_slice_info_def.var_shape.append(i)
- return save_slice_info_def
- else:
- return None
-
def _set_save_slice_info(self, save_slice_info):
"""Sets the slice info for this `Variable`.
@@ -1230,7 +1775,7 @@ class PartitionedVariable(object):
"""A container for partitioned `Variable` objects.
@compatibility(eager) `tf.PartitionedVariable` is not compatible with
- eager execution. Use `tfe.Variable` instead which is compatible
+ eager execution. Use `tf.Variable` instead which is compatible
with both eager execution and graph construction. See [the
TensorFlow Eager Execution
guide](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/g3doc/guide.md#variables-and-optimizers)
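
Among the pieces moved into the new base class above is `SaveSliceInfo.spec`, whose string layout is the full shape followed by `offset,size` pairs joined with `:`. A quick reproduction of that format with plain Python and illustrative values (not from the patch):

```python
# Hedged sketch of the spec string produced by SaveSliceInfo.spec above.
full_shape = [10, 10]  # shape of the full variable
var_offset = [0, 5]    # offset of this slice within the full variable
var_shape = [10, 5]    # shape of this slice

full_shape_str = " ".join("%d" % d for d in full_shape) + " "
sl_spec = ":".join("%d,%d" % (o, s) for o, s in zip(var_offset, var_shape))
print(full_shape_str + sl_spec)  # "10 10 0,10:5,5"
```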
diff --git a/tensorflow/python/platform/gfile.py b/tensorflow/python/platform/gfile.py
index fd697d70bf..45de047894 100644
--- a/tensorflow/python/platform/gfile.py
+++ b/tensorflow/python/platform/gfile.py
@@ -38,7 +38,14 @@ from tensorflow.python.util.tf_export import tf_export
@tf_export('gfile.GFile', 'gfile.Open')
class GFile(_FileIO):
- """File I/O wrappers without thread locking."""
+ """File I/O wrappers without thread locking.
+
+  Note that this is somewhat like builtin Python file I/O, but
+ there are semantic differences to make it more efficient for
+ some backing filesystems. For example, a write mode file will
+ not be opened until the first write call (to minimize RPC
+ invocations in network filesystems).
+ """
def __init__(self, name, mode='r'):
super(GFile, self).__init__(name=name, mode=mode)
@@ -46,7 +53,14 @@ class GFile(_FileIO):
@tf_export('gfile.FastGFile')
class FastGFile(_FileIO):
- """File I/O wrappers without thread locking."""
+ """File I/O wrappers without thread locking.
+
+  Note that this is somewhat like builtin Python file I/O, but
+ there are semantic differences to make it more efficient for
+ some backing filesystems. For example, a write mode file will
+ not be opened until the first write call (to minimize RPC
+ invocations in network filesystems).
+ """
def __init__(self, name, mode='r'):
super(FastGFile, self).__init__(name=name, mode=mode)
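
A short usage sketch of the lazy-open behavior the new docstrings above describe (the path is illustrative, not from the patch):

```python
# Hedged sketch: GFile defers opening a write-mode file until the first write.
import tensorflow as tf

f = tf.gfile.GFile("/tmp/example.txt", mode="w")  # nothing is opened yet
f.write("hello\n")                                # the backing file is created here
f.close()

with tf.gfile.GFile("/tmp/example.txt") as g:     # default mode is "r"
  print(g.read())  # "hello\n"
```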
diff --git a/tensorflow/python/profiler/model_analyzer_test.py b/tensorflow/python/profiler/model_analyzer_test.py
index f9891f3b1e..c0e16ca536 100644
--- a/tensorflow/python/profiler/model_analyzer_test.py
+++ b/tensorflow/python/profiler/model_analyzer_test.py
@@ -106,7 +106,7 @@ class PrintModelAnalysisTest(test.TestCase):
# Make sure time is profiled.
gap = 1 if test.is_gpu_available() else 2
for i in range(3, 6, gap):
- mat = re.search('(.*)[um]s/(.*)[um]s', metrics[i])
+ mat = re.search('(.*)(?:us|ms|sec)/(.*)(?:us|ms|sec)', metrics[i])
self.assertGreater(float(mat.group(1)), 0.0)
self.assertGreater(float(mat.group(2)), 0.0)
# Make sure device is profiled.
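
The widened pattern above also accepts `sec` in addition to `us`/`ms`. A tiny check of what it captures (a sketch, not part of the test):

```python
# Hedged sketch of the widened timing regex above.
import re

pattern = r'(.*)(?:us|ms|sec)/(.*)(?:us|ms|sec)'

for line in ['1.23ms/4.56ms', '0.5sec/120us']:
  mat = re.search(pattern, line)
  print(mat.group(1), mat.group(2))  # '1.23' '4.56', then '0.5' '120'
```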
diff --git a/tensorflow/python/profiler/profile_context.py b/tensorflow/python/profiler/profile_context.py
index 18eb66ef98..fa4260a712 100644
--- a/tensorflow/python/profiler/profile_context.py
+++ b/tensorflow/python/profiler/profile_context.py
@@ -88,16 +88,19 @@ def _profiled_run(self,
to_profiles = self.profile_context._profile_candidates()
for to_prof in to_profiles:
cmd, opts, _ = to_prof
+ saved_views = self.profile_context._views.setdefault(cmd, {})
if self.profile_context._debug:
sys.stderr.write('debug: profiling %s step: %d\n' % (cmd, step))
if cmd == 'graph':
- self.profile_context.profiler.profile_graph(opts)
+ saved_views[step] = self.profile_context.profiler.profile_graph(opts)
elif cmd == 'scope':
- self.profile_context.profiler.profile_name_scope(opts)
+ saved_views[step] = self.profile_context.profiler.profile_name_scope(
+ opts)
elif cmd == 'op':
- self.profile_context.profiler.profile_operations(opts)
+ saved_views[step] = self.profile_context.profiler.profile_operations(
+ opts)
elif cmd == 'code':
- self.profile_context.profiler.profile_python(opts)
+ saved_views[step] = self.profile_context.profiler.profile_python(opts)
else:
raise ValueError('Unknown cmd: %s\n' % cmd)
return ret
@@ -185,8 +188,30 @@ class ProfileContext(object):
self._traced_steps = 0
self._auto_profiles = []
self._profiler = None
+ self._views = {}
self._lock = threading.Lock()
+ def get_profiles(self, cmd):
+ """Returns profiling results for each step at which `cmd` was run.
+
+ Args:
+ cmd: string, profiling command used in an `add_auto_profiling` call.
+
+ Returns:
+ dict[int: (MultiGraphNodeProto | GraphNodeProto)]. Keys are steps at which
+ the profiling command was run. Values are the outputs of profiling.
+ For "code" and "op" commands this will be a `MultiGraphNodeProto`, for
+      "scope" and "graph" commands this will be a `GraphNodeProto`.
+
+ Raises:
+ ValueError: if `cmd` was never run (either because no session.run call was
+ made or because there was no `add_auto_profiling` call with the specified
+      `cmd`).
+ """
+ if cmd not in self._views:
+      raise ValueError('No autoprofiling was run for command: {}'.format(cmd))
+ return self._views[cmd]
+
def add_auto_profiling(self, cmd, options, profile_steps):
"""Traces and profiles at some session run steps.
diff --git a/tensorflow/python/profiler/profile_context_test.py b/tensorflow/python/profiler/profile_context_test.py
index a623beee23..107ad443c3 100644
--- a/tensorflow/python/profiler/profile_context_test.py
+++ b/tensorflow/python/profiler/profile_context_test.py
@@ -61,6 +61,8 @@ class ProfilerContextTest(test.TestCase):
profile_str = f.read()
gfile.Remove(outfile)
+ self.assertEqual(set([15, 50, 100]), set(pctx.get_profiles("op").keys()))
+
with lib.ProfilerFromFile(
os.path.join(test.get_temp_dir(), "profile_100")) as profiler:
profiler.profile_operations(options=opts)
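
The new `get_profiles` API above returns the saved per-step views keyed by step. A hedged usage sketch (module paths, options, and the directory are assumptions based on the surrounding code, not taken from the patch):

```python
# Hedged sketch of retrieving auto-profiling results via get_profiles().
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import profile_context

opts = option_builder.ProfileOptionBuilder.time_and_memory()

with profile_context.ProfileContext('/tmp/train_dir') as pctx:
  pctx.add_auto_profiling('op', opts, profile_steps=[15, 50, 100])
  # ... run the training loop with session.run(...) here ...
  profiles = pctx.get_profiles('op')  # {15: ..., 50: ..., 100: ...}
```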
diff --git a/tensorflow/tools/api/generator/BUILD b/tensorflow/python/tools/api/generator/BUILD
index 8c760e6f52..223d1281ba 100644
--- a/tensorflow/tools/api/generator/BUILD
+++ b/tensorflow/python/tools/api/generator/BUILD
@@ -3,8 +3,9 @@
licenses(["notice"]) # Apache 2.0
-load("//tensorflow/tools/api/generator:api_gen.bzl", "ESTIMATOR_API_INIT_FILES")
-load("//tensorflow/tools/api/generator:api_gen.bzl", "TENSORFLOW_API_INIT_FILES")
+load("//tensorflow:tensorflow.bzl", "py_test")
+load("//tensorflow/python/tools/api/generator:api_gen.bzl", "ESTIMATOR_API_INIT_FILES")
+load("//tensorflow/python/tools/api/generator:api_gen.bzl", "TENSORFLOW_API_INIT_FILES")
exports_files(
[
@@ -13,6 +14,18 @@ exports_files(
],
)
+py_binary(
+ name = "create_python_api",
+ srcs = ["//tensorflow/python/tools/api/generator:create_python_api.py"],
+ main = "//tensorflow/python/tools/api/generator:create_python_api.py",
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+ deps = [
+ "//tensorflow/python:no_contrib",
+ "//tensorflow/python/tools/api/generator:doc_srcs",
+ ],
+)
+
py_library(
name = "doc_srcs",
srcs = ["doc_srcs.py"],
diff --git a/tensorflow/tools/api/generator/api_gen.bzl b/tensorflow/python/tools/api/generator/api_gen.bzl
index ed164bf9e4..00e1c4e199 100644
--- a/tensorflow/tools/api/generator/api_gen.bzl
+++ b/tensorflow/python/tools/api/generator/api_gen.bzl
@@ -102,37 +102,41 @@ ESTIMATOR_API_INIT_FILES = [
# END GENERATED ESTIMATOR FILES
]
-# Creates a genrule that generates a directory structure with __init__.py
-# files that import all exported modules (i.e. modules with tf_export
-# decorators).
-#
-# Args:
-# name: name of genrule to create.
-# output_files: List of __init__.py files that should be generated.
-# This list should include file name for every module exported using
-# tf_export. For e.g. if an op is decorated with
-# @tf_export('module1.module2', 'module3'). Then, output_files should
-# include module1/module2/__init__.py and module3/__init__.py.
-# root_init_template: Python init file that should be used as template for
-# root __init__.py file. "# API IMPORTS PLACEHOLDER" comment inside this
-# template will be replaced with root imports collected by this genrule.
-# srcs: genrule sources. If passing root_init_template, the template file
-# must be included in sources.
-# api_name: Name of the project that you want to generate API files for
-# (e.g. "tensorflow" or "estimator").
-# package: Python package containing the @tf_export decorators you want to
-# process
-# package_dep: Python library target containing your package.
-
def gen_api_init_files(
name,
output_files = TENSORFLOW_API_INIT_FILES,
root_init_template = None,
srcs = [],
api_name = "tensorflow",
+ api_version = 2,
package = "tensorflow.python",
package_dep = "//tensorflow/python:no_contrib",
output_package = "tensorflow"):
+ """Creates API directory structure and __init__.py files.
+
+ Creates a genrule that generates a directory structure with __init__.py
+ files that import all exported modules (i.e. modules with tf_export
+ decorators).
+
+ Args:
+ name: name of genrule to create.
+ output_files: List of __init__.py files that should be generated.
+ This list should include file name for every module exported using
+ tf_export. For e.g. if an op is decorated with
+ @tf_export('module1.module2', 'module3'). Then, output_files should
+ include module1/module2/__init__.py and module3/__init__.py.
+ root_init_template: Python init file that should be used as template for
+ root __init__.py file. "# API IMPORTS PLACEHOLDER" comment inside this
+ template will be replaced with root imports collected by this genrule.
+ srcs: genrule sources. If passing root_init_template, the template file
+ must be included in sources.
+ api_name: Name of the project that you want to generate API files for
+ (e.g. "tensorflow" or "estimator").
+ api_version: TensorFlow API version to generate. Must be either 1 or 2.
+ package: Python package containing the @tf_export decorators you want to
+ process
+ package_dep: Python library target containing your package.
+ """
root_init_template_flag = ""
if root_init_template:
root_init_template_flag = "--root_init_template=$(location " + root_init_template + ")"
@@ -140,13 +144,14 @@ def gen_api_init_files(
api_gen_binary_target = "create_" + package + "_api"
native.py_binary(
name = "create_" + package + "_api",
- srcs = ["//tensorflow/tools/api/generator:create_python_api.py"],
- main = "//tensorflow/tools/api/generator:create_python_api.py",
+ srcs = ["//tensorflow/python/tools/api/generator:create_python_api.py"],
+ main = "//tensorflow/python/tools/api/generator:create_python_api.py",
srcs_version = "PY2AND3",
visibility = ["//visibility:public"],
deps = [
package_dep,
- "//tensorflow/tools/api/generator:doc_srcs",
+ "//tensorflow/python:util",
+ "//tensorflow/python/tools/api/generator:doc_srcs",
],
)
@@ -156,8 +161,8 @@ def gen_api_init_files(
cmd = (
"$(location :" + api_gen_binary_target + ") " +
root_init_template_flag + " --apidir=$(@D) --apiname=" +
- api_name + " --package=" + package + " --output_package=" +
- output_package + " $(OUTS)"),
+ api_name + " --apiversion=" + str(api_version) + " --package=" + package +
+ " --output_package=" + output_package + " $(OUTS)"),
srcs = srcs,
tools = [":" + api_gen_binary_target ],
visibility = ["//tensorflow:__pkg__"],
diff --git a/tensorflow/tools/api/generator/create_python_api.py b/tensorflow/python/tools/api/generator/create_python_api.py
index 7f17360c91..863c922216 100644
--- a/tensorflow/tools/api/generator/create_python_api.py
+++ b/tensorflow/python/tools/api/generator/create_python_api.py
@@ -24,11 +24,12 @@ import importlib
import os
import sys
+from tensorflow.python.tools.api.generator import doc_srcs
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
-from tensorflow.tools.api.generator import doc_srcs
API_ATTRS = tf_export.API_ATTRS
+API_ATTRS_V1 = tf_export.API_ATTRS_V1
_DEFAULT_PACKAGE = 'tensorflow.python'
_GENFILES_DIR_SUFFIX = 'genfiles/'
@@ -38,7 +39,7 @@ _SYMBOLS_TO_SKIP_EXPLICITLY = {
'tensorflow.python.platform.flags.FLAGS'
}
_GENERATED_FILE_HEADER = """# This file is MACHINE GENERATED! Do not edit.
-# Generated by: tensorflow/tools/api/generator/create_python_api.py script.
+# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
\"\"\"%s
\"\"\"
@@ -159,13 +160,16 @@ __all__.remove('print_function')
return module_text_map
-def get_api_init_text(package, output_package, api_name):
+def get_api_init_text(package, output_package, api_name, api_version):
"""Get a map from destination module to __init__.py code for that module.
Args:
package: Base python package containing python with target tf_export
decorators.
+ output_package: Base output python package where generated API will
+ be added.
api_name: API you want to generate (e.g. `tensorflow` or `estimator`).
+ api_version: API version you want to generate (`v1` or `v2`).
Returns:
A dictionary where
@@ -173,6 +177,12 @@ def get_api_init_text(package, output_package, api_name):
value: (string) text that should be in __init__.py files for
corresponding modules.
"""
+ if api_version == 1:
+ names_attr = API_ATTRS_V1[api_name].names
+ constants_attr = API_ATTRS_V1[api_name].constants
+ else:
+ names_attr = API_ATTRS[api_name].names
+ constants_attr = API_ATTRS[api_name].constants
module_code_builder = _ModuleInitCodeBuilder()
# Traverse over everything imported above. Specifically,
@@ -193,7 +203,7 @@ def get_api_init_text(package, output_package, api_name):
attr = getattr(module, module_contents_name)
# If attr is _tf_api_constants attribute, then add the constants.
- if module_contents_name == API_ATTRS[api_name].constants:
+ if module_contents_name == constants_attr:
for exports, value in attr:
for export in exports:
names = export.split('.')
@@ -205,9 +215,8 @@ def get_api_init_text(package, output_package, api_name):
_, attr = tf_decorator.unwrap(attr)
# If attr is a symbol with _tf_api_names attribute, then
# add import for it.
- if (hasattr(attr, '__dict__') and
- API_ATTRS[api_name].names in attr.__dict__):
- for export in getattr(attr, API_ATTRS[api_name].names): # pylint: disable=protected-access
+ if (hasattr(attr, '__dict__') and names_attr in attr.__dict__):
+ for export in getattr(attr, names_attr): # pylint: disable=protected-access
names = export.split('.')
dest_module = '.'.join(names[:-1])
module_code_builder.add_import(
@@ -297,7 +306,7 @@ def get_module_docstring(module_name, package, api_name):
def create_api_files(
output_files, package, root_init_template, output_dir, output_package,
- api_name):
+ api_name, api_version):
"""Creates __init__.py files for the Python API.
Args:
@@ -309,7 +318,9 @@ def create_api_files(
"#API IMPORTS PLACEHOLDER" comment in the template file will be replaced
with imports.
output_dir: output API root directory.
+ output_package: Base output package where generated API will be added.
api_name: API you want to generate (e.g. `tensorflow` or `estimator`).
+ api_version: API version to generate (1 or 2).
Raises:
ValueError: if an output file is not under api/ directory,
@@ -326,7 +337,8 @@ def create_api_files(
os.makedirs(os.path.dirname(file_path))
open(file_path, 'a').close()
- module_text_map = get_api_init_text(package, output_package, api_name)
+ module_text_map = get_api_init_text(
+ package, output_package, api_name, api_version)
# Add imports to output files.
missing_output_files = []
@@ -385,6 +397,10 @@ def main():
choices=API_ATTRS.keys(),
help='The API you want to generate.')
parser.add_argument(
+ '--apiversion', default=2, type=int,
+ choices=[1, 2],
+ help='The API version you want to generate.')
+ parser.add_argument(
'--output_package', default='tensorflow', type=str,
help='Root output package.')
@@ -401,7 +417,8 @@ def main():
# Populate `sys.modules` with modules containing tf_export().
importlib.import_module(args.package)
create_api_files(outputs, args.package, args.root_init_template,
- args.apidir, args.output_package, args.apiname)
+ args.apidir, args.output_package, args.apiname,
+ args.apiversion)
if __name__ == '__main__':
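
The new `api_version` argument only changes which per-symbol attributes the generator scans. The standalone sketch below imitates that selection with stand-in `API_ATTRS`/`API_ATTRS_V1` tables and a fake exported symbol; the attribute names match the ones used in this change, but the example endpoint names are made up.

```python
import collections

# Hypothetical stand-ins for tf_export.API_ATTRS / tf_export.API_ATTRS_V1.
_Attributes = collections.namedtuple('_Attributes', ['names', 'constants'])
API_ATTRS = {'tensorflow': _Attributes('_tf_api_names', '_tf_api_constants')}
API_ATTRS_V1 = {'tensorflow': _Attributes('_tf_api_names_v1',
                                          '_tf_api_constants_v1')}


def select_attrs(api_name, api_version):
  """Pick the attribute names to scan, as get_api_init_text now does."""
  if api_version == 1:
    attrs = API_ATTRS_V1[api_name]
  else:
    attrs = API_ATTRS[api_name]
  return attrs.names, attrs.constants


class FakeSymbol(object):
  pass

# Example endpoint names are illustrative only.
FakeSymbol._tf_api_names = ('linalg.det',)
FakeSymbol._tf_api_names_v1 = ('linalg.det', 'matrix_determinant')

names_attr, _ = select_attrs('tensorflow', api_version=1)
print(getattr(FakeSymbol, names_attr))  # ('linalg.det', 'matrix_determinant')
```
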
diff --git a/tensorflow/tools/api/generator/create_python_api_test.py b/tensorflow/python/tools/api/generator/create_python_api_test.py
index 1a7187463a..a565a49d96 100644
--- a/tensorflow/tools/api/generator/create_python_api_test.py
+++ b/tensorflow/python/tools/api/generator/create_python_api_test.py
@@ -22,8 +22,8 @@ import imp
import sys
from tensorflow.python.platform import test
+from tensorflow.python.tools.api.generator import create_python_api
from tensorflow.python.util.tf_export import tf_export
-from tensorflow.tools.api.generator import create_python_api
@tf_export('test_op', 'test_op1')
@@ -59,7 +59,7 @@ class CreatePythonApiTest(test.TestCase):
imports = create_python_api.get_api_init_text(
package=create_python_api._DEFAULT_PACKAGE,
output_package='tensorflow',
- api_name='tensorflow')
+ api_name='tensorflow', api_version=1)
expected_import = (
'from tensorflow.python.test_module '
'import test_op as test_op1')
@@ -77,7 +77,7 @@ class CreatePythonApiTest(test.TestCase):
imports = create_python_api.get_api_init_text(
package=create_python_api._DEFAULT_PACKAGE,
output_package='tensorflow',
- api_name='tensorflow')
+ api_name='tensorflow', api_version=2)
expected_import = ('from tensorflow.python.test_module '
'import TestClass')
self.assertTrue(
@@ -88,7 +88,7 @@ class CreatePythonApiTest(test.TestCase):
imports = create_python_api.get_api_init_text(
package=create_python_api._DEFAULT_PACKAGE,
output_package='tensorflow',
- api_name='tensorflow')
+ api_name='tensorflow', api_version=1)
expected = ('from tensorflow.python.test_module '
'import _TEST_CONSTANT')
self.assertTrue(expected in str(imports),
diff --git a/tensorflow/tools/api/generator/doc_srcs.py b/tensorflow/python/tools/api/generator/doc_srcs.py
index ad1988494d..ad1988494d 100644
--- a/tensorflow/tools/api/generator/doc_srcs.py
+++ b/tensorflow/python/tools/api/generator/doc_srcs.py
diff --git a/tensorflow/tools/api/generator/doc_srcs_test.py b/tensorflow/python/tools/api/generator/doc_srcs_test.py
index dbff904abe..481d9874a4 100644
--- a/tensorflow/tools/api/generator/doc_srcs_test.py
+++ b/tensorflow/python/tools/api/generator/doc_srcs_test.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
-"""Tests for tensorflow.tools.api.generator.doc_srcs."""
+"""Tests for tensorflow.python.tools.api.generator.doc_srcs."""
from __future__ import absolute_import
from __future__ import division
@@ -23,7 +23,7 @@ import importlib
import sys
from tensorflow.python.platform import test
-from tensorflow.tools.api.generator import doc_srcs
+from tensorflow.python.tools.api.generator import doc_srcs
FLAGS = None
diff --git a/tensorflow/python/training/adam.py b/tensorflow/python/training/adam.py
index b65c88e972..bcbe5907d6 100644
--- a/tensorflow/python/training/adam.py
+++ b/tensorflow/python/training/adam.py
@@ -109,12 +109,13 @@ class AdamOptimizer(optimizer.Optimizer):
self._updated_lr = None
def _get_beta_accumulators(self):
- if context.executing_eagerly():
- graph = None
- else:
- graph = ops.get_default_graph()
- return (self._get_non_slot_variable("beta1_power", graph=graph),
- self._get_non_slot_variable("beta2_power", graph=graph))
+ with ops.init_scope():
+ if context.executing_eagerly():
+ graph = None
+ else:
+ graph = ops.get_default_graph()
+ return (self._get_non_slot_variable("beta1_power", graph=graph),
+ self._get_non_slot_variable("beta2_power", graph=graph))
def _create_slots(self, var_list):
# Create the beta1 and beta2 accumulators on the same device as the first
diff --git a/tensorflow/python/training/adam_test.py b/tensorflow/python/training/adam_test.py
index ccdc7e384d..8f84427654 100644
--- a/tensorflow/python/training/adam_test.py
+++ b/tensorflow/python/training/adam_test.py
@@ -315,6 +315,12 @@ class AdamOptimizerTest(test.TestCase):
def testTwoSessions(self):
optimizer = adam.AdamOptimizer()
+
+ with context.eager_mode():
+ var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
+ grads0 = constant_op.constant(np.array([0.1, 0.1]))
+ optimizer.apply_gradients([(grads0, var0)])
+
g = ops.Graph()
with g.as_default():
with session.Session():
diff --git a/tensorflow/python/training/checkpoint_utils.py b/tensorflow/python/training/checkpoint_utils.py
index 5b372e82b3..883f4fd910 100644
--- a/tensorflow/python/training/checkpoint_utils.py
+++ b/tensorflow/python/training/checkpoint_utils.py
@@ -29,6 +29,7 @@ from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import tf_export
@@ -179,6 +180,16 @@ def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
ValueError: If missing variables in current graph.
"""
+ if distribute_lib.get_cross_tower_context():
+ _init_from_checkpoint(None, ckpt_dir_or_file, assignment_map)
+ else:
+ distribute_lib.get_tower_context().merge_call(
+ _init_from_checkpoint, ckpt_dir_or_file, assignment_map)
+
+
+def _init_from_checkpoint(_, ckpt_dir_or_file, assignment_map):
+ """See `init_from_checkpoint` for documentation."""
+
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
@@ -187,10 +198,9 @@ def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
- is_var = lambda x: isinstance(x, variables.Variable)
- if is_var(current_var_or_name) or (
+ if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
- and all(is_var(v) for v in current_var_or_name)):
+ and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
@@ -205,7 +215,7 @@ def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
- if is_var(var):
+ if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
@@ -297,13 +307,34 @@ def _set_checkpoint_initializer(variable,
with ops.device(variable.device), ops.device("/cpu:0"):
restore_op = io_ops.restore_v2(
ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
- if isinstance(variable, resource_variable_ops.ResourceVariable):
+
+ # TODO(priyag, allenl): Use `SaveableObject.restore` instead here.
+ if resource_variable_ops.is_resource_variable(variable):
init_op = variable.assign(restore_op, read_value=False)
else:
init_op = state_ops.assign(variable, restore_op)
- variable._initializer_op = init_op # pylint:disable=protected-access
- restore_op.set_shape(variable.shape)
- variable._initial_value = restore_op # pylint:disable=protected-access
+
+ # pylint:disable=protected-access
+ # We need special handling for `DistributedVariable`s as they contain
+ # multiple actual variables. `assign` on a `DistributedVariable` returns a
+ # combined `init_op` which contains initializers for all the contained
+ # variables. We then set each underlying variable's `_initializer_op` using
+ # the corresponding `init_op`.
+ # TODO(priyag): Use `isinstance` checks when `DistributedVariable` class
+ # moves out of contrib.
+ if any(base.__name__ == "DistributedVariable"
+ for base in variable.__class__.__bases__):
+ assert distribute_lib.get_cross_tower_context()
+ assert hasattr(variable, "_index")
+ for (d, v) in six.iteritems(variable._index):
+ v._initializer_op = init_op._index[d]
+ restore_op.set_shape(v.shape)
+ v._initial_value = restore_op
+ else:
+ variable._initializer_op = init_op
+ restore_op.set_shape(variable.shape)
+ variable._initial_value = restore_op
+ # pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
@@ -337,6 +368,11 @@ def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
+def _is_variable(x):
+ return (isinstance(x, variables.Variable) or
+ resource_variable_ops.is_resource_variable(x))
+
+
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
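
The `DistributedVariable` branch added to `_set_checkpoint_initializer` fans one combined assign op out to every per-device variable through the `_index` mapping. Below is a simplified, TensorFlow-free sketch of that fan-out; the toy classes stand in for the real distributed and resource variables and are hypothetical.

```python
class FakeVariable(object):
  """Stand-in for a single per-device variable."""

  def __init__(self, name):
    self.name = name
    self._initializer_op = None


class FakeDistributedVariable(object):
  """Stand-in for a DistributedVariable: one underlying variable per device."""

  def __init__(self, index):
    self._index = index  # maps device string -> FakeVariable


class FakeCombinedOp(object):
  """Stand-in for the combined init_op returned by assign()."""

  def __init__(self, index):
    self._index = index  # maps device string -> per-device init op


def set_initializers(variable, init_op):
  # Mirrors the new branch: hand each underlying variable its piece of the
  # combined op; plain variables keep the single-op path.
  if hasattr(variable, "_index"):
    for device, v in variable._index.items():
      v._initializer_op = init_op._index[device]
  else:
    variable._initializer_op = init_op


dist_var = FakeDistributedVariable({
    "/gpu:0": FakeVariable("v/replica_0"),
    "/gpu:1": FakeVariable("v/replica_1"),
})
combined = FakeCombinedOp({"/gpu:0": "assign_0", "/gpu:1": "assign_1"})
set_initializers(dist_var, combined)
print(dist_var._index["/gpu:1"]._initializer_op)  # assign_1
```
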
diff --git a/tensorflow/python/training/checkpointable/data_structures.py b/tensorflow/python/training/checkpointable/data_structures.py
index 019d43f09c..507cda8734 100644
--- a/tensorflow/python/training/checkpointable/data_structures.py
+++ b/tensorflow/python/training/checkpointable/data_structures.py
@@ -57,6 +57,8 @@ def _wrap_or_unwrap(value):
return value.value
if isinstance(value, base.CheckpointableBase):
return value # Skip conversion for already checkpointable objects.
+ elif isinstance(value, dict):
+ return _DictWrapper(value)
elif isinstance(value, list):
return _ListWrapper(value)
else:
@@ -438,12 +440,15 @@ class Mapping(CheckpointableDataStructure, collections.Mapping):
def __init__(self, *args, **kwargs):
"""Construct a new sequence. Arguments are passed to `dict()`."""
super(Mapping, self).__init__()
- self._storage = dict(*args, **kwargs)
+ self._storage = self._make_storage(*args, **kwargs)
self._storage.update(
{key: self._track_value(
value, name=self._name_element(key))
for key, value in self._storage.items()})
+ def _make_storage(self, *args, **kwargs):
+ return dict(*args, **kwargs)
+
def _name_element(self, key):
if not isinstance(key, six.string_types):
raise TypeError(
@@ -476,3 +481,185 @@ class Mapping(CheckpointableDataStructure, collections.Mapping):
def __iter__(self):
return iter(self._storage)
+
+
+# Unlike _ListWrapper, having _DictWrapper inherit from dict and pass isinstance
+# checks seems infeasible. CPython will not call Python methods/properties on
+# dictionary subclasses when running e.g. {}.update(dict_subclass), and instead
+# collects elements directly from dict_subclass's C structs. So subclassing dict
+# implies that the storage has to be "self" (i.e. the C structs for the object
+# must be updated correctly), but we also need that storage to be the wrapped
+# dictionary to avoid synchronization bugs (un-tracked external modifications
+# should still show up when the dict is accessed through the wrapper). Monkey
+# patching all of the "wrapped" dict's methods instead of creating a wrapper
+# object is an option, but not a very attractive one (replacing methods without
+# creating reference cycles is difficult, and then dicts would need to be
+# special cased everywhere as being checkpointable).
+class _DictWrapper(Mapping, collections.MutableMapping):
+ """Wraps built-in dicts to support restore-on-create for variables.
+
+ _DictWrapper is to Mapping as _ListWrapper is to List. Unlike Mapping,
+ _DictWrapper allows non-string keys and values and arbitrary mutations (delete
+ keys, reassign values). Like _ListWrapper, these mutations mean that
+ _DictWrapper will raise an exception on save.
+ """
+
+ def __new__(cls, *args):
+ if len(args) == 1 and isinstance(args[0], dict):
+ return super(_DictWrapper, cls).__new__(cls)
+ else:
+ # Allow construction from a sequence, e.g. for nest.pack_sequence_as. In
+ # this case there's nothing to wrap, so we make a normal dictionary. Also
+ # allows constructing empty instances of the _DictWrapper type, as Session
+ # is wont to do (and again there's nothing to wrap, so a normal dictionary
+ # makes more sense).
+ return dict(*args)
+
+ def __init__(self, wrapped_dict):
+ self._non_string_key = False
+ self._non_append_mutation = False
+ self._external_modification = False
+ super(_DictWrapper, self).__init__(wrapped_dict)
+ self._update_snapshot()
+
+ def _make_storage(self, wrapped_dict):
+ """Re-use the wrapped dict for storage (to force them to be in sync)."""
+ return wrapped_dict
+
+ @property
+ def _checkpoint_dependencies(self):
+ """Check that the object is saveable before listing its dependencies."""
+ self._check_external_modification()
+ if self._non_string_key:
+ raise ValueError(
+ "Unable to save the object %s (a dictionary wrapper constructed "
+ "automatically on attribute assignment). The wrapped dictionary "
+ "contains a non-string key which maps to a checkpointable object or "
+ "mutable data structure.\n\nIf you don't need this dictionary "
+ "checkpointed, wrap it in a tf.contrib.checkpoint.NoDependency "
+ "object; it will be automatically un-wrapped and subsequently "
+ "ignored." % (self,))
+ if self._non_append_mutation:
+ raise ValueError(
+ "Unable to save the object %s (a dictionary wrapper constructed "
+ "automatically on attribute assignment). A key mapping to a "
+ "checkpointable object was overwritten or deleted, which would "
+ "cause problems for restoration.\n\nIf you don't need this "
+ "dictionary checkpointed, wrap it in a "
+ "tf.contrib.checkpoint.NoDependency object; it will be automatically "
+ "un-wrapped and subsequently ignored." % (self,))
+ if self._external_modification:
+ raise ValueError(
+ "Unable to save the object %s (a dictionary wrapper constructed "
+ "automatically on attribute assignment). The wrapped dictionary was "
+ "modified outside the wrapper (its final value was %s, its value "
+ "when a checkpoint dependency was added was %s), which breaks "
+ "restoration on object creation.\n\nIf you don't need this "
+ "dictionary checkpointed, wrap it in a "
+ "tf.contrib.checkpoint.NoDependency object; it will be automatically "
+ "un-wrapped and subsequently ignored." % (
+ self, self, self._last_wrapped_dict_snapshot))
+ assert not self._dirty # Any reason for dirtiness should have an exception.
+ return super(_DictWrapper, self)._checkpoint_dependencies
+
+ @property
+ def _dirty(self):
+ """Check if there has already been a mutation which prevents saving."""
+ return (self._external_modification
+ or self._non_append_mutation
+ or self._non_string_key)
+
+ def _check_external_modification(self):
+ """Checks for any changes to the wrapped dict not through the wrapper."""
+ if self._dirty:
+ return
+ if self != self._last_wrapped_dict_snapshot:
+ self._external_modification = True
+ self._last_wrapped_dict_snapshot = None
+
+ def _update_snapshot(self):
+ """Acknowledges tracked changes to the wrapped dict."""
+ if self._dirty:
+ return
+ self._last_wrapped_dict_snapshot = dict(self)
+
+ def _track_value(self, value, name):
+ """Allows storage of non-checkpointable objects."""
+ if isinstance(name, six.string_types):
+ string_key = True
+ else:
+ name = "-non_string_key"
+ string_key = False
+ try:
+ no_dependency = isinstance(value, NoDependency)
+ value = super(_DictWrapper, self)._track_value(value=value, name=name)
+ if not (string_key or no_dependency):
+ # A non-string key maps to a checkpointable value. This data structure
+ # is not saveable.
+ self._non_string_key = True
+ return value
+ except ValueError:
+ # Even if this value isn't checkpointable, we need to make sure
+ # NoDependency objects get unwrapped.
+ return sticky_attribute_assignment(
+ checkpointable=self, value=value, name=name)
+
+ def _name_element(self, key):
+ """Don't throw errors for non-string keys."""
+ if isinstance(key, six.string_types):
+ return super(_DictWrapper, self)._name_element(key)
+ else:
+ return key
+
+ def __setitem__(self, key, value):
+ """Allow any modifications, but possibly mark the wrapper as unsaveable."""
+ self._check_external_modification()
+ no_dep = isinstance(value, NoDependency)
+ if isinstance(key, six.string_types):
+ existing_dependency = self._lookup_dependency(key)
+ value = self._track_value(value, name=key)
+ else:
+ value = _wrap_or_unwrap(value)
+ existing_dependency = None
+ if not no_dep and isinstance(value, base.CheckpointableBase):
+ # Non-string keys are OK as long as we have no reason to add a
+ # dependency on the value (either because the value is not
+ # checkpointable, or because it was wrapped in a NoDependency object).
+ self._non_string_key = True
+ current_value = self._storage.setdefault(key, value)
+ if current_value is not value:
+ if ((not no_dep and isinstance(value, base.CheckpointableBase))
+ # We don't want to just check that the existing object is
+ # checkpointable, since it may have been wrapped in a NoDependency
+ # object.
+ or existing_dependency is not None):
+ # A checkpointable object was replaced under the same key; this means
+ # that restoring would be error-prone, so we'll throw an exception on
+ # save.
+ self._non_append_mutation = True
+ self._storage[key] = value
+
+ self._update_snapshot()
+
+ def __delitem__(self, key):
+ self._check_external_modification()
+ existing_value = self[key]
+ if isinstance(existing_value, base.CheckpointableBase):
+ # Deleting tracked checkpointable values means restoring is problematic,
+ # so we'll throw an exception on save.
+ self._non_append_mutation = True
+ del self._storage[key]
+ self._update_snapshot()
+
+ def __repr__(self):
+ return "DictWrapper(%s)" % (repr(self._storage),)
+
+ def __hash__(self):
+ raise TypeError("unhashable type: 'DictWrapper'")
+
+ def __eq__(self, other):
+ return self._storage == getattr(other, "_storage", other)
+
+ def update(self, *args, **kwargs):
+ for key, value in dict(*args, **kwargs).items():
+ self[key] = value
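
Most of `_DictWrapper`'s bookkeeping reduces to one idea: share storage with the caller's dict and compare it against a snapshot taken after every tracked mutation, so untracked writes can be detected at save time. The sketch below strips that snapshot mechanism down to its essentials; it has none of the checkpointing machinery and the class name is hypothetical.

```python
class SnapshotDict(object):
  """Toy version of _DictWrapper's dirty-tracking (illustrative only)."""

  def __init__(self, wrapped):
    self._storage = wrapped            # share storage with the caller's dict
    self._external_modification = False
    self._snapshot = dict(wrapped)

  def __setitem__(self, key, value):
    self._check_external_modification()
    self._storage[key] = value
    self._snapshot = dict(self._storage)  # acknowledge the tracked change

  def _check_external_modification(self):
    if self._storage != self._snapshot:
      self._external_modification = True

  def assert_saveable(self):
    self._check_external_modification()
    if self._external_modification:
      raise ValueError("wrapped dict was modified outside the wrapper")


raw = {}
wrapper = SnapshotDict(raw)
wrapper["a"] = 1          # tracked mutation: still saveable
wrapper.assert_saveable()
raw["b"] = 2              # untracked mutation through the raw dict
try:
  wrapper.assert_saveable()
except ValueError as e:
  print(e)
```
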
diff --git a/tensorflow/python/training/checkpointable/data_structures_test.py b/tensorflow/python/training/checkpointable/data_structures_test.py
index ec8c9da809..472b7c32b4 100644
--- a/tensorflow/python/training/checkpointable/data_structures_test.py
+++ b/tensorflow/python/training/checkpointable/data_structures_test.py
@@ -19,6 +19,7 @@ from __future__ import print_function
import os
import numpy
+import six
from tensorflow.python.eager import context
from tensorflow.python.eager import test
@@ -32,6 +33,7 @@ from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training.checkpointable import data_structures
from tensorflow.python.training.checkpointable import tracking
+from tensorflow.python.training.checkpointable import util
class HasList(training.Model):
@@ -72,11 +74,14 @@ class ListTests(test.TestCase):
model = HasList()
output = model(array_ops.ones([32, 2]))
self.assertAllEqual([32, 12], output.shape)
- self.assertEqual(2, len(model.layers))
- self.assertIs(model.layer_list, model.layers[0])
- self.assertEqual(10, len(model.layers[0].layers))
+ self.assertEqual(11, len(model.layers))
+ self.assertEqual(10, len(model.layer_list.layers))
+ six.assertCountEqual(
+ self,
+ model.layers,
+ model.layer_list.layers + model.layers_with_updates)
for index in range(10):
- self.assertEqual(3 + index, model.layers[0].layers[index].units)
+ self.assertEqual(3 + index, model.layer_list.layers[index].units)
self.assertEqual(2, len(model._checkpoint_dependencies))
self.assertIs(model.layer_list, model._checkpoint_dependencies[0].ref)
self.assertIs(model.layers_with_updates,
@@ -123,9 +128,11 @@ class ListTests(test.TestCase):
self.l2 = []
model = HasEqualContainers()
- model.l1.append(HasEqualContainers())
- model.l2.append(HasEqualContainers())
- self.assertEqual([model.l1, model.l2], model.layers)
+ first_layer = HasEqualContainers()
+ model.l1.append(first_layer)
+ second_layer = HasEqualContainers()
+ model.l2.append(second_layer)
+ self.assertEqual([first_layer, second_layer], model.layers)
def testNotCheckpointable(self):
class NotCheckpointable(object):
@@ -260,9 +267,8 @@ class MappingTests(test.TestCase):
model = HasMapping()
output = model(array_ops.ones([32, 2]))
self.assertAllEqual([32, 7], output.shape)
- self.assertEqual(1, len(model.layers))
- self.assertIs(model.layer_dict, model.layers[0])
- self.assertEqual(3, len(model.layers[0].layers))
+ self.assertEqual(5, len(model.layers))
+ six.assertCountEqual(self, model.layers, model.layer_dict.layers)
self.assertEqual(1, len(model._checkpoint_dependencies))
self.assertIs(model.layer_dict, model._checkpoint_dependencies[0].ref)
self.evaluate([v.initializer for v in model.variables])
@@ -298,6 +304,124 @@ class MappingTests(test.TestCase):
data_structures.Mapping()])
self.assertEqual(2, len(has_mappings))
self.assertNotIn(data_structures.Mapping(), has_mappings)
+ # In contrast to Mapping, dict wrappers are not hashable
+ a = tracking.Checkpointable()
+ a.d = {}
+ self.assertEqual({}, a.d)
+ self.assertFalse({} != a.d) # pylint: disable=g-explicit-bool-comparison
+ self.assertNotEqual({1: 2}, a.d)
+ with self.assertRaisesRegexp(TypeError, "unhashable"):
+ set([a.d])
+
+ def testDictWrapperBadKeys(self):
+ a = tracking.Checkpointable()
+ a.d = {}
+ a.d[1] = data_structures.List()
+ model = training.Model()
+ model.sub = a
+ save_path = os.path.join(self.get_temp_dir(), "ckpt")
+ with self.assertRaisesRegexp(ValueError, "non-string key"):
+ model.save_weights(save_path)
+
+ def testDictWrapperNoDependency(self):
+ a = tracking.Checkpointable()
+ a.d = data_structures.NoDependency({})
+ a.d[1] = [3]
+ self.assertEqual([a], util.list_objects(a))
+ model = training.Model()
+ model.sub = a
+ save_path = os.path.join(self.get_temp_dir(), "ckpt")
+ model.save_weights(save_path)
+ model.load_weights(save_path)
+
+ def testNonStringKeyNotCheckpointableValue(self):
+ a = tracking.Checkpointable()
+ a.d = {}
+ a.d["a"] = [3]
+ a.d[1] = data_structures.NoDependency([3])
+ self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
+ model = training.Model()
+ model.sub = a
+ save_path = os.path.join(self.get_temp_dir(), "ckpt")
+ model.save_weights(save_path)
+ model.load_weights(save_path)
+
+ def testNonAppendNotCheckpointable(self):
+ # Non-append mutations (deleting or overwriting values) are OK when the
+ # values aren't tracked.
+ a = tracking.Checkpointable()
+ a.d = {}
+ a.d["a"] = [3]
+ a.d[1] = 3
+ a.d[1] = 2
+ self.assertEqual(2, a.d[1])
+ del a.d[1]
+ a.d[2] = data_structures.NoDependency(tracking.Checkpointable())
+ second = tracking.Checkpointable()
+ a.d[2] = data_structures.NoDependency(second)
+ self.assertIs(second, a.d[2])
+ self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
+ model = training.Model()
+ model.sub = a
+ save_path = os.path.join(self.get_temp_dir(), "ckpt")
+ model.save_weights(save_path)
+ model.load_weights(save_path)
+
+ def testDelNoSave(self):
+ model = training.Model()
+ model.d = {}
+ model.d["a"] = []
+ del model.d["a"]
+ save_path = os.path.join(self.get_temp_dir(), "ckpt")
+ with self.assertRaisesRegexp(ValueError, "overwritten or deleted"):
+ model.save_weights(save_path)
+
+ def testPopNoSave(self):
+ model = training.Model()
+ model.d = {}
+ model.d["a"] = []
+ model.d.pop("a")
+ save_path = os.path.join(self.get_temp_dir(), "ckpt")
+ with self.assertRaisesRegexp(ValueError, "overwritten or deleted"):
+ model.save_weights(save_path)
+
+ def testExternalModificationNoSave(self):
+ model = training.Model()
+ external_reference = {}
+ model.d = external_reference
+ external_reference["a"] = []
+ save_path = os.path.join(self.get_temp_dir(), "ckpt")
+ with self.assertRaisesRegexp(ValueError, "modified outside the wrapper"):
+ model.save_weights(save_path)
+
+ def testOverwriteNoSave(self):
+ model = training.Model()
+ model.d = {}
+ model.d["a"] = {}
+ model.d["a"] = {}
+ save_path = os.path.join(self.get_temp_dir(), "ckpt")
+ with self.assertRaisesRegexp(ValueError, "overwritten or deleted"):
+ model.save_weights(save_path)
+
+ def testIter(self):
+ model = training.Model()
+ model.d = {1: 3}
+ model.d[1] = 3
+ self.assertEqual([1], list(model.d))
+ new_dict = {}
+ # This update() is super tricky. If the dict wrapper subclasses dict,
+ # CPython will access its storage directly instead of calling any
+ # methods/properties on the object. So the options are either not to
+ # subclass dict (in which case update will call normal iter methods, but the
+ # object won't pass isinstance checks) or to subclass dict and keep that
+ # storage updated (not shadowing all its methods like _ListWrapper).
+ new_dict.update(model.d)
+ self.assertEqual({1: 3}, new_dict)
+
+ def testConstructableFromSequence(self):
+ result = data_structures._DictWrapper([(1, 2), (3, 4)])
+ self.assertIsInstance(result, dict)
+ self.assertEqual({1: 2, 3: 4}, result)
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/training/checkpointable/layer_utils.py b/tensorflow/python/training/checkpointable/layer_utils.py
index 978fcb2252..d65b631fe9 100644
--- a/tensorflow/python/training/checkpointable/layer_utils.py
+++ b/tensorflow/python/training/checkpointable/layer_utils.py
@@ -32,10 +32,15 @@ def is_layer(obj):
def filter_empty_layer_containers(layer_list):
"""Filter out empty Layer-like containers."""
- return [layer for layer in layer_list
- # Filter out only empty Checkpointable data structures. Empty Networks
- # will still show up in Model.layers.
- if is_layer(layer) or getattr(layer, "layers", True)]
+ filtered = []
+ for obj in layer_list:
+ if is_layer(obj):
+ filtered.append(obj)
+ else:
+ # Checkpointable data structures will not show up in ".layers" lists, but
+ # the layers they contain will.
+ filtered.extend(obj.layers)
+ return filtered
def gather_trainable_weights(trainable, sub_layers, extra_variables):
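
With this change a Checkpointable container no longer appears in `.layers` itself; only the layers it holds do, which is why the layer counts asserted earlier in this change shift (e.g. 11 instead of 2). A minimal sketch of that one-level flattening, with hypothetical toy classes standing in for Keras layers and containers:

```python
def is_layer(obj):
  # Stand-in check; the real helper inspects Layer-specific attributes.
  return getattr(obj, "is_layer", False)


def filter_empty_layer_containers(layer_list):
  """Same shape as the updated helper: expand containers into their layers."""
  filtered = []
  for obj in layer_list:
    if is_layer(obj):
      filtered.append(obj)
    else:
      filtered.extend(obj.layers)   # containers contribute their contents
  return filtered


class FakeLayer(object):
  is_layer = True

  def __init__(self, name):
    self.name = name


class FakeContainer(object):
  def __init__(self, layers):
    self.layers = layers


dense = FakeLayer("dense")
container = FakeContainer([FakeLayer("a"), FakeLayer("b")])
print([l.name for l in filter_empty_layer_containers([dense, container])])
# ['dense', 'a', 'b']
```
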
diff --git a/tensorflow/python/training/checkpointable/tracking_test.py b/tensorflow/python/training/checkpointable/tracking_test.py
index 96da0d6e47..f8d17cd417 100644
--- a/tensorflow/python/training/checkpointable/tracking_test.py
+++ b/tensorflow/python/training/checkpointable/tracking_test.py
@@ -19,6 +19,7 @@ from __future__ import print_function
import os
import numpy
+import six
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
@@ -144,6 +145,29 @@ class InterfaceTests(test.TestCase):
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes
+ def testDictionariesBasic(self):
+ a = training.Model()
+ b = training.Model()
+ a.attribute = {"b": b}
+ c = training.Model()
+ a.attribute["c"] = []
+ a.attribute["c"].append(c)
+ a_deps = util.list_objects(a)
+ self.assertIn(b, a_deps)
+ self.assertIn(c, a_deps)
+ self.assertIs(b, a.attribute["b"])
+ six.assertCountEqual(
+ self,
+ ["b", "c"],
+ [dep.name for dep in a.attribute._checkpoint_dependencies])
+ self.assertEqual([b, c], a.layers)
+ self.assertEqual([b, c], a.attribute.layers)
+ self.assertEqual([c], a.attribute["c"].layers)
+ checkpoint = util.Checkpoint(a=a)
+ save_path = checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
+ checkpoint.restore(save_path).assert_consumed()
+
+ @test_util.run_in_graph_and_eager_modes
def testNoDepList(self):
a = training.Model()
a.l1 = data_structures.NoDependency([])
@@ -159,12 +183,13 @@ class InterfaceTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testAssertions(self):
a = tracking.Checkpointable()
- a.l = [numpy.zeros([2, 2])]
- self.assertAllEqual([numpy.zeros([2, 2])], a.l)
- self.assertAllClose([numpy.zeros([2, 2])], a.l)
- nest.map_structure(self.assertAllClose, a.l, [numpy.zeros([2, 2])])
- a.tensors = [array_ops.ones([2, 2]), array_ops.zeros([3, 3])]
- self.assertAllClose([numpy.ones([2, 2]), numpy.zeros([3, 3])],
+ a.l = {"k": [numpy.zeros([2, 2])]}
+ self.assertAllEqual(nest.flatten({"k": [numpy.zeros([2, 2])]}),
+ nest.flatten(a.l))
+ self.assertAllClose({"k": [numpy.zeros([2, 2])]}, a.l)
+ nest.map_structure(self.assertAllClose, a.l, {"k": [numpy.zeros([2, 2])]})
+ a.tensors = {"k": [array_ops.ones([2, 2]), array_ops.zeros([3, 3])]}
+ self.assertAllClose({"k": [numpy.ones([2, 2]), numpy.zeros([3, 3])]},
self.evaluate(a.tensors))
if __name__ == "__main__":
diff --git a/tensorflow/python/training/checkpointable/util.py b/tensorflow/python/training/checkpointable/util.py
index 6ae5765b13..5d26a817d4 100644
--- a/tensorflow/python/training/checkpointable/util.py
+++ b/tensorflow/python/training/checkpointable/util.py
@@ -361,24 +361,42 @@ class _ObjectIdentityWeakKeyDictionary(_ObjectIdentityDictionary):
yield unwrapped
-class _ObjectIdentityWeakSet(collections.MutableSet):
- """Like weakref.WeakSet, but compares objects with "is"."""
+class _ObjectIdentitySet(collections.MutableSet):
+ """Like the built-in set, but compares objects with "is"."""
- def __init__(self):
- self._storage = set()
+ def __init__(self, *args):
+ self._storage = set([self._wrap_key(obj) for obj in list(*args)])
+
+ def _wrap_key(self, key):
+ return _ObjectIdentityWrapper(key)
def __contains__(self, key):
- return _WeakObjectIdentityWrapper(key) in self._storage
+ return self._wrap_key(key) in self._storage
def discard(self, key):
- self._storage.discard(_WeakObjectIdentityWrapper(key))
+ self._storage.discard(self._wrap_key(key))
def add(self, key):
- self._storage.add(_WeakObjectIdentityWrapper(key))
+ self._storage.add(self._wrap_key(key))
+
+ def __len__(self):
+ return len(self._storage)
+
+ def __iter__(self):
+ keys = list(self._storage)
+ for key in keys:
+ yield key.unwrapped
+
+
+class _ObjectIdentityWeakSet(_ObjectIdentitySet):
+ """Like weakref.WeakSet, but compares objects with "is"."""
+
+ def _wrap_key(self, key):
+ return _WeakObjectIdentityWrapper(key)
def __len__(self):
# Iterate, discarding old weak refs
- return len(list(self))
+ return len([_ for _ in self])
def __iter__(self):
keys = list(self._storage)
@@ -747,7 +765,7 @@ def capture_dependencies(template):
initial_value=initializer,
name=name,
**inner_kwargs)
- if name.startswith(name_prefix):
+ if name is not None and name.startswith(name_prefix):
scope_stripped_name = name[len(name_prefix) + 1:]
if not checkpointable_parent:
return template._add_variable_with_custom_getter( # pylint: disable=protected-access
@@ -857,8 +875,8 @@ class CheckpointLoadStatus(_LoadStatus):
for checkpointable_object in list_objects(self._root_checkpointable):
self._checkpoint.all_python_objects.add(checkpointable_object)
unused_python_objects = (
- set(self._checkpoint.all_python_objects)
- - set(self._checkpoint.object_by_proto_id.values()))
+ _ObjectIdentitySet(self._checkpoint.all_python_objects)
+ - _ObjectIdentitySet(self._checkpoint.object_by_proto_id.values()))
if unused_python_objects:
raise AssertionError(
("Some Python objects were not bound to checkpointed values, likely "
diff --git a/tensorflow/python/training/learning_rate_decay.py b/tensorflow/python/training/learning_rate_decay.py
index 51190264e8..fd195a7965 100644
--- a/tensorflow/python/training/learning_rate_decay.py
+++ b/tensorflow/python/training/learning_rate_decay.py
@@ -356,7 +356,15 @@ def natural_exp_decay(learning_rate,
The function returns the decayed learning rate. It is computed as:
```python
- decayed_learning_rate = learning_rate * exp(-decay_rate * global_step)
+ decayed_learning_rate = learning_rate * exp(-decay_rate * global_step /
+ decay_step)
+ ```
+
+ or, if `staircase` is `True`, as:
+
+ ```python
+ decayed_learning_rate = learning_rate * exp(-decay_rate * floor(global_step /
+ decay_step))
```
Example: decay exponentially with a base of 0.96:
@@ -365,8 +373,10 @@ def natural_exp_decay(learning_rate,
...
global_step = tf.Variable(0, trainable=False)
learning_rate = 0.1
+ decay_steps = 5
k = 0.5
- learning_rate = tf.train.exponential_time_decay(learning_rate, global_step, k)
+ learning_rate = tf.train.natural_exp_decay(learning_rate, global_step,
+ decay_steps, k)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
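
To make the corrected `natural_exp_decay` docstring concrete, the sketch below evaluates both formulas (continuous and `staircase`) in plain Python with the same constants as the docstring example (learning_rate=0.1, decay_steps=5, k=0.5). It is a numeric illustration only, not the TensorFlow op.

```python
import math


def natural_exp_decay(learning_rate, global_step, decay_steps, decay_rate,
                      staircase=False):
  """Pure-Python version of the two formulas in the docstring above."""
  p = float(global_step) / decay_steps
  if staircase:
    p = math.floor(p)
  return learning_rate * math.exp(-decay_rate * p)


for step in (0, 4, 5, 10):
  print(step,
        round(natural_exp_decay(0.1, step, 5, 0.5), 5),
        round(natural_exp_decay(0.1, step, 5, 0.5, staircase=True), 5))
```
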
diff --git a/tensorflow/python/training/quantize_training.i b/tensorflow/python/training/quantize_training.i
index fb5e47efa0..54d6789616 100644
--- a/tensorflow/python/training/quantize_training.i
+++ b/tensorflow/python/training/quantize_training.i
@@ -73,6 +73,8 @@ def do_quantize_training_on_graphdef(input_graph, num_bits):
do_quantize_training_on_graphdef._tf_api_names = [
'train.do_quantize_training_on_graphdef']
+do_quantize_training_on_graphdef._tf_api_names_v1 = [
+ 'train.do_quantize_training_on_graphdef']
%}
%unignoreall
diff --git a/tensorflow/python/training/saver.py b/tensorflow/python/training/saver.py
index 1ee975fbe4..60885e9292 100644
--- a/tensorflow/python/training/saver.py
+++ b/tensorflow/python/training/saver.py
@@ -126,8 +126,10 @@ class BaseSaverBuilder(object):
def f():
with ops.device(v.device):
x = v.read_value()
- with ops.device("/device:CPU:0"):
- return array_ops.identity(x)
+ # To allow variables placed on non-CPU devices to be checkpointed,
+ # we copy them to CPU on the same machine first.
+ with ops.device("/device:CPU:0"):
+ return array_ops.identity(x)
return f
self.handle_op = var.handle
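
The reindented block keeps the read on the variable's device and pins only the copy that feeds the save op to the colocated CPU, which is what the new `testResourceColocation` test below asserts. A minimal TF 1.x-style sketch of that nested device placement; the device strings and shape are illustrative, and the printed `.device` values assume exactly these scopes.

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
  with tf.device("/device:GPU:0"):      # the device the variable lives on
    v = tf.Variable([1.0, 2.0], name="v")
    x = v.read_value()                   # read stays on the variable's device
    # Nested scope: only the copy that will feed the save op moves to CPU.
    with tf.device("/device:CPU:0"):
      cpu_copy = tf.identity(x)

print(x.device)         # /device:GPU:0
print(cpu_copy.device)  # /device:CPU:0
```
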
diff --git a/tensorflow/python/training/saver_test.py b/tensorflow/python/training/saver_test.py
index ae9c244aaf..ecce8ae6bd 100644
--- a/tensorflow/python/training/saver_test.py
+++ b/tensorflow/python/training/saver_test.py
@@ -174,6 +174,24 @@ class SaverTest(test.TestCase):
def testResourceBasic(self):
self.basicSaveRestore(resource_variable_ops.ResourceVariable)
+ def testResourceColocation(self):
+ partitioner = partitioned_variables.fixed_size_partitioner(num_shards=2)
+ with ops_lib.device("/job:ps/device:GPU:0"):
+ v = variable_scope.get_variable("v0",
+ shape=[10, 2],
+ partitioner=partitioner,
+ use_resource=True)
+ saver_module.Saver({"v0": v}).build()
+ save_op = None
+ for op in ops_lib.get_default_graph().get_operations():
+ if op.type == "SaveV2":
+ save_op = op
+ break
+ assert save_op is not None
+ for save_inp in save_op.inputs[3:]:
+ # Input to SaveV2 op is placed on CPU of the same device as the Variable.
+ self.assertEqual("/job:ps/device:CPU:0", save_inp.device)
+
def testResourceVariableReadOpsAddedDeterministically(self):
graph_defs = []
num_graphs = 10
diff --git a/tensorflow/python/training/server_lib.py b/tensorflow/python/training/server_lib.py
index 2f421d1cc0..58cf5277fe 100644
--- a/tensorflow/python/training/server_lib.py
+++ b/tensorflow/python/training/server_lib.py
@@ -42,8 +42,8 @@ def _make_server_def(server_or_cluster_def, job_name, task_index, protocol,
Defaults to the value in `server_or_cluster_def`, if specified. Otherwise
defaults to 0 if the server's job has only one task.
protocol: (Optional.) Specifies the protocol to be used by the server.
- Acceptable values include `"grpc"`. Defaults to the value in
- `server_or_cluster_def`, if specified. Otherwise defaults to `"grpc"`.
+ Acceptable values include `"grpc", "grpc+verbs"`. Defaults to the value
+ in `server_or_cluster_def`, if specified. Otherwise defaults to `"grpc"`.
config: (Options.) A `tf.ConfigProto` that specifies default configuration
options for all sessions that run on this server.
@@ -129,8 +129,9 @@ class Server(object):
job. Defaults to the value in `server_or_cluster_def`, if specified.
Otherwise defaults to 0 if the server's job has only one task.
protocol: (Optional.) Specifies the protocol to be used by the server.
- Acceptable values include `"grpc"`. Defaults to the value in
- `server_or_cluster_def`, if specified. Otherwise defaults to `"grpc"`.
+ Acceptable values include `"grpc", "grpc+verbs"`. Defaults to the
+ value in `server_or_cluster_def`, if specified. Otherwise defaults to
+ `"grpc"`.
config: (Options.) A `tf.ConfigProto` that specifies default
configuration options for all sessions that run on this server.
start: (Optional.) Boolean, indicating whether to start the server
diff --git a/tensorflow/python/training/warm_starting_util.py b/tensorflow/python/training/warm_starting_util.py
index ec740abdd1..b1a7cfab83 100644
--- a/tensorflow/python/training/warm_starting_util.py
+++ b/tensorflow/python/training/warm_starting_util.py
@@ -22,7 +22,6 @@ import collections
import six
from tensorflow.python.framework import ops
-from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
@@ -83,11 +82,6 @@ class VocabInfo(
)
-def _is_variable(x):
- return (isinstance(x, variables_lib.Variable) or
- isinstance(x, resource_variable_ops.ResourceVariable))
-
-
def _infer_var_name(var):
"""Returns name of the `var`.
@@ -126,9 +120,10 @@ def _warm_start_var(var, prev_ckpt, prev_tensor_name=None):
prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If
None, we lookup tensor with same name as given `var`.
"""
- if _is_variable(var):
+ if checkpoint_utils._is_variable(var): # pylint: disable=protected-access
current_var_name = _infer_var_name([var])
- elif isinstance(var, list) and all(_is_variable(v) for v in var):
+ elif (isinstance(var, list) and
+ all(checkpoint_utils._is_variable(v) for v in var)): # pylint: disable=protected-access
current_var_name = _infer_var_name(var)
elif isinstance(var, variables_lib.PartitionedVariable):
current_var_name = _infer_var_name([var])
@@ -193,9 +188,10 @@ def _warm_start_var_with_vocab(var,
prev_vocab_path):
raise ValueError("Invalid args: Must provide all of [current_vocab_path, "
"current_vocab_size, prev_ckpt, prev_vocab_path}.")
- if _is_variable(var):
+ if checkpoint_utils._is_variable(var):
var = [var]
- elif isinstance(var, list) and all(_is_variable(v) for v in var):
+ elif (isinstance(var, list) and
+ all(checkpoint_utils._is_variable(v) for v in var)):
var = var
elif isinstance(var, variables_lib.PartitionedVariable):
var = var._get_variable_list()
@@ -271,7 +267,7 @@ def _get_grouped_variables(vars_to_warm_start):
for v in vars_to_warm_start:
list_of_vars += ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope=v)
- elif all([_is_variable(v) for v in vars_to_warm_start]):
+ elif all([checkpoint_utils._is_variable(v) for v in vars_to_warm_start]): # pylint: disable=protected-access
list_of_vars = vars_to_warm_start
else:
raise ValueError("If `vars_to_warm_start` is a list, it must be all "
diff --git a/tensorflow/python/util/deprecation.py b/tensorflow/python/util/deprecation.py
index c8ed2b715d..9e2202eaf8 100644
--- a/tensorflow/python/util/deprecation.py
+++ b/tensorflow/python/util/deprecation.py
@@ -37,6 +37,11 @@ _PRINT_DEPRECATION_WARNINGS = True
_PRINTED_WARNING = {}
+class DeprecatedNamesAlreadySet(Exception):
+ """Raised when setting deprecated names multiple times for the same symbol."""
+ pass
+
+
def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
"""Adds a deprecation notice to a docstring for deprecated functions."""
main_text = ['THIS FUNCTION IS DEPRECATED. It will be removed %s.' %
@@ -219,6 +224,35 @@ def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True):
func_or_class.__doc__, None, 'Please use %s instead.' % name))
+def deprecated_endpoints(*args):
+ """Decorator for marking endpoints deprecated.
+
+ This decorator does not print deprecation messages.
+ TODO(annarev): eventually start printing deprecation warnings when
+ @deprecated_endpoints decorator is added.
+
+ Args:
+ *args: Deprecated endpoint names.
+
+ Returns:
+ A function that takes a symbol as an argument and adds
+ _tf_deprecated_api_names to that symbol.
+ _tf_deprecated_api_names is set to the list of deprecated
+ endpoint names for the symbol.
+ """
+ def deprecated_wrapper(func):
+ # pylint: disable=protected-access
+ if '_tf_deprecated_api_names' in func.__dict__:
+ raise DeprecatedNamesAlreadySet(
+ 'Cannot set deprecated names for %s to %s. '
+ 'Deprecated names are already set to %s.' % (
+ func.__name__, str(args), str(func._tf_deprecated_api_names)))
+ func._tf_deprecated_api_names = args
+ # pylint: enable=protected-access
+ return func
+ return deprecated_wrapper
+
+
def deprecated(date, instructions, warn_once=True):
"""Decorator for marking functions or methods deprecated.
diff --git a/tensorflow/python/util/deprecation_test.py b/tensorflow/python/util/deprecation_test.py
index 1ea695e4d6..90c73a0a58 100644
--- a/tensorflow/python/util/deprecation_test.py
+++ b/tensorflow/python/util/deprecation_test.py
@@ -935,5 +935,27 @@ class DeprecationArgumentsTest(test.TestCase):
self.assertEqual(new_docs, new_docs_ref)
+class DeprecatedEndpointsTest(test.TestCase):
+
+ def testSingleDeprecatedEndpoint(self):
+ @deprecation.deprecated_endpoints("foo1")
+ def foo():
+ pass
+ self.assertEqual(("foo1",), foo._tf_deprecated_api_names)
+
+ def testMultipleDeprecatedEndpoint(self):
+ @deprecation.deprecated_endpoints("foo1", "foo2")
+ def foo():
+ pass
+ self.assertEqual(("foo1", "foo2"), foo._tf_deprecated_api_names)
+
+ def testCannotSetDeprecatedEndpointsTwice(self):
+ with self.assertRaises(deprecation.DeprecatedNamesAlreadySet):
+ @deprecation.deprecated_endpoints("foo1")
+ @deprecation.deprecated_endpoints("foo2")
+ def foo(): # pylint: disable=unused-variable
+ pass
+
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/python/util/nest.py b/tensorflow/python/util/nest.py
index d63f59a8c8..5aac559b9b 100644
--- a/tensorflow/python/util/nest.py
+++ b/tensorflow/python/util/nest.py
@@ -73,7 +73,7 @@ def _sequence_like(instance, args):
Returns:
`args` with the type of `instance`.
"""
- if isinstance(instance, dict):
+ if isinstance(instance, (dict, _collections.Mapping)):
# Pack dictionaries in a deterministic order by sorting the keys.
# Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
@@ -89,7 +89,7 @@ def _sequence_like(instance, args):
def _yield_value(iterable):
- if isinstance(iterable, dict):
+ if isinstance(iterable, (dict, _collections.Mapping)):
# Iterate through dictionaries in a deterministic order by sorting the
# keys. Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
@@ -215,7 +215,7 @@ def flatten_dict_items(dictionary):
ValueError: If any key and value have not the same structure, or if keys are
not unique.
"""
- if not isinstance(dictionary, dict):
+ if not isinstance(dictionary, (dict, _collections.Mapping)):
raise TypeError("input must be a dictionary")
flat_dictionary = {}
for i, v in _six.iteritems(dictionary):
@@ -455,7 +455,7 @@ def assert_shallow_structure(shallow_tree, input_tree, check_types=True):
"structure has length %s, while shallow structure has length %s."
% (len(input_tree), len(shallow_tree)))
- if check_types and isinstance(shallow_tree, dict):
+ if check_types and isinstance(shallow_tree, (dict, _collections.Mapping)):
if set(input_tree) != set(shallow_tree):
raise ValueError(
"The two structures don't have the same keys. Input "
@@ -716,7 +716,7 @@ def yield_flat_paths(nest):
# The _maybe_add_final_path_element function is used below in order to avoid
# adding trailing slashes when the sub-element recursed into is a leaf.
- if isinstance(nest, dict):
+ if isinstance(nest, (dict, _collections.Mapping)):
for key in _sorted(nest):
value = nest[key]
for sub_path in yield_flat_paths(value):
@@ -760,3 +760,4 @@ def flatten_with_joined_string_paths(structure, separator="/"):
_pywrap_tensorflow.RegisterSequenceClass(_collections.Sequence)
+_pywrap_tensorflow.RegisterMappingClass(_collections.Mapping)
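
Accepting `_collections.Mapping` rather than just `dict` is what lets user-defined mapping types flow through `flatten`/`pack_sequence_as`; ordering still comes from sorting the keys. A TensorFlow-free sketch of that behaviour, with a minimal Mapping modeled on `_CustomMapping` from the test file below (the class and helper here are hypothetical):

```python
import collections


class MyMapping(collections.Mapping):
  """Minimal Mapping; the kind of type nest now treats like a dict."""

  def __init__(self, *args, **kwargs):
    self._wrapped = dict(*args, **kwargs)

  def __getitem__(self, key):
    return self._wrapped[key]

  def __iter__(self):
    return iter(self._wrapped)

  def __len__(self):
    return len(self._wrapped)


def flatten_mapping(m):
  """What nest.flatten now does for any Mapping: sort keys, take values."""
  return [m[k] for k in sorted(m)]


m = MyMapping({"d": 3, "b": 1, "a": 0, "c": 2})
print(flatten_mapping(m))  # [0, 1, 2, 3], matching testFlattenDictOrder
```
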
diff --git a/tensorflow/python/util/nest_test.py b/tensorflow/python/util/nest_test.py
index 2f12b25354..26c6ea4b01 100644
--- a/tensorflow/python/util/nest_test.py
+++ b/tensorflow/python/util/nest_test.py
@@ -21,6 +21,7 @@ from __future__ import print_function
import collections
import time
+from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
@@ -33,7 +34,22 @@ from tensorflow.python.platform import test
from tensorflow.python.util import nest
-class NestTest(test.TestCase):
+class _CustomMapping(collections.Mapping):
+
+ def __init__(self, *args, **kwargs):
+ self._wrapped = dict(*args, **kwargs)
+
+ def __getitem__(self, key):
+ return self._wrapped[key]
+
+ def __iter__(self):
+ return iter(self._wrapped)
+
+ def __len__(self):
+ return len(self._wrapped)
+
+
+class NestTest(parameterized.TestCase, test.TestCase):
PointXY = collections.namedtuple("Point", ["x", "y"]) # pylint: disable=invalid-name
@@ -72,26 +88,32 @@ class NestTest(test.TestCase):
with self.assertRaises(ValueError):
nest.pack_sequence_as([5, 6, [7, 8]], ["a", "b", "c"])
+ @parameterized.parameters({"mapping_type": collections.OrderedDict},
+ {"mapping_type": _CustomMapping})
@test_util.assert_no_new_pyobjects_executing_eagerly
- def testFlattenDictOrder(self):
+ def testFlattenDictOrder(self, mapping_type):
"""`flatten` orders dicts by key, including OrderedDicts."""
- ordered = collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
+ ordered = mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)])
plain = {"d": 3, "b": 1, "a": 0, "c": 2}
ordered_flat = nest.flatten(ordered)
plain_flat = nest.flatten(plain)
self.assertEqual([0, 1, 2, 3], ordered_flat)
self.assertEqual([0, 1, 2, 3], plain_flat)
- def testPackDictOrder(self):
+ @parameterized.parameters({"mapping_type": collections.OrderedDict},
+ {"mapping_type": _CustomMapping})
+ def testPackDictOrder(self, mapping_type):
"""Packing orders dicts by key, including OrderedDicts."""
- ordered = collections.OrderedDict([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
+ custom = mapping_type([("d", 0), ("b", 0), ("a", 0), ("c", 0)])
plain = {"d": 0, "b": 0, "a": 0, "c": 0}
seq = [0, 1, 2, 3]
- ordered_reconstruction = nest.pack_sequence_as(ordered, seq)
+ custom_reconstruction = nest.pack_sequence_as(custom, seq)
plain_reconstruction = nest.pack_sequence_as(plain, seq)
+ self.assertIsInstance(custom_reconstruction, mapping_type)
+ self.assertIsInstance(plain_reconstruction, dict)
self.assertEqual(
- collections.OrderedDict([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
- ordered_reconstruction)
+ mapping_type([("d", 3), ("b", 1), ("a", 0), ("c", 2)]),
+ custom_reconstruction)
self.assertEqual({"d": 3, "b": 1, "a": 0, "c": 2}, plain_reconstruction)
Abc = collections.namedtuple("A", ("b", "c")) # pylint: disable=invalid-name
@@ -101,8 +123,10 @@ class NestTest(test.TestCase):
# A nice messy mix of tuples, lists, dicts, and `OrderedDict`s.
mess = [
"z",
- NestTest.Abc(3, 4),
- {
+ NestTest.Abc(3, 4), {
+ "d": _CustomMapping({
+ 41: 4
+ }),
"c": [
1,
collections.OrderedDict([
@@ -111,17 +135,19 @@ class NestTest(test.TestCase):
]),
],
"b": 5
- },
- 17
+ }, 17
]
flattened = nest.flatten(mess)
- self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 17])
+ self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 4, 17])
structure_of_mess = [
14,
NestTest.Abc("a", True),
{
+ "d": _CustomMapping({
+ 41: 42
+ }),
"c": [
0,
collections.OrderedDict([
@@ -142,6 +168,10 @@ class NestTest(test.TestCase):
self.assertIsInstance(unflattened_ordered_dict, collections.OrderedDict)
self.assertEqual(list(unflattened_ordered_dict.keys()), ["b", "a"])
+ unflattened_custom_mapping = unflattened[2]["d"]
+ self.assertIsInstance(unflattened_custom_mapping, _CustomMapping)
+ self.assertEqual(list(unflattened_custom_mapping.keys()), [41])
+
def testFlatten_numpyIsNotFlattened(self):
structure = np.array([1, 2, 3])
flattened = nest.flatten(structure)
@@ -179,19 +209,23 @@ class NestTest(test.TestCase):
self.assertFalse(nest.is_sequence(math_ops.tanh(ones)))
self.assertFalse(nest.is_sequence(np.ones((4, 5))))
- def testFlattenDictItems(self):
- dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
+ @parameterized.parameters({"mapping_type": _CustomMapping},
+ {"mapping_type": dict})
+ def testFlattenDictItems(self, mapping_type):
+ dictionary = mapping_type({(4, 5, (6, 8)): ("a", "b", ("c", "d"))})
flat = {4: "a", 5: "b", 6: "c", 8: "d"}
self.assertEqual(nest.flatten_dict_items(dictionary), flat)
with self.assertRaises(TypeError):
nest.flatten_dict_items(4)
- bad_dictionary = {(4, 5, (4, 8)): ("a", "b", ("c", "d"))}
+ bad_dictionary = mapping_type({(4, 5, (4, 8)): ("a", "b", ("c", "d"))})
with self.assertRaisesRegexp(ValueError, "not unique"):
nest.flatten_dict_items(bad_dictionary)
- another_bad_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))}
+ another_bad_dictionary = mapping_type({
+ (4, 5, (6, 8)): ("a", "b", ("c", ("d", "e")))
+ })
with self.assertRaisesRegexp(
ValueError, "Key had [0-9]* elements, but value had [0-9]* elements"):
nest.flatten_dict_items(another_bad_dictionary)
diff --git a/tensorflow/python/util/py_checkpoint_reader.i b/tensorflow/python/util/py_checkpoint_reader.i
index 8004898cbc..1c73f7f06f 100644
--- a/tensorflow/python/util/py_checkpoint_reader.i
+++ b/tensorflow/python/util/py_checkpoint_reader.i
@@ -166,6 +166,7 @@ def NewCheckpointReader(filepattern):
return CheckpointReader(compat.as_bytes(filepattern), status)
NewCheckpointReader._tf_api_names = ['train.NewCheckpointReader']
+NewCheckpointReader._tf_api_names_v1 = ['train.NewCheckpointReader']
%}
%include "tensorflow/c/checkpoint_reader.h"
diff --git a/tensorflow/python/util/stat_summarizer.i b/tensorflow/python/util/stat_summarizer.i
index 73fa85494b..a5a7984d91 100644
--- a/tensorflow/python/util/stat_summarizer.i
+++ b/tensorflow/python/util/stat_summarizer.i
@@ -27,8 +27,8 @@ limitations under the License.
%ignoreall
-%unignore _NewStatSummarizer;
-%unignore _DeleteStatSummarizer;
+%unignore NewStatSummarizer;
+%unignore DeleteStatSummarizer;
%unignore tensorflow;
%unignore tensorflow::StatSummarizer;
%unignore tensorflow::StatSummarizer::StatSummarizer;
@@ -43,20 +43,20 @@ limitations under the License.
// TODO(ashankar): Remove the unused argument from the API.
%{
-tensorflow::StatSummarizer* _NewStatSummarizer(
+tensorflow::StatSummarizer* NewStatSummarizer(
const string& unused) {
return new tensorflow::StatSummarizer(tensorflow::StatSummarizerOptions());
}
%}
%{
-void _DeleteStatSummarizer(tensorflow::StatSummarizer* ss) {
+void DeleteStatSummarizer(tensorflow::StatSummarizer* ss) {
delete ss;
}
%}
-tensorflow::StatSummarizer* _NewStatSummarizer(const string& unused);
-void _DeleteStatSummarizer(tensorflow::StatSummarizer* ss);
+tensorflow::StatSummarizer* NewStatSummarizer(const string& unused);
+void DeleteStatSummarizer(tensorflow::StatSummarizer* ss);
%extend tensorflow::StatSummarizer {
void ProcessStepStatsStr(const string& step_stats_str) {
@@ -76,16 +76,3 @@ void _DeleteStatSummarizer(tensorflow::StatSummarizer* ss);
%include "tensorflow/core/util/stat_summarizer_options.h"
%include "tensorflow/core/util/stat_summarizer.h"
%unignoreall
-
-%insert("python") %{
-
-# Wrapping NewStatSummarizer and DeletStatSummarizer because
-# SWIG-generated functions are built-in functions and do not support
-# setting _tf_api_names attribute.
-
-def NewStatSummarizer(unused):
- return _NewStatSummarizer(unused)
-
-def DeleteStatSummarizer(stat_summarizer):
- _DeleteStatSummarizer(stat_summarizer)
-%}
diff --git a/tensorflow/python/util/tf_export.py b/tensorflow/python/util/tf_export.py
index e154ffb68a..274f32c21f 100644
--- a/tensorflow/python/util/tf_export.py
+++ b/tensorflow/python/util/tf_export.py
@@ -63,12 +63,63 @@ API_ATTRS = {
'_estimator_api_constants')
}
+API_ATTRS_V1 = {
+ TENSORFLOW_API_NAME: _Attributes(
+ '_tf_api_names_v1',
+ '_tf_api_constants_v1'),
+ ESTIMATOR_API_NAME: _Attributes(
+ '_estimator_api_names_v1',
+ '_estimator_api_constants_v1')
+}
+
class SymbolAlreadyExposedError(Exception):
"""Raised when adding API names to symbol that already has API names."""
pass
+def get_canonical_name_for_symbol(symbol, api_name=TENSORFLOW_API_NAME):
+ """Get canonical name for the API symbol.
+
+ Canonical name is the first non-deprecated endpoint name.
+
+ Args:
+ symbol: API function or class.
+ api_name: API name (tensorflow or estimator).
+
+ Returns:
+ Canonical name for the API symbol (e.g. initializers.zeros) if a
+ canonical name could be determined. Otherwise, returns None.
+ """
+ if not hasattr(symbol, '__dict__'):
+ return None
+ api_names_attr = API_ATTRS[api_name].names
+ _, undecorated_symbol = tf_decorator.unwrap(symbol)
+ if api_names_attr not in undecorated_symbol.__dict__:
+ return None
+ api_names = getattr(undecorated_symbol, api_names_attr)
+ # TODO(annarev): maybe add a separate deprecated attribute
+ # for estimator names.
+ deprecated_api_names = undecorated_symbol.__dict__.get(
+ '_tf_deprecated_api_names', [])
+ return get_canonical_name(api_names, deprecated_api_names)
+
+
+def get_canonical_name(api_names, deprecated_api_names):
+ """Get first non-deprecated endpoint name.
+
+ Args:
+ api_names: API names iterable.
+ deprecated_api_names: Deprecated API names iterable.
+ Returns:
+ Canonical name if there is at least one non-deprecated endpoint.
+ Otherwise returns None.
+ """
+ return next(
+ (name for name in api_names if name not in deprecated_api_names),
+ None)
+
+
class api_export(object): # pylint: disable=invalid-name
"""Provides ways to export symbols to the TensorFlow API."""
@@ -78,13 +129,16 @@ class api_export(object): # pylint: disable=invalid-name
Args:
*args: API names in dot delimited format.
**kwargs: Optional keyed arguments.
- overrides: List of symbols that this is overriding
+ v1: Names for the TensorFlow V1 API. If not set, we will use V2 API
+ names both for TensorFlow V1 and V2 APIs.
+ overrides: List of symbols that this is overriding
(those overrided api exports will be removed). Note: passing overrides
has no effect on exporting a constant.
- api_name: Name of the API you want to generate (e.g. `tensorflow` or
+ api_name: Name of the API you want to generate (e.g. `tensorflow` or
`estimator`). Default is `tensorflow`.
"""
self._names = args
+ self._names_v1 = kwargs.get('v1', args)
self._api_name = kwargs.get('api_name', TENSORFLOW_API_NAME)
self._overrides = kwargs.get('overrides', [])
@@ -102,24 +156,27 @@ class api_export(object): # pylint: disable=invalid-name
and kwarg `allow_multiple_exports` not set.
"""
api_names_attr = API_ATTRS[self._api_name].names
-
+ api_names_attr_v1 = API_ATTRS_V1[self._api_name].names
# Undecorate overridden names
for f in self._overrides:
_, undecorated_f = tf_decorator.unwrap(f)
delattr(undecorated_f, api_names_attr)
+ delattr(undecorated_f, api_names_attr_v1)
_, undecorated_func = tf_decorator.unwrap(func)
+ self.set_attr(undecorated_func, api_names_attr, self._names)
+ self.set_attr(undecorated_func, api_names_attr_v1, self._names_v1)
+ return func
+ def set_attr(self, func, api_names_attr, names):
# Check for an existing api. We check if attribute name is in
# __dict__ instead of using hasattr to verify that subclasses have
# their own _tf_api_names as opposed to just inheriting it.
- if api_names_attr in undecorated_func.__dict__:
+ if api_names_attr in func.__dict__:
raise SymbolAlreadyExposedError(
'Symbol %s is already exposed as %s.' %
- (undecorated_func.__name__, getattr(
- undecorated_func, api_names_attr))) # pylint: disable=protected-access
- setattr(undecorated_func, api_names_attr, self._names)
- return func
+ (func.__name__, getattr(func, api_names_attr))) # pylint: disable=protected-access
+ setattr(func, api_names_attr, names)
def export_constant(self, module_name, name):
"""Store export information for constants/string literals.
@@ -140,12 +197,20 @@ class api_export(object): # pylint: disable=invalid-name
name: (string) Current constant name.
"""
module = sys.modules[module_name]
- if not hasattr(module, API_ATTRS[self._api_name].constants):
- setattr(module, API_ATTRS[self._api_name].constants, [])
+ api_constants_attr = API_ATTRS[self._api_name].constants
+ api_constants_attr_v1 = API_ATTRS_V1[self._api_name].constants
+
+ if not hasattr(module, api_constants_attr):
+ setattr(module, api_constants_attr, [])
# pylint: disable=protected-access
- getattr(module, API_ATTRS[self._api_name].constants).append(
+ getattr(module, api_constants_attr).append(
(self._names, name))
+ if not hasattr(module, api_constants_attr_v1):
+ setattr(module, api_constants_attr_v1, [])
+ getattr(module, api_constants_attr_v1).append(
+ (self._names_v1, name))
+
tf_export = functools.partial(api_export, api_name=TENSORFLOW_API_NAME)
estimator_export = functools.partial(tf_export, api_name=ESTIMATOR_API_NAME)
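
A minimal sketch of the new 'v1' keyword added above (not part of the patch; the endpoint names are made up, and it assumes tensorflow.python.util.tf_export is importable from a TensorFlow build). The decorator records V2 endpoints on _tf_api_names and V1 endpoints on _tf_api_names_v1, and get_canonical_name_for_symbol returns the first non-deprecated endpoint.

    from tensorflow.python.util import tf_export

    @tf_export.tf_export('math.scale', v1=['math.scale', 'scale'])
    def scale(x, factor):
      return x * factor

    print(scale._tf_api_names)      # ('math.scale',)          V2 endpoints
    print(scale._tf_api_names_v1)   # ['math.scale', 'scale']  V1 endpoints
    print(tf_export.get_canonical_name_for_symbol(scale))      # 'math.scale'
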
diff --git a/tensorflow/python/util/tf_export_test.py b/tensorflow/python/util/tf_export_test.py
index b9e26ecb33..4ae1dc55e0 100644
--- a/tensorflow/python/util/tf_export_test.py
+++ b/tensorflow/python/util/tf_export_test.py
@@ -60,6 +60,8 @@ class ValidateExportTest(test.TestCase):
for symbol in [_test_function, _test_function, TestClassA, TestClassB]:
if hasattr(symbol, '_tf_api_names'):
del symbol._tf_api_names
+ if hasattr(symbol, '_tf_api_names_v1'):
+ del symbol._tf_api_names_v1
def _CreateMockModule(self, name):
mock_module = self.MockModule(name)
diff --git a/tensorflow/python/util/tf_stack.py b/tensorflow/python/util/tf_stack.py
new file mode 100644
index 0000000000..fe4f4a63eb
--- /dev/null
+++ b/tensorflow/python/util/tf_stack.py
@@ -0,0 +1,103 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Functions used to extract and analyze stacks. Faster than Python libs."""
+# pylint: disable=g-bad-name
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import linecache
+import sys
+
+# Names for indices into TF traceback tuples.
+TB_FILENAME = 0
+TB_LINENO = 1
+TB_FUNCNAME = 2
+TB_CODEDICT = 3 # Dictionary of Python interpreter state.
+
+
+def extract_stack(extract_frame_info_fn=None):
+ """A lightweight, extensible re-implementation of traceback.extract_stack.
+
+ NOTE(mrry): traceback.extract_stack eagerly retrieves the line of code for
+ each stack frame using linecache, which results in an abundance of stat()
+ calls. This implementation does not retrieve the code, and any consumer
+  should apply convert_stack to the result to obtain a traceback that can
+ be formatted etc. using traceback methods.
+
+ Args:
+ extract_frame_info_fn: Optional callable fn(stack_frame) applied to each
+ stack frame. This callable's return value is stored as the sixth (last)
+ element of the returned tuples. If not provided, the returned tuples
+ will have None as their sixth value.
+
+ Returns:
+ A list of 6-tuples
+ (filename, lineno, name, frame_globals, func_start_lineno, custom_info)
+ corresponding to the call stack of the current thread. The returned tuples
+ have the innermost stack frame at the end, unlike the Python inspect
+ module's stack() function.
+ """
+ default_fn = lambda f: None
+ extract_frame_info_fn = extract_frame_info_fn or default_fn
+ try:
+ raise ZeroDivisionError
+ except ZeroDivisionError:
+ f = sys.exc_info()[2].tb_frame.f_back
+ ret = []
+ while f is not None:
+ lineno = f.f_lineno
+ co = f.f_code
+ filename = co.co_filename
+ name = co.co_name
+ frame_globals = f.f_globals
+ func_start_lineno = co.co_firstlineno
+ frame_info = extract_frame_info_fn(f)
+ ret.append((filename, lineno, name, frame_globals, func_start_lineno,
+ frame_info))
+ f = f.f_back
+ ret.reverse()
+ return ret
+
+
+def convert_stack(stack, include_func_start_lineno=False):
+ """Converts a stack extracted using extract_stack() to a traceback stack.
+
+ Args:
+    stack: A list of n 6-tuples as returned by extract_stack(),
+      (filename, lineno, name, frame_globals, func_start_lineno, frame_info).
+ include_func_start_lineno: True if function start line number should be
+ included as the 5th entry in return tuples.
+
+ Returns:
+ A list of n 4-tuples or 5-tuples
+ (filename, lineno, name, code, [optional: func_start_lineno]), where the
+ code tuple element is calculated from the corresponding elements of the
+ input tuple.
+ """
+ ret = []
+ for (filename, lineno, name, frame_globals, func_start_lineno,
+ unused_frame_info) in stack:
+ linecache.checkcache(filename)
+ line = linecache.getline(filename, lineno, frame_globals)
+ if line:
+ line = line.strip()
+ else:
+ line = None
+ if include_func_start_lineno:
+ ret.append((filename, lineno, name, line, func_start_lineno))
+ else:
+ ret.append((filename, lineno, name, line))
+ return ret
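
A minimal usage sketch of the two helpers in this new file (not part of the patch; it assumes tensorflow.python.util.tf_stack is importable from a TensorFlow build). extract_stack is cheap because it never touches linecache; convert_stack resolves source lines later and yields 4-tuples that the standard traceback module can format directly.

    import traceback
    from tensorflow.python.util import tf_stack

    def f():
      # No stat() calls happen here; only frame metadata is recorded.
      return tf_stack.extract_stack()

    raw = f()
    # Source lines are looked up only when the stack is converted for display.
    frames = tf_stack.convert_stack(raw)
    print(''.join(traceback.format_list(frames)))
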
diff --git a/tensorflow/python/util/util.cc b/tensorflow/python/util/util.cc
index 366f8a0deb..ad85a44f8d 100644
--- a/tensorflow/python/util/util.cc
+++ b/tensorflow/python/util/util.cc
@@ -31,6 +31,8 @@ namespace {
// Type object for collections.Sequence. This is set by RegisterSequenceClass.
PyObject* CollectionsSequenceType = nullptr;
+// Type object for collections.Mapping, set by RegisterMappingClass.
+PyObject* CollectionsMappingType = nullptr;
PyTypeObject* SparseTensorValueType = nullptr;
const int kMaxItemsInCache = 1024;
@@ -45,6 +47,23 @@ bool IsString(PyObject* o) {
PyUnicode_Check(o);
}
+// Work around a writable-strings warning with Python 2's PyMapping_Keys macro,
+// and while we're at it give them consistent behavior by making sure the
+// returned value is a list.
+//
+// As with PyMapping_Keys, returns a new reference.
+PyObject* MappingKeys(PyObject* o) {
+#if PY_MAJOR_VERSION >= 3
+ return PyMapping_Keys(o);
+#else
+ static char key_method_name[] = "keys";
+ Safe_PyObjectPtr raw_result(PyObject_CallMethod(o, key_method_name, nullptr));
+ return PySequence_Fast(
+ raw_result.get(),
+ "The '.keys()' method of a custom mapping returned a non-sequence.");
+#endif
+}
+
// Equivalent to Python's 'o.__class__.__name__'
// Note that '__class__' attribute is set only in new-style classes.
// A lot of tensorflow code uses __class__ without checks, so it seems like
@@ -85,6 +104,119 @@ string PyObjectToString(PyObject* o) {
}
}
+class CachedTypeCheck {
+ public:
+ explicit CachedTypeCheck(std::function<int(PyObject*)> ternary_predicate)
+ : ternary_predicate_(std::move(ternary_predicate)) {}
+
+ ~CachedTypeCheck() {
+ mutex_lock l(type_to_sequence_map_mu_);
+ for (const auto& pair : type_to_sequence_map_) {
+ Py_DECREF(pair.first);
+ }
+ }
+
+ // Caches successful executions of the one-argument (PyObject*) callable
+ // "ternary_predicate" based on the type of "o". -1 from the callable
+ // indicates an unsuccessful check (not cached), 0 indicates that "o"'s type
+ // does not match the predicate, and 1 indicates that it does. Used to avoid
+ // calling back into Python for expensive isinstance checks.
+ int CachedLookup(PyObject* o) {
+ // Try not to return to Python - see if the type has already been seen
+ // before.
+
+ auto* type = Py_TYPE(o);
+
+ {
+ mutex_lock l(type_to_sequence_map_mu_);
+ auto it = type_to_sequence_map_.find(type);
+ if (it != type_to_sequence_map_.end()) {
+ return it->second;
+ }
+ }
+
+ int check_result = ternary_predicate_(o);
+
+ if (check_result == -1) {
+ return -1; // Type check error, not cached.
+ }
+
+ // NOTE: This is never decref'd as long as the object lives, which is likely
+ // forever, but we don't want the type to get deleted as long as it is in
+ // the map. This should not be too much of a leak, as there should only be a
+ // relatively small number of types in the map, and an even smaller number
+ // that are eligible for decref. As a precaution, we limit the size of the
+ // map to 1024.
+ {
+ mutex_lock l(type_to_sequence_map_mu_);
+ if (type_to_sequence_map_.size() < kMaxItemsInCache) {
+ Py_INCREF(type);
+ type_to_sequence_map_.insert({type, check_result});
+ }
+ }
+
+ return check_result;
+ }
+
+ private:
+ std::function<int(PyObject*)> ternary_predicate_;
+ mutex type_to_sequence_map_mu_;
+ std::unordered_map<PyTypeObject*, bool> type_to_sequence_map_
+ GUARDED_BY(type_to_sequence_map_mu_);
+};
+
+// Returns 1 if `o` is considered a mapping for the purposes of Flatten().
+// Returns 0 otherwise.
+// Returns -1 if an error occurred.
+int IsMappingHelper(PyObject* o) {
+ static auto* const check_cache = new CachedTypeCheck([](PyObject* to_check) {
+ return PyObject_IsInstance(to_check, CollectionsMappingType);
+ });
+ if (PyDict_Check(o)) return true;
+ if (TF_PREDICT_FALSE(CollectionsMappingType == nullptr)) {
+ PyErr_SetString(
+ PyExc_RuntimeError,
+ tensorflow::strings::StrCat(
+ "collections.Mapping type has not been set. "
+ "Please call RegisterMappingClass before using this module")
+ .c_str());
+ return -1;
+ }
+ return check_cache->CachedLookup(o);
+}
+
+// Returns 1 if `o` is considered a sequence for the purposes of Flatten().
+// Returns 0 otherwise.
+// Returns -1 if an error occurred.
+int IsSequenceHelper(PyObject* o) {
+ static auto* const check_cache = new CachedTypeCheck([](PyObject* to_check) {
+ int is_instance = PyObject_IsInstance(to_check, CollectionsSequenceType);
+
+ // Don't cache a failed is_instance check.
+ if (is_instance == -1) return -1;
+
+ return static_cast<int>(is_instance != 0 && !IsString(to_check));
+ });
+ // We treat dicts and other mappings as special cases of sequences.
+ if (IsMappingHelper(o)) return true;
+ if (PySet_Check(o) && !WarnedThatSetIsNotSequence) {
+ LOG(WARNING) << "Sets are not currently considered sequences, "
+ "but this may change in the future, "
+ "so consider avoiding using them.";
+ WarnedThatSetIsNotSequence = true;
+ }
+ if (TF_PREDICT_FALSE(CollectionsSequenceType == nullptr)) {
+ PyErr_SetString(
+ PyExc_RuntimeError,
+ tensorflow::strings::StrCat(
+ "collections.Sequence type has not been set. "
+ "Please call RegisterSequenceClass before using this module")
+ .c_str());
+ return -1;
+ }
+ return check_cache->CachedLookup(o);
+}
+
// Implements the same idea as tensorflow.util.nest._yield_value
// During construction we check if the iterable is a dictionary.
// If so, we construct a sequence from its sorted keys that will be used
@@ -96,7 +228,12 @@ string PyObjectToString(PyObject* o) {
// 'iterable' must not be modified while ValIterator is used.
class ValIterator {
public:
- explicit ValIterator(PyObject* iterable) : dict_(nullptr), index_(0) {
+ explicit ValIterator(PyObject* iterable)
+ : dict_(nullptr),
+ mapping_(nullptr),
+ last_mapping_element_(nullptr),
+ seq_(nullptr),
+ index_(0) {
if (PyDict_Check(iterable)) {
dict_ = iterable;
// PyDict_Keys returns a list, which can be used with
@@ -108,6 +245,10 @@ class ValIterator {
// bugs caused by mixing ordered and plain dicts (e.g., flattening
// a dict but using a corresponding `OrderedDict` to pack it back).
PyList_Sort(seq_);
+ } else if (IsMappingHelper(iterable)) {
+ mapping_ = iterable;
+ seq_ = MappingKeys(iterable);
+ PyList_Sort(seq_);
} else {
seq_ = PySequence_Fast(iterable, "");
}
@@ -122,7 +263,9 @@ class ValIterator {
PyObject* element = nullptr;
if (index_ < size_) {
// Both PySequence_Fast_GET_ITEM and PyDict_GetItem return borrowed
- // references.
+ // references. For general mappings, ValIterator keeps a reference to the
+ // last retrieved element (and decrefs it before producing the next
+ // element) to abstract away the borrowed/new difference.
element = PySequence_Fast_GET_ITEM(seq_, index_);
++index_;
if (dict_ != nullptr) {
@@ -132,85 +275,32 @@ class ValIterator {
"Dictionary was modified during iteration over it");
return nullptr;
}
+ } else if (mapping_ != nullptr) {
+ element = PyObject_GetItem(mapping_, element);
+ if (element == nullptr) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "Mapping was modified during iteration over it");
+ return nullptr;
+ }
+ last_mapping_element_.reset(element);
}
}
return element;
}
private:
- PyObject* seq_;
+ // Special casing for things that pass PyDict_Check (faster, no Python calls)
PyObject* dict_;
+
+ // General mappings which have custom Python logic
+ PyObject* mapping_;
+ Safe_PyObjectPtr last_mapping_element_;
+
+ PyObject* seq_;
Py_ssize_t size_;
Py_ssize_t index_;
};
-mutex g_type_to_sequence_map(LINKER_INITIALIZED);
-std::unordered_map<PyTypeObject*, bool>* IsTypeSequenceMap() {
- static auto* const m = new std::unordered_map<PyTypeObject*, bool>;
- return m;
-}
-
-// Returns 1 if `o` is considered a sequence for the purposes of Flatten().
-// Returns 0 otherwise.
-// Returns -1 if an error occurred.
-int IsSequenceHelper(PyObject* o) {
- if (PyDict_Check(o)) return true;
- if (PySet_Check(o) && !WarnedThatSetIsNotSequence) {
- LOG(WARNING) << "Sets are not currently considered sequences, "
- "but this may change in the future, "
- "so consider avoiding using them.";
- WarnedThatSetIsNotSequence = true;
- }
- if (TF_PREDICT_FALSE(CollectionsSequenceType == nullptr)) {
- PyErr_SetString(
- PyExc_RuntimeError,
- tensorflow::strings::StrCat(
- "collections.Sequence type has not been set. "
- "Please call RegisterSequenceClass before using this module")
- .c_str());
- return -1;
- }
-
- // Try not to return to Python - see if the type has already been seen
- // before.
-
- auto* type_to_sequence_map = IsTypeSequenceMap();
- auto* type = Py_TYPE(o);
-
- {
- mutex_lock l(g_type_to_sequence_map);
- auto it = type_to_sequence_map->find(type);
- if (it != type_to_sequence_map->end()) {
- return it->second;
- }
- }
-
- // NOTE: We explicitly release the g_type_to_sequence_map mutex,
- // because PyObject_IsInstance() may release the GIL, allowing another thread
- // concurrent entry to this function.
- int is_instance = PyObject_IsInstance(o, CollectionsSequenceType);
-
- // Don't cache a failed is_instance check.
- if (is_instance == -1) return -1;
-
- bool is_sequence = static_cast<int>(is_instance != 0 && !IsString(o));
-
- // NOTE: This is never decref'd, but we don't want the type to get deleted
- // as long as it is in the map. This should not be too much of a
- // leak, as there should only be a relatively small number of types in the
- // map, and an even smaller number that are eligible for decref. As a
- // precaution, we limit the size of the map to 1024.
- {
- mutex_lock l(g_type_to_sequence_map);
- if (type_to_sequence_map->size() < kMaxItemsInCache) {
- Py_INCREF(type);
- type_to_sequence_map->insert({type, is_sequence});
- }
- }
-
- return is_sequence;
-}
-
bool IsSparseTensorValueType(PyObject* o) {
if (TF_PREDICT_FALSE(SparseTensorValueType == nullptr)) {
return false;
@@ -226,21 +316,35 @@ int IsSequenceForDataHelper(PyObject* o) {
bool GetNextValuesForDict(PyObject* nested,
std::vector<Safe_PyObjectPtr>* next_values) {
- std::vector<PyObject*> result;
-
- PyObject* keys = PyDict_Keys(nested);
- if (PyList_Sort(keys) == -1) return false;
- Py_ssize_t size = PyList_Size(keys);
+ Safe_PyObjectPtr keys(PyDict_Keys(nested));
+ if (PyList_Sort(keys.get()) == -1) return false;
+ Py_ssize_t size = PyList_Size(keys.get());
for (Py_ssize_t i = 0; i < size; ++i) {
// We know that key and item will not be deleted because nested owns
// a reference to them and callers of flatten must not modify nested
// while the method is running.
- PyObject* key = PyList_GET_ITEM(keys, i);
+ PyObject* key = PyList_GET_ITEM(keys.get(), i);
PyObject* item = PyDict_GetItem(nested, key);
Py_INCREF(item);
next_values->emplace_back(item);
}
- Py_DECREF(keys);
+ return true;
+}
+
+bool GetNextValuesForMapping(PyObject* nested,
+ std::vector<Safe_PyObjectPtr>* next_values) {
+ Safe_PyObjectPtr keys(MappingKeys(nested));
+ if (keys.get() == nullptr) {
+ return false;
+ }
+ if (PyList_Sort(keys.get()) == -1) return false;
+ Py_ssize_t size = PyList_Size(keys.get());
+ for (Py_ssize_t i = 0; i < size; ++i) {
+ PyObject* key = PyList_GET_ITEM(keys.get(), i);
+ // Unlike PyDict_GetItem, PyObject_GetItem returns a new reference.
+ PyObject* item = PyObject_GetItem(nested, key);
+ next_values->emplace_back(item);
+ }
return true;
}
@@ -265,6 +369,9 @@ bool GetNextValues(PyObject* nested,
if (PyDict_Check(nested)) {
// if nested is dictionary, sort it by key and recurse on each value
return GetNextValuesForDict(nested, next_values);
+ } else if (IsMappingHelper(nested)) {
+ // same treatment as dictionaries, but for custom mapping types
+ return GetNextValuesForMapping(nested, next_values);
}
// iterate and recurse
return GetNextValuesForIterable(nested, next_values);
@@ -276,6 +383,9 @@ bool GetNextValuesForData(PyObject* nested,
if (PyDict_Check(nested)) {
// if nested is dictionary, sort it by key and recurse on each value
return GetNextValuesForDict(nested, next_values);
+ } else if (IsMappingHelper(nested)) {
+ // same treatment as dictionaries, but for custom mapping types
+ return GetNextValuesForMapping(nested, next_values);
} else if (IsSparseTensorValueType(nested)) {
// if nested is a SparseTensorValue, just return itself as a single item
Py_INCREF(nested);
@@ -320,8 +430,8 @@ bool FlattenHelper(
// 'dict1' and 'dict2' are assumed to be Python dictionaries.
void SetDifferentKeysError(PyObject* dict1, PyObject* dict2, string* error_msg,
bool* is_type_error) {
- PyObject* k1 = PyDict_Keys(dict1);
- PyObject* k2 = PyDict_Keys(dict2);
+ PyObject* k1 = MappingKeys(dict1);
+ PyObject* k2 = MappingKeys(dict2);
*is_type_error = false;
*error_msg = tensorflow::strings::StrCat(
"The two dictionaries don't have the same set of keys. "
@@ -396,9 +506,12 @@ bool AssertSameStructureHelper(PyObject* o1, PyObject* o2, bool check_types,
}
} else if (type1 != type2
/* If both sequences are list types, don't complain. This allows
- one to be a list subclass (e.g. _ListWrapper used for automatic
- dependency tracking.) */
- && !(PyList_Check(o1) && PyList_Check(o2))) {
+ one to be a list subclass (e.g. _ListWrapper used for
+ automatic dependency tracking.) */
+ && !(PyList_Check(o1) && PyList_Check(o2))
+ /* Two mapping types will also compare equal, making _DictWrapper
+ and dict compare equal. */
+ && !(IsMappingHelper(o1) && IsMappingHelper(o2))) {
*is_type_error = true;
*error_msg = tensorflow::strings::StrCat(
"The two namedtuples don't have the same sequence type. "
@@ -423,6 +536,24 @@ bool AssertSameStructureHelper(PyObject* o1, PyObject* o2, bool check_types,
return true;
}
}
+ } else if (IsMappingHelper(o1)) {
+ // Fallback for custom mapping types. Instead of using PyDict methods
+ // which stay in C, we call iter(o1).
+ if (PyMapping_Size(o1) != PyMapping_Size(o2)) {
+ SetDifferentKeysError(o1, o2, error_msg, is_type_error);
+ return true;
+ }
+
+ Safe_PyObjectPtr iter(PyObject_GetIter(o1));
+ PyObject* key;
+ while ((key = PyIter_Next(iter.get())) != nullptr) {
+ if (!PyMapping_HasKey(o2, key)) {
+ SetDifferentKeysError(o1, o2, error_msg, is_type_error);
+ Py_DECREF(key);
+ return true;
+ }
+ Py_DECREF(key);
+ }
}
}
@@ -470,6 +601,19 @@ void RegisterSequenceClass(PyObject* sequence_class) {
CollectionsSequenceType = sequence_class;
}
+void RegisterMappingClass(PyObject* mapping_class) {
+ if (!PyType_Check(mapping_class)) {
+ PyErr_SetString(
+ PyExc_TypeError,
+ tensorflow::strings::StrCat(
+ "Expecting a class definition for `collections.Mapping`. Got ",
+ Py_TYPE(mapping_class)->tp_name)
+ .c_str());
+ return;
+ }
+ CollectionsMappingType = mapping_class;
+}
+
void RegisterSparseTensorValueClass(PyObject* sparse_tensor_value_class) {
if (!PyType_Check(sparse_tensor_value_class)) {
PyErr_SetString(
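
The changes above teach the flatten and structure-comparison helpers to treat any registered collections.Mapping subclass like a dict, visiting its keys in sorted order. A minimal sketch of the resulting behavior from the Python side (not part of the patch; it assumes the helpers are surfaced through tensorflow.python.util.nest and that nest registers collections.Mapping via RegisterMappingClass at import time, as it already does for collections.Sequence).

    import collections
    from tensorflow.python.util import nest

    class FrozenDict(collections.Mapping):
      """A custom mapping that is not a dict subclass."""

      def __init__(self, **kwargs):
        self._d = dict(kwargs)

      def __getitem__(self, key):
        return self._d[key]

      def __iter__(self):
        return iter(self._d)

      def __len__(self):
        return len(self._d)

    # Values come back in sorted-key order, mirroring the dict behavior.
    print(nest.flatten(FrozenDict(b=2, a=1)))  # [1, 2]
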
diff --git a/tensorflow/python/util/util.h b/tensorflow/python/util/util.h
index 70efc10c9a..41dcc969f8 100644
--- a/tensorflow/python/util/util.h
+++ b/tensorflow/python/util/util.h
@@ -118,7 +118,9 @@ PyObject* Flatten(PyObject* nested);
// the type from the module. This approach also requires some trigger from
// Python so that we know that the Python interpreter had been initialized.
void RegisterSequenceClass(PyObject* sequence_class);
-// Similar to the above function, except for the
+// Like RegisterSequenceClass, but for collections.Mapping.
+void RegisterMappingClass(PyObject* mapping_class);
+// Similar to the above functions, except for the
// sparse_tensor.SparseTensorValue class.
void RegisterSparseTensorValueClass(PyObject* sparse_tensor_value_class);
diff --git a/tensorflow/python/util/util.i b/tensorflow/python/util/util.i
index 9f3b11b982..6ad1484295 100644
--- a/tensorflow/python/util/util.i
+++ b/tensorflow/python/util/util.i
@@ -31,6 +31,9 @@ limitations under the License.
%unignore tensorflow::swig::RegisterSequenceClass;
%noexception tensorflow::swig::RegisterSequenceClass;
+%unignore tensorflow::swig::RegisterMappingClass;
+%noexception tensorflow::swig::RegisterMappingClass;
+
%unignore tensorflow::swig::RegisterSparseTensorValueClass;
%noexception tensorflow::swig::RegisterSparseTensorValueClass;
diff --git a/tensorflow/security/advisory/tfsa-2018-001.md b/tensorflow/security/advisory/tfsa-2018-001.md
index bb97543a21..1966789c84 100644
--- a/tensorflow/security/advisory/tfsa-2018-001.md
+++ b/tensorflow/security/advisory/tfsa-2018-001.md
@@ -22,7 +22,7 @@ TensorFlow 1.3.0, 1.3.1, 1.4.0, 1.4.1, 1.5.0, 1.5.1, 1.6.0
### Mitigation
We have patched the vulnerability in GitHub commit
-[49f73c55](https://github.com/tensorflow/tensorflow/commit/49f73c55d56edffebde4bca4a407ad69c1cae4333c55).
+[49f73c55](https://github.com/tensorflow/tensorflow/commit/49f73c55d56edffebde4bca4a407ad69c1cae433).
If users are running TensorFlow in production or on untrusted data, they are
encouraged to apply this patch.
diff --git a/tensorflow/security/index.md b/tensorflow/security/index.md
index ea39e17ab2..0f176151c2 100644
--- a/tensorflow/security/index.md
+++ b/tensorflow/security/index.md
@@ -4,7 +4,7 @@ We regularly publish security advisories about using TensorFlow.
*Note*: In conjunction with these security advisories, we strongly encourage
TensorFlow users to read and understand TensorFlow's security model as outlined
-in (https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md)[SECURITY.md].
+in [SECURITY.md](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md).
| Advisory Number | Type | Versions affected | Reported by | Additional Information |
|-----------------|--------------------|:-----------------:|-----------------------|-----------------------------|
diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.cc b/tensorflow/stream_executor/cuda/cuda_dnn.cc
index 84916385a8..766a0dafb5 100644
--- a/tensorflow/stream_executor/cuda/cuda_dnn.cc
+++ b/tensorflow/stream_executor/cuda/cuda_dnn.cc
@@ -791,6 +791,11 @@ class CudnnActivationDescriptor {
double relu_ceiling = 0.0;
cudnnActivationMode_t mode;
switch (activation_mode) {
+#if CUDNN_VERSION >= 7100
+ case dnn::ActivationMode::kNone:
+ mode = CUDNN_ACTIVATION_IDENTITY;
+ break;
+#endif
case dnn::ActivationMode::kRelu6:
relu_ceiling = 6.0;
mode = CUDNN_ACTIVATION_CLIPPED_RELU;
@@ -2480,10 +2485,11 @@ port::Status CudnnSupport::DoFusedConvolveImpl(
DeviceMemory<Type>* output_data, ScratchAllocator* scratch_allocator,
const dnn::AlgorithmConfig& algorithm_config,
dnn::ProfileResult* output_profile_result) {
- if (activation_mode != dnn::ActivationMode::kRelu) {
+ if (activation_mode != dnn::ActivationMode::kRelu &&
+ activation_mode != dnn::ActivationMode::kNone) {
return port::Status(port::error::INVALID_ARGUMENT,
"cudnnConvolutionBiasActivationForward() only supports "
- "Relu activation.");
+ "Relu or None activation.");
}
CudnnTensorDescriptor conv_input_nd(
@@ -3603,7 +3609,7 @@ bool CudnnSupport::DoPoolForward(
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<double>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<double>* output_data) {
+ DeviceMemory<double>* output_data, ScratchAllocator* workspace_allocator) {
// Alpha is the scaling factor for input.
double alpha = 1.0;
// Beta is the scaling factor for output.
@@ -3628,7 +3634,7 @@ bool CudnnSupport::DoPoolForward(
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<float>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<float>* output_data) {
+ DeviceMemory<float>* output_data, ScratchAllocator* workspace_allocator) {
// Alpha is the scaling factor for input.
float alpha = 1.0;
// Beta is the scaling factor for output.
@@ -3653,7 +3659,8 @@ bool CudnnSupport::DoPoolForward(
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<Eigen::half>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<Eigen::half>* output_data) {
+ DeviceMemory<Eigen::half>* output_data,
+ ScratchAllocator* workspace_allocator) {
// Alpha is the scaling factor for input.
float alpha = 1.0;
// Beta is the scaling factor for output.
@@ -3679,7 +3686,8 @@ bool CudnnSupport::DoPoolBackward(
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<double>& output_data,
const DeviceMemory<double>& input_diff_data,
- DeviceMemory<double>* output_diff_data) {
+ DeviceMemory<double>* output_diff_data,
+ ScratchAllocator* workspace_allocator) {
// Alpha is the scaling factor for input.
double alpha = 1.0;
// Beta is the scaling factor for output.
@@ -3708,7 +3716,8 @@ bool CudnnSupport::DoPoolBackward(
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<float>& output_data,
const DeviceMemory<float>& input_diff_data,
- DeviceMemory<float>* output_diff_data) {
+ DeviceMemory<float>* output_diff_data,
+ ScratchAllocator* workspace_allocator) {
// Alpha is the scaling factor for input.
float alpha = 1.0;
// Beta is the scaling factor for output.
@@ -3737,7 +3746,8 @@ bool CudnnSupport::DoPoolBackward(
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<Eigen::half>& output_data,
const DeviceMemory<Eigen::half>& input_diff_data,
- DeviceMemory<Eigen::half>* output_diff_data) {
+ DeviceMemory<Eigen::half>* output_diff_data,
+ ScratchAllocator* workspace_allocator) {
// Alpha is the scaling factor for input.
float alpha = 1.0;
// Beta is the scaling factor for output.
@@ -3806,7 +3816,8 @@ bool CudnnSupport::DoNormalizeBackwardWithDimensions(
const dnn::BatchDescriptor& dimensions, const DeviceMemory<float>& raw_data,
const DeviceMemory<float>& normalized_data,
const DeviceMemory<float>& normalized_variable_gradient,
- DeviceMemory<float>* raw_variable_gradient) {
+ DeviceMemory<float>* raw_variable_gradient,
+ ScratchAllocator* workspace_allocator) {
// Check for unsupported modes.
if (normalize_descriptor.wrap_around()) {
LOG(ERROR) << "CUDA LRN does not support cudnn-around mode";
diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.h b/tensorflow/stream_executor/cuda/cuda_dnn.h
index c924d41cb5..9d88f971bb 100644
--- a/tensorflow/stream_executor/cuda/cuda_dnn.h
+++ b/tensorflow/stream_executor/cuda/cuda_dnn.h
@@ -515,21 +515,24 @@ class CudnnSupport : public dnn::DnnSupport {
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<double>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<double>* output_data) override;
+ DeviceMemory<double>* output_data,
+ ScratchAllocator* workspace_allocator) override;
bool DoPoolForward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<float>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<float>* output_data) override;
+ DeviceMemory<float>* output_data,
+ ScratchAllocator* workspace_allocator) override;
bool DoPoolForward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<Eigen::half>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<Eigen::half>* output_data) override;
+ DeviceMemory<Eigen::half>* output_data,
+ ScratchAllocator* workspace_allocator) override;
bool DoPoolBackward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
@@ -538,7 +541,8 @@ class CudnnSupport : public dnn::DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<double>& output_data,
const DeviceMemory<double>& input_diff_data,
- DeviceMemory<double>* output_diff_data) override;
+ DeviceMemory<double>* output_diff_data,
+ ScratchAllocator* workspace_allocator) override;
bool DoPoolBackward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
@@ -547,7 +551,8 @@ class CudnnSupport : public dnn::DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<float>& output_data,
const DeviceMemory<float>& input_diff_data,
- DeviceMemory<float>* output_diff_data) override;
+ DeviceMemory<float>* output_diff_data,
+ ScratchAllocator* workspace_allocator) override;
bool DoPoolBackward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
@@ -556,7 +561,8 @@ class CudnnSupport : public dnn::DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<Eigen::half>& output_data,
const DeviceMemory<Eigen::half>& input_diff_data,
- DeviceMemory<Eigen::half>* output_diff_data) override;
+ DeviceMemory<Eigen::half>* output_diff_data,
+ ScratchAllocator* workspace_allocator) override;
bool DoNormalize(Stream* stream,
const dnn::NormalizeDescriptor& normalize_descriptor,
@@ -575,7 +581,8 @@ class CudnnSupport : public dnn::DnnSupport {
const DeviceMemory<float>& raw_data,
const DeviceMemory<float>& normalized_data,
const DeviceMemory<float>& normalized_variable_gradient,
- DeviceMemory<float>* raw_variable_gradient) override;
+ DeviceMemory<float>* raw_variable_gradient,
+ ScratchAllocator* workspace_allocator) override;
bool DoDepthConcatenate(
Stream* stream, port::ArraySlice<dnn::BatchDescriptor> input_dimensions,
diff --git a/tensorflow/stream_executor/cuda/cuda_gpu_executor.cc b/tensorflow/stream_executor/cuda/cuda_gpu_executor.cc
index f11022ef1d..73f05b94db 100644
--- a/tensorflow/stream_executor/cuda/cuda_gpu_executor.cc
+++ b/tensorflow/stream_executor/cuda/cuda_gpu_executor.cc
@@ -206,6 +206,48 @@ static string GetBinaryDir(bool strip_exe) {
return exe_path;
}
+bool CUDAExecutor::LoadModuleFromCuBin(const char *cubin, CUmodule *module) {
+ uint64_t module_refcount;
+ std::tie(*module, module_refcount) = gpu_binary_to_module_[cubin];
+
+ if (*module == nullptr) {
+ auto load_status = CUDADriver::LoadCubin(context_, cubin, module);
+ if (!load_status.ok()) {
+ LOG(ERROR) << "failed to load CUBIN: " << load_status;
+ return false;
+ }
+ module_refcount = 1;
+ VLOG(3) << "Loaded CUBIN " << static_cast<const void *>(cubin)
+ << " as module " << *module;
+ } else {
+ ++module_refcount;
+ VLOG(3) << "CUBIN " << static_cast<const void *>(cubin)
+ << " is already loaded as module " << *module;
+ }
+ gpu_binary_to_module_[cubin] = {*module, module_refcount};
+ return true;
+}
+
+bool CUDAExecutor::LoadModuleFromPtx(const char *ptx, CUmodule *module) {
+ uint64_t module_refcount;
+ std::tie(*module, module_refcount) = gpu_binary_to_module_[ptx];
+
+ if (*module == nullptr) {
+ if (!CUDADriver::LoadPtx(context_, ptx, module)) {
+ return false;
+ }
+ VLOG(3) << "Loaded PTX " << static_cast<const void *>(ptx) << " as module "
+ << *module;
+ module_refcount = 1;
+ } else {
+ ++module_refcount;
+ VLOG(3) << "PTX " << static_cast<const void *>(ptx)
+ << " is already loaded as module " << module;
+           << " is already loaded as module " << *module;
+ gpu_binary_to_module_[ptx] = {*module, module_refcount};
+ return true;
+}
+
bool CUDAExecutor::GetKernel(const MultiKernelLoaderSpec &spec,
KernelBase *kernel) {
CUDAKernel *cuda_kernel = AsCUDAKernel(kernel);
@@ -215,28 +257,13 @@ bool CUDAExecutor::GetKernel(const MultiKernelLoaderSpec &spec,
VLOG(3) << "GetKernel on kernel " << kernel << " : " << kernel->name();
if (spec.has_cuda_cubin_in_memory()) {
+ mutex_lock lock{in_memory_modules_mu_};
kernelname = &spec.cuda_cubin_in_memory().kernelname();
const char *cubin = spec.cuda_cubin_in_memory().bytes();
- mutex_lock lock{in_memory_modules_mu_};
- uint64_t module_refcount;
- std::tie(module, module_refcount) = gpu_binary_to_module_[cubin];
-
- if (module == nullptr) {
- auto load_status = CUDADriver::LoadCubin(context_, cubin, &module);
- if (!load_status.ok()) {
- LOG(ERROR) << "failed to load CUBIN: " << load_status;
- return false;
- }
- module_refcount = 1;
- VLOG(3) << "Loaded CUBIN " << static_cast<const void *>(cubin)
- << " as module " << module;
- } else {
- ++module_refcount;
- VLOG(3) << "CUBIN " << static_cast<const void *>(cubin)
- << " is already loaded as module " << module;
+ if (!LoadModuleFromCuBin(cubin, &module)) {
+ return false;
}
kernel_to_gpu_binary_[kernel] = cubin;
- gpu_binary_to_module_[cubin] = {module, module_refcount};
} else if (spec.has_cuda_ptx_in_memory()) {
kernelname = &spec.cuda_ptx_in_memory().kernelname();
@@ -254,24 +281,10 @@ bool CUDAExecutor::GetKernel(const MultiKernelLoaderSpec &spec,
}
mutex_lock lock{in_memory_modules_mu_};
- uint64_t module_refcount;
- std::tie(module, module_refcount) = gpu_binary_to_module_[ptx];
-
- if (module == nullptr) {
- if (!CUDADriver::LoadPtx(context_, ptx, &module)) {
- LOG(ERROR) << "failed to load PTX for kernel " << *kernelname;
- return false;
- }
- VLOG(3) << "Loaded PTX " << static_cast<const void *>(ptx)
- << " as module " << module;
- module_refcount = 1;
- } else {
- ++module_refcount;
- VLOG(3) << "PTX " << static_cast<const void *>(ptx)
- << " is already loaded as module " << module;
+ if (!LoadModuleFromPtx(ptx, &module)) {
+ return false;
}
kernel_to_gpu_binary_[kernel] = ptx;
- gpu_binary_to_module_[ptx] = {module, module_refcount};
} else {
LOG(WARNING) << "no method of loading CUDA kernel provided";
return false;
@@ -295,6 +308,23 @@ bool CUDAExecutor::GetKernel(const MultiKernelLoaderSpec &spec,
return true;
}
+bool CUDAExecutor::UnloadGpuBinary(const void *gpu_binary) {
+ auto module_it = gpu_binary_to_module_.find(gpu_binary);
+ if (gpu_binary_to_module_.end() == module_it) {
+ VLOG(3) << "No loaded CUDA module for " << gpu_binary;
+ return false;
+ }
+ auto &module = module_it->second.first;
+ auto &refcount = module_it->second.second;
+ VLOG(3) << "Found CUDA module " << module << " with refcount " << refcount;
+ if (--refcount == 0) {
+ VLOG(3) << "Unloading CUDA module " << module;
+ CUDADriver::UnloadModule(context_, module);
+ gpu_binary_to_module_.erase(module_it);
+ }
+ return true;
+}
+
void CUDAExecutor::UnloadKernel(const KernelBase *kernel) {
VLOG(3) << "Unloading kernel " << kernel << " : " << kernel->name();
@@ -307,25 +337,52 @@ void CUDAExecutor::UnloadKernel(const KernelBase *kernel) {
}
VLOG(3) << "Kernel " << kernel << " : " << kernel->name()
<< " has loaded GPU code " << gpu_binary_it->second;
- auto module_it = gpu_binary_to_module_.find(gpu_binary_it->second);
- if (gpu_binary_to_module_.end() == module_it) {
- VLOG(3) << "Kernel " << kernel << " : " << kernel->name()
- << " has no loaded CUDA module.";
- return; // This kernel never loaded any modules
- }
- auto &module = module_it->second.first;
- auto &refcount = module_it->second.second;
- VLOG(3) << "Kernel " << kernel << " : " << kernel->name()
- << " has loaded GPU code " << gpu_binary_it->second
- << " into CUDA module " << module << " with refcount " << refcount;
- if (--refcount == 0) {
- VLOG(3) << "Unloading CUDA module " << module;
- CUDADriver::UnloadModule(context_, module);
- gpu_binary_to_module_.erase(module_it);
- }
+ UnloadGpuBinary(gpu_binary_it->second);
kernel_to_gpu_binary_.erase(gpu_binary_it);
}
+bool CUDAExecutor::LoadModule(const MultiModuleLoaderSpec &spec,
+ ModuleHandle *module_handle) {
+ // In CUDAExecutor we store the pointer to the GPU binary (PTX or CUBIN) as
+ // ModuleHandle::id().
+ CUmodule cu_module;
+ if (spec.has_cuda_cubin_in_memory()) {
+ mutex_lock lock{in_memory_modules_mu_};
+ if (!LoadModuleFromCuBin(
+ reinterpret_cast<const char *>(spec.cuda_cubin_in_memory().data()),
+ &cu_module)) {
+ return false;
+ }
+ *module_handle = ModuleHandle(const_cast<void *>(
+ static_cast<const void *>(spec.cuda_cubin_in_memory().data())));
+ return true;
+ } else if (spec.has_cuda_ptx_in_memory()) {
+ if (cc_major_ == 0 && cc_minor_ == 0) {
+ return false;
+ }
+
+ if (!spec.cuda_ptx_in_memory()) {
+ return false;
+ }
+
+ mutex_lock lock{in_memory_modules_mu_};
+ if (!LoadModuleFromPtx(spec.cuda_ptx_in_memory(), &cu_module)) {
+ return false;
+ }
+ *module_handle = ModuleHandle(const_cast<void *>(
+ static_cast<const void *>(spec.cuda_ptx_in_memory())));
+ return true;
+ }
+ LOG(WARNING) << "no method of loading CUDA module provided";
+ return false;
+}
+
+bool CUDAExecutor::UnloadModule(ModuleHandle module_handle) {
+ const char *gpu_binary = reinterpret_cast<const char *>(module_handle.id());
+ mutex_lock lock{in_memory_modules_mu_};
+ return UnloadGpuBinary(gpu_binary);
+}
+
bool CUDAExecutor::GetKernelMetadata(CUDAKernel *cuda_kernel,
KernelMetadata *kernel_metadata) {
int value;
@@ -783,16 +840,26 @@ bool CUDAExecutor::DeviceMemoryUsage(int64 *free, int64 *total) const {
return CUDADriver::GetDeviceMemoryInfo(context_, free, total);
}
-bool CUDAExecutor::GetSymbol(const string& symbol_name, void **mem,
+bool CUDAExecutor::GetSymbol(const string &symbol_name,
+ ModuleHandle module_handle, void **mem,
size_t *bytes) {
+ auto lookup_in_module = [&](CUmodule module) {
+ CHECK(module != nullptr);
+ return CUDADriver::GetModuleSymbol(context_, module, symbol_name.c_str(),
+ reinterpret_cast<CUdeviceptr *>(mem),
+ bytes);
+ };
+
{ // give limited scope to mutex_lock
mutex_lock lock{in_memory_modules_mu_};
+ if (static_cast<bool>(module_handle)) {
+ auto it = gpu_binary_to_module_.find(module_handle.id());
+ CHECK(it != gpu_binary_to_module_.end());
+ return lookup_in_module(it->second.first);
+ }
+
for (auto &it : gpu_binary_to_module_) {
- CUmodule module = it.second.first;
- CHECK(module != nullptr);
- if (CUDADriver::GetModuleSymbol(context_, module, symbol_name.c_str(),
- reinterpret_cast<CUdeviceptr *>(mem),
- bytes)) {
+ if (lookup_in_module(it.second.first)) {
return true;
}
}
@@ -844,7 +911,7 @@ CUDAExecutor::GetTimerImplementation() {
return std::unique_ptr<internal::TimerInterface>(new CUDATimer(this));
}
-void *CUDAExecutor::CudaContextHack() { return context_; }
+void *CUDAExecutor::GpuContextHack() { return context_; }
CudaContext* CUDAExecutor::cuda_context() { return context_; }
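
The kernel and module paths above now share one refcounted map from GPU binary to CUmodule: a binary is loaded through the driver only on its first use, every additional load bumps the count, and the module is unloaded from the driver only when the count drops back to zero. An illustrative sketch of that bookkeeping (written in Python purely for brevity; the names are placeholders, not the StreamExecutor API):

    binary_to_module = {}  # gpu_binary -> [module, refcount]

    def load(gpu_binary, driver_load):
      entry = binary_to_module.get(gpu_binary)
      if entry is None:
        module = driver_load(gpu_binary)   # CUDADriver::LoadCubin / LoadPtx
        binary_to_module[gpu_binary] = [module, 1]
        return module
      entry[1] += 1                        # already resident: bump the refcount
      return entry[0]

    def unload(gpu_binary, driver_unload):
      entry = binary_to_module.get(gpu_binary)
      if entry is None:
        return False                       # nothing was loaded for this binary
      entry[1] -= 1
      if entry[1] == 0:
        driver_unload(entry[0])            # CUDADriver::UnloadModule
        del binary_to_module[gpu_binary]
      return True
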
diff --git a/tensorflow/stream_executor/cuda/cuda_gpu_executor.h b/tensorflow/stream_executor/cuda/cuda_gpu_executor.h
index 773cbfb8a1..8a954d5461 100644
--- a/tensorflow/stream_executor/cuda/cuda_gpu_executor.h
+++ b/tensorflow/stream_executor/cuda/cuda_gpu_executor.h
@@ -62,6 +62,9 @@ class CUDAExecutor : public internal::StreamExecutorInterface {
bool GetKernel(const MultiKernelLoaderSpec &spec,
KernelBase *kernel) override;
void UnloadKernel(const KernelBase *kernel) override;
+ bool LoadModule(const MultiModuleLoaderSpec &spec,
+ ModuleHandle *module_handle) override;
+ bool UnloadModule(ModuleHandle module_handle) override;
bool Launch(Stream *stream, const ThreadDim &thread_dims,
const BlockDim &block_dims, const KernelBase &k,
@@ -175,7 +178,8 @@ class CUDAExecutor : public internal::StreamExecutorInterface {
// Search for the symbol and returns a device pointer and size.
// Returns false if symbol does not exist.
- bool GetSymbol(const string& symbol_name, void **mem, size_t *bytes) override;
+ bool GetSymbol(const string &symbol_name, ModuleHandle module_handle,
+ void **mem, size_t *bytes) override;
DeviceDescription *PopulateDeviceDescription() const override;
@@ -210,7 +214,7 @@ class CUDAExecutor : public internal::StreamExecutorInterface {
std::unique_ptr<internal::TimerInterface> GetTimerImplementation() override;
- void *CudaContextHack() override;
+ void *GpuContextHack() override;
CudaContext* cuda_context();
@@ -239,6 +243,16 @@ class CUDAExecutor : public internal::StreamExecutorInterface {
void VlogOccupancyInfo(const KernelBase &kernel, const ThreadDim &thread_dims,
const BlockDim &block_dims);
+ bool LoadModuleFromCuBin(const char *cubin, CUmodule *module)
+ EXCLUSIVE_LOCKS_REQUIRED(in_memory_modules_mu_);
+
+ // Loads the PTX text `ptx` as a CUDA module. `ptx` must be null terminated.
+ bool LoadModuleFromPtx(const char *ptx, CUmodule *module)
+ EXCLUSIVE_LOCKS_REQUIRED(in_memory_modules_mu_);
+
+ bool UnloadGpuBinary(const void *gpu_binary)
+ EXCLUSIVE_LOCKS_REQUIRED(in_memory_modules_mu_);
+
// Guards the in-memory-module mapping.
mutex in_memory_modules_mu_;
diff --git a/tensorflow/stream_executor/cuda/cuda_stream.h b/tensorflow/stream_executor/cuda/cuda_stream.h
index 02edff6431..bb8bda4755 100644
--- a/tensorflow/stream_executor/cuda/cuda_stream.h
+++ b/tensorflow/stream_executor/cuda/cuda_stream.h
@@ -40,8 +40,8 @@ class CUDAStream : public internal::StreamInterface {
// Note: teardown is handled by a parent's call to DeallocateStream.
~CUDAStream() override {}
- void *CudaStreamHack() override { return cuda_stream_; }
- void **CudaStreamMemberHack() override {
+ void *GpuStreamHack() override { return cuda_stream_; }
+ void **GpuStreamMemberHack() override {
return reinterpret_cast<void **>(&cuda_stream_);
}
diff --git a/tensorflow/stream_executor/dnn.cc b/tensorflow/stream_executor/dnn.cc
index 82aa8ceb32..2a30f922bc 100644
--- a/tensorflow/stream_executor/dnn.cc
+++ b/tensorflow/stream_executor/dnn.cc
@@ -117,6 +117,8 @@ string FilterLayoutString(FilterLayout layout) {
switch (layout) {
case FilterLayout::kOutputInputYX:
return "OutputInputYX";
+ case FilterLayout::kOutputYXInput:
+ return "OutputYXInput";
case FilterLayout::kOutputInputYX4:
return "OutputInputYX4";
case FilterLayout::kInputYXOutput:
diff --git a/tensorflow/stream_executor/dnn.h b/tensorflow/stream_executor/dnn.h
index 9eca5abe1a..a7449c2df4 100644
--- a/tensorflow/stream_executor/dnn.h
+++ b/tensorflow/stream_executor/dnn.h
@@ -1552,14 +1552,16 @@ class DnnSupport {
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<float>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<float>* output_data) = 0;
+ DeviceMemory<float>* output_data,
+ ScratchAllocator* workspace_allocator) = 0;
virtual bool DoPoolForward(Stream* stream,
const dnn::PoolingDescriptor& pooling_dimensions,
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<double>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<double>* output_data) {
+ DeviceMemory<double>* output_data,
+ ScratchAllocator* workspace_allocator) {
LOG(FATAL) << "DoPoolForward not implemented for double.";
return false;
}
@@ -1569,7 +1571,8 @@ class DnnSupport {
const dnn::BatchDescriptor& input_dimensions,
const DeviceMemory<Eigen::half>& input_data,
const dnn::BatchDescriptor& output_dimensions,
- DeviceMemory<Eigen::half>* output_data) {
+ DeviceMemory<Eigen::half>* output_data,
+ ScratchAllocator* workspace_allocator) {
LOG(FATAL) << "DoPoolForward not implemented for float16.";
return false;
}
@@ -1582,7 +1585,8 @@ class DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<double>& output_data,
const DeviceMemory<double>& input_diff_data,
- DeviceMemory<double>* output_diff_data) {
+ DeviceMemory<double>* output_diff_data,
+ ScratchAllocator* workspace_allocator) {
LOG(FATAL) << "DoPoolBackward not implemented.";
return false;
}
@@ -1594,7 +1598,8 @@ class DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<float>& output_data,
const DeviceMemory<float>& input_diff_data,
- DeviceMemory<float>* output_diff_data) {
+ DeviceMemory<float>* output_diff_data,
+ ScratchAllocator* workspace_allocator) {
LOG(FATAL) << "DoPoolBackward not implemented.";
return false;
}
@@ -1606,7 +1611,8 @@ class DnnSupport {
const dnn::BatchDescriptor& output_dimensions,
const DeviceMemory<Eigen::half>& output_data,
const DeviceMemory<Eigen::half>& input_diff_data,
- DeviceMemory<Eigen::half>* output_diff_data) {
+ DeviceMemory<Eigen::half>* output_diff_data,
+ ScratchAllocator* workspace_allocator) {
LOG(FATAL) << "DoPoolBackward not implemented.";
return false;
}
@@ -1653,7 +1659,8 @@ class DnnSupport {
const DeviceMemory<float>& raw_data,
const DeviceMemory<float>& normalized_data,
const DeviceMemory<float>& normalized_variable_gradient,
- DeviceMemory<float>* raw_variable_gradient) {
+ DeviceMemory<float>* raw_variable_gradient,
+ ScratchAllocator* workspace_allocator) {
return false;
}
diff --git a/tensorflow/stream_executor/host/host_gpu_executor.cc b/tensorflow/stream_executor/host/host_gpu_executor.cc
index 3cd97b3cf1..8adf739b17 100644
--- a/tensorflow/stream_executor/host/host_gpu_executor.cc
+++ b/tensorflow/stream_executor/host/host_gpu_executor.cc
@@ -93,7 +93,7 @@ bool HostExecutor::MemcpyDeviceToDevice(Stream *stream,
// the nature of the HostExecutor) memcpy on the stream (HostStream)
// associated with the HostExecutor.
AsHostStream(stream)->EnqueueTask(
- [src_mem, dst_mem, size]() { memcpy(src_mem, dst_mem, size); });
+ [src_mem, dst_mem, size]() { memcpy(dst_mem, src_mem, size); });
return true;
}
diff --git a/tensorflow/stream_executor/host/host_gpu_executor.h b/tensorflow/stream_executor/host/host_gpu_executor.h
index e82f57569f..858396ef96 100644
--- a/tensorflow/stream_executor/host/host_gpu_executor.h
+++ b/tensorflow/stream_executor/host/host_gpu_executor.h
@@ -202,7 +202,7 @@ class HostExecutor : public internal::StreamExecutorInterface {
return std::unique_ptr<internal::TimerInterface>(new HostTimer());
}
- void *CudaContextHack() override { return nullptr; }
+ void *GpuContextHack() override { return nullptr; }
private:
const PluginConfig plugin_config_;
diff --git a/tensorflow/stream_executor/host/host_stream.h b/tensorflow/stream_executor/host/host_stream.h
index 5d7b8a3782..be88f074cf 100644
--- a/tensorflow/stream_executor/host/host_stream.h
+++ b/tensorflow/stream_executor/host/host_stream.h
@@ -34,8 +34,8 @@ class HostStream : public internal::StreamInterface {
bool EnqueueTask(std::function<void()> task);
- void *CudaStreamHack() override { return nullptr; }
- void **CudaStreamMemberHack() override { return nullptr; }
+ void *GpuStreamHack() override { return nullptr; }
+ void **GpuStreamMemberHack() override { return nullptr; }
void BlockUntilDone();
diff --git a/tensorflow/stream_executor/module_spec.h b/tensorflow/stream_executor/module_spec.h
new file mode 100644
index 0000000000..212ae7ba9c
--- /dev/null
+++ b/tensorflow/stream_executor/module_spec.h
@@ -0,0 +1,65 @@
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_STREAM_EXECUTOR_MODULE_SPEC_H_
+#define TENSORFLOW_STREAM_EXECUTOR_MODULE_SPEC_H_
+
+#include "tensorflow/stream_executor/lib/array_slice.h"
+#include "tensorflow/stream_executor/lib/stringpiece.h"
+#include "tensorflow/stream_executor/platform/logging.h"
+#include "tensorflow/stream_executor/platform/port.h"
+
+namespace stream_executor {
+
+// Describes how to load a module on a target platform.
+//
+// The exact meaning of a "module" may differ from platform to platform but
+// loosely speaking a module is a collection of kernels and global variables. It
+// corresponds to CUmodule when running on CUDA.
+class MultiModuleLoaderSpec {
+ public:
+ bool has_cuda_cubin_in_memory() const { return has_cuda_cubin_in_memory_; }
+ port::ArraySlice<const uint8> cuda_cubin_in_memory() const {
+ CHECK(has_cuda_cubin_in_memory());
+ return {cuda_cubin_in_memory_.data(), cuda_cubin_in_memory_.size()};
+ }
+
+ bool has_cuda_ptx_in_memory() const { return has_cuda_ptx_in_memory_; }
+ const char* cuda_ptx_in_memory() const {
+ CHECK(has_cuda_ptx_in_memory());
+ return cuda_ptx_in_memory_;
+ }
+
+ void AddCudaCubinInMemory(port::ArraySlice<const uint8> cubin_bytes) {
+ has_cuda_cubin_in_memory_ = true;
+ cuda_cubin_in_memory_ = cubin_bytes;
+ }
+
+ void AddCudaPtxInMemory(const char* ptx) {
+ has_cuda_ptx_in_memory_ = true;
+ // The CUDA driver does not like getting an empty string as PTX.
+ cuda_ptx_in_memory_ = *ptx ? ptx : nullptr;
+ }
+
+ private:
+ port::ArraySlice<const uint8> cuda_cubin_in_memory_;
+ bool has_cuda_cubin_in_memory_ = false;
+ const char* cuda_ptx_in_memory_;
+ bool has_cuda_ptx_in_memory_ = false;
+};
+
+} // namespace stream_executor
+
+#endif // TENSORFLOW_STREAM_EXECUTOR_MODULE_SPEC_H_
diff --git a/tensorflow/stream_executor/stream.cc b/tensorflow/stream_executor/stream.cc
index 9369183133..2c495c99e1 100644
--- a/tensorflow/stream_executor/stream.cc
+++ b/tensorflow/stream_executor/stream.cc
@@ -268,6 +268,12 @@ Stream::~Stream() {
VLOG_CALL();
temporary_memory_manager_.ForceDeallocateAll();
+ // Ensure the stream is completed.
+ auto status = BlockHostUntilDone();
+ if (!status.ok()) {
+ LOG(WARNING) << "Error blocking host until done in stream destructor: "
+ << status;
+ }
if (allocated_) {
parent_->DeallocateStream(this);
@@ -1377,15 +1383,16 @@ Stream &Stream::ThenPoolForward(
const dnn::BatchDescriptor &input_dimensions,
const DeviceMemory<double> &input_data,
const dnn::BatchDescriptor &output_dimensions,
- DeviceMemory<double> *output_data) {
+ DeviceMemory<double> *output_data, ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
- PARAM(input_data), PARAM(output_dimensions), PARAM(output_data));
+ PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
+ PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoPoolForward(this, pooling_dimensions, input_dimensions,
- input_data, output_dimensions,
- output_data));
+ input_data, output_dimensions, output_data,
+ workspace_allocator));
} else {
SetError();
LOG(WARNING)
@@ -1401,15 +1408,16 @@ Stream &Stream::ThenPoolForward(
const dnn::BatchDescriptor &input_dimensions,
const DeviceMemory<float> &input_data,
const dnn::BatchDescriptor &output_dimensions,
- DeviceMemory<float> *output_data) {
+ DeviceMemory<float> *output_data, ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
- PARAM(input_data), PARAM(output_dimensions), PARAM(output_data));
+ PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
+ PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoPoolForward(this, pooling_dimensions, input_dimensions,
- input_data, output_dimensions,
- output_data));
+ input_data, output_dimensions, output_data,
+ workspace_allocator));
} else {
SetErrorAndLogNoDnnSupport();
}
@@ -1422,15 +1430,17 @@ Stream &Stream::ThenPoolForward(
const dnn::BatchDescriptor &input_dimensions,
const DeviceMemory<Eigen::half> &input_data,
const dnn::BatchDescriptor &output_dimensions,
- DeviceMemory<Eigen::half> *output_data) {
+ DeviceMemory<Eigen::half> *output_data,
+ ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
- PARAM(input_data), PARAM(output_dimensions), PARAM(output_data));
+ PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
+ PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoPoolForward(this, pooling_dimensions, input_dimensions,
- input_data, output_dimensions,
- output_data));
+ input_data, output_dimensions, output_data,
+ workspace_allocator));
} else {
SetErrorAndLogNoDnnSupport();
}
@@ -1445,16 +1455,19 @@ Stream &Stream::ThenPoolBackward(
const dnn::BatchDescriptor &output_dimensions,
const DeviceMemory<double> &output_data,
const DeviceMemory<double> &input_diff_data,
- DeviceMemory<double> *output_diff_data) {
+ DeviceMemory<double> *output_diff_data,
+ ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
- PARAM(input_diff_data), PARAM(output_diff_data));
+ PARAM(input_diff_data), PARAM(output_diff_data),
+ PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoPoolBackward(this, pooling_dimensions, input_dimensions,
input_data, output_dimensions, output_data,
- input_diff_data, output_diff_data));
+ input_diff_data, output_diff_data,
+ workspace_allocator));
} else {
SetError();
LOG(WARNING)
@@ -1472,16 +1485,19 @@ Stream &Stream::ThenPoolBackward(
const dnn::BatchDescriptor &output_dimensions,
const DeviceMemory<float> &output_data,
const DeviceMemory<float> &input_diff_data,
- DeviceMemory<float> *output_diff_data) {
+ DeviceMemory<float> *output_diff_data,
+ ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
- PARAM(input_diff_data), PARAM(output_diff_data));
+ PARAM(input_diff_data), PARAM(output_diff_data),
+ PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoPoolBackward(this, pooling_dimensions, input_dimensions,
input_data, output_dimensions, output_data,
- input_diff_data, output_diff_data));
+ input_diff_data, output_diff_data,
+ workspace_allocator));
} else {
SetErrorAndLogNoDnnSupport();
}
@@ -1496,16 +1512,19 @@ Stream &Stream::ThenPoolBackward(
const dnn::BatchDescriptor &output_dimensions,
const DeviceMemory<Eigen::half> &output_data,
const DeviceMemory<Eigen::half> &input_diff_data,
- DeviceMemory<Eigen::half> *output_diff_data) {
+ DeviceMemory<Eigen::half> *output_diff_data,
+ ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(pooling_dimensions), PARAM(input_dimensions),
PARAM(input_data), PARAM(output_dimensions), PARAM(output_data),
- PARAM(input_diff_data), PARAM(output_diff_data));
+ PARAM(input_diff_data), PARAM(output_diff_data),
+ PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoPoolBackward(this, pooling_dimensions, input_dimensions,
input_data, output_dimensions, output_data,
- input_diff_data, output_diff_data));
+ input_diff_data, output_diff_data,
+ workspace_allocator));
} else {
SetErrorAndLogNoDnnSupport();
}
@@ -1552,16 +1571,18 @@ Stream &Stream::ThenNormalizeBackwardWithDimensions(
const dnn::BatchDescriptor &dimensions, const DeviceMemory<float> &raw_data,
const DeviceMemory<float> &normalized_data,
const DeviceMemory<float> &normalized_variable_gradient,
- DeviceMemory<float> *raw_variable_gradient) {
+ DeviceMemory<float> *raw_variable_gradient,
+ ScratchAllocator *workspace_allocator) {
VLOG_CALL(PARAM(normalize_descriptor), PARAM(dimensions), PARAM(raw_data),
PARAM(normalized_data), PARAM(normalized_variable_gradient),
- PARAM(raw_variable_gradient));
+ PARAM(raw_variable_gradient), PARAM(workspace_allocator));
if (ok()) {
if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
CheckError(dnn->DoNormalizeBackwardWithDimensions(
this, normalize_descriptor, dimensions, raw_data, normalized_data,
- normalized_variable_gradient, raw_variable_gradient));
+ normalized_variable_gradient, raw_variable_gradient,
+ workspace_allocator));
} else {
SetErrorAndLogNoDnnSupport();
}
diff --git a/tensorflow/stream_executor/stream.h b/tensorflow/stream_executor/stream.h
index e8885e1eb6..63d64947c8 100644
--- a/tensorflow/stream_executor/stream.h
+++ b/tensorflow/stream_executor/stream.h
@@ -629,19 +629,22 @@ class Stream {
const dnn::BatchDescriptor &input_dimensions,
const DeviceMemory<double> &input_data,
const dnn::BatchDescriptor &output_dimensions,
- DeviceMemory<double> *output_data);
+ DeviceMemory<double> *output_data,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenPoolForward(const dnn::PoolingDescriptor &pooling_dimensions,
const dnn::BatchDescriptor &input_dimensions,
const DeviceMemory<float> &input_data,
const dnn::BatchDescriptor &output_dimensions,
- DeviceMemory<float> *output_data);
+ DeviceMemory<float> *output_data,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenPoolForward(const dnn::PoolingDescriptor &pooling_dimensions,
const dnn::BatchDescriptor &input_dimensions,
const DeviceMemory<Eigen::half> &input_data,
const dnn::BatchDescriptor &output_dimensions,
- DeviceMemory<Eigen::half> *output_data);
+ DeviceMemory<Eigen::half> *output_data,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenPoolBackward(const dnn::PoolingDescriptor &pooling_dimensions,
const dnn::BatchDescriptor &input_dimensions,
@@ -649,7 +652,8 @@ class Stream {
const dnn::BatchDescriptor &output_dimensions,
const DeviceMemory<double> &output_data,
const DeviceMemory<double> &input_diff_data,
- DeviceMemory<double> *output_diff_data);
+ DeviceMemory<double> *output_diff_data,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenPoolBackward(const dnn::PoolingDescriptor &pooling_dimensions,
const dnn::BatchDescriptor &input_dimensions,
@@ -657,7 +661,8 @@ class Stream {
const dnn::BatchDescriptor &output_dimensions,
const DeviceMemory<float> &output_data,
const DeviceMemory<float> &input_diff_data,
- DeviceMemory<float> *output_diff_data);
+ DeviceMemory<float> *output_diff_data,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenPoolBackward(const dnn::PoolingDescriptor &pooling_dimensions,
const dnn::BatchDescriptor &input_dimensions,
@@ -665,7 +670,8 @@ class Stream {
const dnn::BatchDescriptor &output_dimensions,
const DeviceMemory<Eigen::half> &output_data,
const DeviceMemory<Eigen::half> &input_diff_data,
- DeviceMemory<Eigen::half> *output_diff_data);
+ DeviceMemory<Eigen::half> *output_diff_data,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenNormalize(const dnn::NormalizeDescriptor &normalize_descriptor,
const DeviceMemory<float> &input_data,
@@ -684,7 +690,8 @@ class Stream {
const DeviceMemory<float> &raw_data,
const DeviceMemory<float> &normalized_data,
const DeviceMemory<float> &normalized_variable_gradient,
- DeviceMemory<float> *raw_variable_gradient);
+ DeviceMemory<float> *raw_variable_gradient,
+ ScratchAllocator *workspace_allocator = nullptr);
Stream &ThenActivate(dnn::ActivationMode activation_mode,
const dnn::BatchDescriptor &dimensions,
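For illustration only (this sketch is not part of the patch): because the new ScratchAllocator parameter in stream.h defaults to nullptr, existing call sites compile unchanged, while callers can opt in to letting the DNN backend allocate a temporary workspace. The `se` alias, the helper name, and the pre-populated descriptors/buffers below are assumptions for the sketch, not symbols introduced by this diff.

    #include "tensorflow/stream_executor/stream.h"

    namespace se = stream_executor;

    // Assumed to be set up elsewhere: the stream, pooling/batch descriptors,
    // device buffers, and (optionally) a scratch allocator.
    void RunPoolForward(se::Stream *stream,
                        const se::dnn::PoolingDescriptor &pool_desc,
                        const se::dnn::BatchDescriptor &in_desc,
                        const se::DeviceMemory<float> &in_data,
                        const se::dnn::BatchDescriptor &out_desc,
                        se::DeviceMemory<float> *out_data,
                        se::ScratchAllocator *scratch) {
      // Pre-existing call shape: still valid, workspace_allocator defaults to nullptr.
      stream->ThenPoolForward(pool_desc, in_desc, in_data, out_desc, out_data);

      // Opt-in call shape: the backend may allocate scratch space through `scratch`.
      stream->ThenPoolForward(pool_desc, in_desc, in_data, out_desc, out_data,
                              scratch);
    }
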
diff --git a/tensorflow/stream_executor/stream_executor_internal.h b/tensorflow/stream_executor/stream_executor_internal.h
index 9c989b971d..f34b1fc083 100644
--- a/tensorflow/stream_executor/stream_executor_internal.h
+++ b/tensorflow/stream_executor/stream_executor_internal.h
@@ -36,20 +36,38 @@ limitations under the License.
#include "tensorflow/stream_executor/kernel_cache_config.h"
#include "tensorflow/stream_executor/kernel_spec.h"
#include "tensorflow/stream_executor/launch_dim.h"
+#include "tensorflow/stream_executor/lib/inlined_vector.h"
#include "tensorflow/stream_executor/lib/status.h"
#include "tensorflow/stream_executor/lib/statusor.h"
+#include "tensorflow/stream_executor/module_spec.h"
#include "tensorflow/stream_executor/platform.h"
#include "tensorflow/stream_executor/platform/port.h"
#include "tensorflow/stream_executor/plugin_registry.h"
#include "tensorflow/stream_executor/shared_memory_config.h"
#include "tensorflow/stream_executor/trace_listener.h"
-#include "tensorflow/stream_executor/lib/inlined_vector.h"
namespace stream_executor {
class Stream;
class Timer;
+// An opaque handle to a loaded module.
+//
+// An instance of this is returned from StreamExecutor::GetModule.
+class ModuleHandle {
+ public:
+ /*implicit*/ ModuleHandle(void *id = nullptr) : id_(id) {}
+
+ // A ModuleHandle with id() == nullptr is an invalid module handle, akin to a
+ // null pointer.
+ void *id() const { return id_; }
+
+ explicit operator bool() const { return id() != nullptr; }
+
+ private:
+ void *id_;
+};
+
namespace internal {
// Platform-dependent interface class for the generic Events interface, in
@@ -100,19 +118,20 @@ class StreamInterface {
// Default destructor for the abstract interface.
virtual ~StreamInterface() {}
- // Returns the CUDA stream associated with this platform's stream
+ // Returns the GPU stream associated with this platform's stream
// implementation.
//
- // WARNING: checks that the underlying platform is, in fact, CUDA, causing a
- // fatal error if it is not. This hack is made available solely for use from
- // distbelief code, which temporarily has strong ties to CUDA as a platform.
- virtual void *CudaStreamHack() { return nullptr; }
-
- // See the above comment on CudaStreamHack -- this further breaks abstraction
- // for Eigen within distbelief, which has strong ties to CUDA as a platform,
- // and a historical attachment to a programming model which takes a
+ // WARNING: checks that the underlying platform is, in fact, CUDA or ROCm,
+ // causing a fatal error if it is not. This hack is made available solely for
+ // use from distbelief code, which temporarily has strong ties to CUDA or
+ // ROCm as a platform.
+ virtual void *GpuStreamHack() { return nullptr; }
+
+ // See the above comment on GpuStreamHack -- this further breaks abstraction
+ // for Eigen within distbelief, which has strong ties to CUDA or ROCm as a
+ // platform, and a historical attachment to a programming model which takes a
// stream-slot rather than a stream-value.
- virtual void **CudaStreamMemberHack() { return nullptr; }
+ virtual void **GpuStreamMemberHack() { return nullptr; }
private:
SE_DISALLOW_COPY_AND_ASSIGN(StreamInterface);
@@ -163,6 +182,11 @@ class StreamExecutorInterface {
KernelBase *kernel) {
return false;
}
+ virtual bool LoadModule(const MultiModuleLoaderSpec &spec,
+ ModuleHandle *module_handle) {
+ return false;
+ }
+ virtual bool UnloadModule(ModuleHandle module_handle) { return false; }
virtual bool Launch(Stream *stream, const ThreadDim &thread_dims,
const BlockDim &block_dims, const KernelBase &k,
const KernelArgsArrayBase &args) {
@@ -246,7 +270,12 @@ class StreamExecutorInterface {
// null, however, both of them cannot be null at the same time. To use
// constant memory in CUDA, GetSymbol has to be used. Returns true if symbol
// is found.
- virtual bool GetSymbol(const string& symbol_name, void **mem, size_t *bytes) {
+ //
+ // If ModuleHandle is set then we search for `symbol_name` only within the
+ // module corresponding to `module_handle`. Otherwise all loaded modules are
+ // searched.
+ virtual bool GetSymbol(const string &symbol_name, ModuleHandle module_handle,
+ void **mem, size_t *bytes) {
return false;
}
@@ -324,13 +353,14 @@ class StreamExecutorInterface {
virtual std::unique_ptr<StreamInterface> GetStreamImplementation() = 0;
virtual std::unique_ptr<TimerInterface> GetTimerImplementation() = 0;
- // Returns the CUDA context associated with this StreamExecutor platform
- // implementation.
+ // Returns the CUDA or ROCm context associated with this StreamExecutor
+ // platform implementation.
//
- // WARNING: checks that the underlying platform is, in fact, CUDA, causing a
- // fatal error if it is not. This hack is made available solely for use from
- // distbelief code, which temporarily has strong ties to CUDA as a platform.
- virtual void *CudaContextHack() { return nullptr; }
+ // WARNING: checks that the underlying platform is, in fact, CUDA or ROCm,
+ // causing a fatal error if it is not. This hack is made available solely for
+ // use from distbelief code, which temporarily has strong ties to CUDA or ROCm
+ // as a platform.
+ virtual void *GpuContextHack() { return nullptr; }
private:
SE_DISALLOW_COPY_AND_ASSIGN(StreamExecutorInterface);
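For illustration only (not part of the patch), the intended ModuleHandle semantics: a default-constructed handle wraps a null id and converts to false, so callers can treat it like a null pointer until LoadModule fills it in. The `opaque_id` parameter below is a placeholder, not a real API.

    #include "tensorflow/stream_executor/stream_executor_internal.h"

    void ModuleHandleSemantics(void *opaque_id) {
      stream_executor::ModuleHandle invalid;          // id() == nullptr
      stream_executor::ModuleHandle valid(opaque_id);

      // operator bool distinguishes the two, mirroring a null-pointer check.
      if (!invalid) { /* nothing loaded yet */ }
      if (valid)    { /* safe to hand to GetSymbol / UnloadModule */ }
    }
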
diff --git a/tensorflow/stream_executor/stream_executor_pimpl.cc b/tensorflow/stream_executor/stream_executor_pimpl.cc
index 000795ff00..2e0137a485 100644
--- a/tensorflow/stream_executor/stream_executor_pimpl.cc
+++ b/tensorflow/stream_executor/stream_executor_pimpl.cc
@@ -220,6 +220,15 @@ void StreamExecutor::UnloadKernel(const KernelBase *kernel) {
implementation_->UnloadKernel(kernel);
}
+bool StreamExecutor::LoadModule(const MultiModuleLoaderSpec &spec,
+ ModuleHandle *module_handle) {
+ return implementation_->LoadModule(spec, module_handle);
+}
+
+bool StreamExecutor::UnloadModule(ModuleHandle module_handle) {
+ return implementation_->UnloadModule(module_handle);
+}
+
void StreamExecutor::Deallocate(DeviceMemoryBase *mem) {
VLOG(1) << "Called StreamExecutor::Deallocate(mem=" << mem->opaque()
<< ") mem->size()=" << mem->size() << StackTraceIfVLOG10();
@@ -459,9 +468,34 @@ void *StreamExecutor::Allocate(uint64 size) {
return buf;
}
-bool StreamExecutor::GetSymbol(const string &symbol_name, void **mem,
+port::StatusOr<DeviceMemoryBase> StreamExecutor::GetUntypedSymbol(
+ const string &symbol_name, ModuleHandle module_handle) {
+ // If failed to get the symbol, opaque/bytes are unchanged. Initialize them to
+ // be nullptr/0 for consistency with DeviceMemory semantics.
+ void *opaque = nullptr;
+ size_t bytes = 0;
+ if (GetSymbol(symbol_name, module_handle, &opaque, &bytes)) {
+ return DeviceMemoryBase(opaque, bytes);
+ }
+
+ if (static_cast<bool>(module_handle)) {
+ return port::Status(
+ port::error::NOT_FOUND,
+ port::StrCat("Check if module containing symbol ", symbol_name,
+ " is loaded (module_handle = ",
+ reinterpret_cast<uintptr_t>(module_handle.id()), ")"));
+ } else {
+ return port::Status(
+ port::error::NOT_FOUND,
+ port::StrCat("Check if kernel using the symbol is loaded: ",
+ symbol_name));
+ }
+}
+
+bool StreamExecutor::GetSymbol(const string &symbol_name,
+ ModuleHandle module_handle, void **mem,
size_t *bytes) {
- return implementation_->GetSymbol(symbol_name, mem, bytes);
+ return implementation_->GetSymbol(symbol_name, module_handle, mem, bytes);
}
void *StreamExecutor::UnifiedMemoryAllocate(uint64 bytes) {
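For illustration only (not part of the patch), a sketch of consuming the new GetUntypedSymbol entry point; `executor` and `module_handle` are assumed to come from earlier StreamExecutor setup and a LoadModule call.

    stream_executor::port::StatusOr<stream_executor::DeviceMemoryBase> symbol =
        executor->GetUntypedSymbol("nms0::symbol", module_handle);
    if (!symbol.ok()) {
      // NOT_FOUND: the symbol is absent or its module was never loaded.
      return;
    }
    stream_executor::DeviceMemoryBase mem = symbol.ValueOrDie();
    // mem.opaque() and mem.size() now describe the device-resident symbol.
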
diff --git a/tensorflow/stream_executor/stream_executor_pimpl.h b/tensorflow/stream_executor/stream_executor_pimpl.h
index ad80a1ba25..47b3a2b030 100644
--- a/tensorflow/stream_executor/stream_executor_pimpl.h
+++ b/tensorflow/stream_executor/stream_executor_pimpl.h
@@ -106,6 +106,16 @@ class StreamExecutor {
// Releases any state associated with the previously loaded kernel.
void UnloadKernel(const KernelBase *kernel);
+ // Loads a module for the platform this StreamExecutor is acting upon.
+ //
+ // `spec` describes the module to be loaded. On success writes the handle for
+ // the loaded module to `module_handle` and returns true. Else returns false.
+ bool LoadModule(const MultiModuleLoaderSpec &spec,
+ ModuleHandle *module_handle);
+
+ // Unloads the module with handle `module_handle`.
+ bool UnloadModule(ModuleHandle module_handle);
+
// Synchronously allocates an array on the device of type T with element_count
// elements.
template <typename T>
@@ -169,8 +179,16 @@ class StreamExecutor {
// type of symbol and T match.
// - Note: symbol_name should include its namespace as well. For example,
// pass "nms0::symbol" if referring to nms0::symbol.
+ //
+ // If `module_handle` is set then searches only within the module
+ // corresponding to `module_handle`.
template <typename T>
- port::StatusOr<DeviceMemory<T>> GetSymbol(const string &symbol_name);
+ port::StatusOr<DeviceMemory<T>> GetSymbol(const string &symbol_name,
+ ModuleHandle module_handle = {});
+
+ // An untyped version of GetSymbol.
+ port::StatusOr<DeviceMemoryBase> GetUntypedSymbol(
+ const string &symbol_name, ModuleHandle module_handle = {});
// Deallocate the DeviceMemory previously allocated via this interface.
// Deallocation of a nullptr-representative value is permitted.
@@ -507,7 +525,8 @@ class StreamExecutor {
// Finds and retrieves device memory for the symbol on the underlying
// platform.
- bool GetSymbol(const string& symbol_name, void **mem, size_t *bytes);
+ bool GetSymbol(const string &symbol_name, ModuleHandle module_handle,
+ void **mem, size_t *bytes);
// Entrains a memcpy operation onto stream, with a host destination location
// host_dst and a device memory source, with target size size.
@@ -678,6 +697,41 @@ class StreamExecutor {
SE_DISALLOW_COPY_AND_ASSIGN(StreamExecutor);
};
+// A wrapper around ModuleHandle that uses RAII to manage its lifetime.
+class ScopedModuleHandle {
+ public:
+ explicit ScopedModuleHandle(StreamExecutor *executor,
+ ModuleHandle module_handle)
+ : executor_(executor), module_handle_(module_handle) {}
+
+ ScopedModuleHandle(ScopedModuleHandle &&other) {
+ executor_ = other.executor_;
+ module_handle_ = other.module_handle_;
+ other.executor_ = nullptr;
+ other.module_handle_ = ModuleHandle();
+ }
+
+ ScopedModuleHandle &operator=(ScopedModuleHandle &&other) {
+ executor_ = other.executor_;
+ module_handle_ = other.module_handle_;
+ other.executor_ = nullptr;
+ other.module_handle_ = ModuleHandle();
+ return *this;
+ }
+
+ ~ScopedModuleHandle() {
+ if (static_cast<bool>(module_handle_)) {
+ CHECK(executor_->UnloadModule(module_handle_));
+ }
+ }
+
+ private:
+ StreamExecutor *executor_;
+ ModuleHandle module_handle_;
+
+ TF_DISALLOW_COPY_AND_ASSIGN(ScopedModuleHandle);
+};
+
////////////
// Inlines
@@ -690,19 +744,13 @@ inline DeviceMemory<T> StreamExecutor::AllocateArray(uint64 element_count) {
template <typename T>
inline port::StatusOr<DeviceMemory<T>> StreamExecutor::GetSymbol(
- const string &symbol_name) {
- // If failed to get the symbol, opaque/bytes are unchanged. Initialize them to
- // be nullptr/0 for consistency with DeviceMemory semantics.
- void *opaque = nullptr;
- size_t bytes = 0;
- if (GetSymbol(symbol_name, &opaque, &bytes)) {
- CHECK_EQ(bytes % sizeof(T), 0);
- return DeviceMemory<T>::MakeFromByteSize(opaque, bytes);
+ const string &symbol_name, ModuleHandle module_handle) {
+ port::StatusOr<DeviceMemoryBase> untyped_symbol =
+ GetUntypedSymbol(symbol_name, module_handle);
+ if (!untyped_symbol.ok()) {
+ return untyped_symbol.status();
}
- return port::Status(
- port::error::NOT_FOUND,
- port::StrCat("Check if kernel using the symbol is loaded: ",
- symbol_name));
+ return DeviceMemory<T>(untyped_symbol.ValueOrDie());
}
template <typename ElemT>
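For illustration only (not part of the patch), a sketch that ties the new module APIs together: LoadModule produces a ModuleHandle, ScopedModuleHandle unloads it via RAII, and the typed GetSymbol overload restricts the lookup to that module. Construction of the MultiModuleLoaderSpec is elided because its API is not shown in this diff.

    void UseModuleSymbol(stream_executor::StreamExecutor *executor,
                         const stream_executor::MultiModuleLoaderSpec &spec) {
      stream_executor::ModuleHandle module;
      if (!executor->LoadModule(spec, &module)) {
        return;  // Loading failed; `module` stays invalid.
      }
      // Unloads the module automatically when `scoped` leaves scope.
      stream_executor::ScopedModuleHandle scoped(executor, module);

      auto symbol = executor->GetSymbol<float>("nms0::symbol", module);
      if (symbol.ok()) {
        stream_executor::DeviceMemory<float> data = symbol.ValueOrDie();
        // `data` is usable on streams owned by `executor`.
      }
    }
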
diff --git a/tensorflow/tensorflow.bzl b/tensorflow/tensorflow.bzl
index 5884870daa..6f1a576c6c 100644
--- a/tensorflow/tensorflow.bzl
+++ b/tensorflow/tensorflow.bzl
@@ -25,6 +25,10 @@ load(
"if_mkl_lnx_x64"
)
load(
+ "//third_party/mkl_dnn:build_defs.bzl",
+ "if_mkl_open_source_only",
+)
+load(
"//third_party/ngraph:build_defs.bzl",
"if_ngraph",
)
@@ -138,6 +142,14 @@ def if_not_mobile(a):
"//conditions:default": a,
})
+# Config setting selector used when building for products
+# which require restricted licenses to be avoided.
+def if_not_lgpl_restricted(a):
+ _ = (a,)
+ return select({
+ "//conditions:default": [],
+ })
+
def if_not_windows(a):
return select({
clean_dep("//tensorflow:windows"): [],
@@ -184,9 +196,13 @@ def get_win_copts(is_external=False):
"/DEIGEN_AVOID_STL_ARRAY",
"/Iexternal/gemmlowp",
"/wd4018", # -Wno-sign-compare
- "/U_HAS_EXCEPTIONS",
- "/D_HAS_EXCEPTIONS=1",
- "/EHsc", # -fno-exceptions
+ # Bazel's CROSSTOOL currently passes /EHsc to enable exceptions by
+ # default. We can't pass /EHs-c- to disable exceptions; otherwise
+ # we will get a waterfall of flag conflict warnings. Wait for
+ # Bazel to fix this.
+ # "/D_HAS_EXCEPTIONS=0",
+ # "/EHs-c-",
+ "/wd4577",
"/DNOGDI",
]
if is_external:
@@ -218,6 +234,7 @@ def tf_copts(android_optimization_level_override="-O2", is_external=False):
+ if_cuda(["-DGOOGLE_CUDA=1"])
+ if_tensorrt(["-DGOOGLE_TENSORRT=1"])
+ if_mkl(["-DINTEL_MKL=1", "-DEIGEN_USE_VML"])
+ + if_mkl_open_source_only(["-DDO_NOT_USE_ML"])
+ if_ngraph(["-DINTEL_NGRAPH=1"])
+ if_mkl_lnx_x64(["-fopenmp"])
+ if_android_arm(["-mfpu=neon"])
@@ -233,6 +250,7 @@ def tf_copts(android_optimization_level_override="-O2", is_external=False):
clean_dep("//tensorflow:windows"): get_win_copts(is_external),
clean_dep("//tensorflow:windows_msvc"): get_win_copts(is_external),
clean_dep("//tensorflow:ios"): ["-std=c++11"],
+ clean_dep("//tensorflow:no_lgpl_deps"): ["-D__TENSORFLOW_NO_LGPL_DEPS__", "-pthread"],
"//conditions:default": ["-pthread"]
}))
@@ -1348,7 +1366,7 @@ def tf_custom_op_library(name, srcs=[], gpu_srcs=[], deps=[], linkopts=[]):
name=name,
srcs=srcs,
deps=deps + if_cuda(cuda_deps),
- data=[name + "_check_deps"],
+ data=if_static([name + "_check_deps"]),
copts=tf_copts(is_external=True),
features = ["windows_export_all_symbols"],
linkopts=linkopts + select({
diff --git a/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt b/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt
index 9e09a8d48e..ef9fe096a1 100644
--- a/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.-config-proto.-experimental.pbtxt
@@ -8,5 +8,11 @@ tf_proto {
label: LABEL_OPTIONAL
type: TYPE_STRING
}
+ field {
+ name: "client_handles_error_formatting"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
}
}
diff --git a/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt b/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt
index 4af4ed70ef..eeef15515d 100644
--- a/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.-config-proto.pbtxt
@@ -131,6 +131,12 @@ tf_proto {
label: LABEL_OPTIONAL
type: TYPE_STRING
}
+ field {
+ name: "client_handles_error_formatting"
+ number: 2
+ label: LABEL_OPTIONAL
+ type: TYPE_BOOL
+ }
}
}
}
diff --git a/tensorflow/tools/api/golden/tensorflow.-variable.pbtxt b/tensorflow/tools/api/golden/tensorflow.-variable.pbtxt
index 23b552cc38..e841c4ad89 100644
--- a/tensorflow/tools/api/golden/tensorflow.-variable.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.-variable.pbtxt
@@ -49,7 +49,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'initial_value\', \'trainable\', \'collections\', \'validate_shape\', \'caching_device\', \'name\', \'variable_def\', \'dtype\', \'expected_shape\', \'import_scope\', \'constraint\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'initial_value\', \'trainable\', \'collections\', \'validate_shape\', \'caching_device\', \'name\', \'variable_def\', \'dtype\', \'expected_shape\', \'import_scope\', \'constraint\', \'use_resource\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'True\', \'None\', \'True\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
}
member_method {
name: "assign"
diff --git a/tensorflow/tools/api/golden/tensorflow.compat.pbtxt b/tensorflow/tools/api/golden/tensorflow.compat.pbtxt
index bab480ff9b..f1d760603e 100644
--- a/tensorflow/tools/api/golden/tensorflow.compat.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.compat.pbtxt
@@ -33,6 +33,14 @@ tf_module {
argspec: "args=[\'bytes_or_text\', \'encoding\'], varargs=None, keywords=None, defaults=[\'utf-8\'], "
}
member_method {
+ name: "forward_compatibility_horizon"
+ argspec: "args=[\'year\', \'month\', \'day\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
+ name: "forward_compatible"
+ argspec: "args=[\'year\', \'month\', \'day\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "path_to_str"
argspec: "args=[\'path\'], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt b/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt
index c8da55d802..5aa4b3d4fb 100644
--- a/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.estimator.-run-config.pbtxt
@@ -51,6 +51,10 @@ tf_class {
mtype: "<type \'property\'>"
}
member {
+ name: "protocol"
+ mtype: "<type \'property\'>"
+ }
+ member {
name: "save_checkpoints_secs"
mtype: "<type \'property\'>"
}
@@ -88,7 +92,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'train_distribute\', \'device_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\', \'None\'], "
+ argspec: "args=[\'self\', \'model_dir\', \'tf_random_seed\', \'save_summary_steps\', \'save_checkpoints_steps\', \'save_checkpoints_secs\', \'session_config\', \'keep_checkpoint_max\', \'keep_checkpoint_every_n_hours\', \'log_step_count_steps\', \'train_distribute\', \'device_fn\', \'protocol\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'100\', \'<object object instance>\', \'<object object instance>\', \'None\', \'5\', \'10000\', \'100\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "replace"
diff --git a/tensorflow/tools/api/golden/tensorflow.initializers.pbtxt b/tensorflow/tools/api/golden/tensorflow.initializers.pbtxt
index eaf0036cac..bc0426f2f1 100644
--- a/tensorflow/tools/api/golden/tensorflow.initializers.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.initializers.pbtxt
@@ -45,6 +45,30 @@ tf_module {
argspec: "args=[], varargs=None, keywords=None, defaults=None"
}
member_method {
+ name: "glorot_normal"
+ argspec: "args=[\'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "glorot_uniform"
+ argspec: "args=[\'seed\', \'dtype\'], varargs=None, keywords=None, defaults=[\'None\', \"<dtype: \'float32\'>\"], "
+ }
+ member_method {
+ name: "he_normal"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "he_uniform"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lecun_normal"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
+ name: "lecun_uniform"
+ argspec: "args=[\'seed\'], varargs=None, keywords=None, defaults=[\'None\'], "
+ }
+ member_method {
name: "local_variables"
argspec: "args=[], varargs=None, keywords=None, defaults=None"
}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt
index 8295905975..65cfad77d1 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.-sequential.pbtxt
@@ -267,6 +267,10 @@ tf_class {
argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
}
member_method {
+ name: "symbolic_set_inputs"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "test_on_batch"
argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.activations.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.activations.pbtxt
index 2cd83baf65..2e9de9ebb2 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.activations.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.activations.pbtxt
@@ -22,7 +22,7 @@ tf_module {
}
member_method {
name: "relu"
- argspec: "args=[\'x\', \'alpha\', \'max_value\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\'], "
+ argspec: "args=[\'x\', \'alpha\', \'max_value\', \'threshold\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\', \'0\'], "
}
member_method {
name: "selu"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.backend.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.backend.pbtxt
index c6149e8aa7..126ce8db6a 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.backend.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.backend.pbtxt
@@ -70,7 +70,7 @@ tf_module {
}
member_method {
name: "categorical_crossentropy"
- argspec: "args=[\'target\', \'output\', \'from_logits\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ argspec: "args=[\'target\', \'output\', \'from_logits\', \'axis\'], varargs=None, keywords=None, defaults=[\'False\', \'-1\'], "
}
member_method {
name: "clear_session"
@@ -366,7 +366,7 @@ tf_module {
}
member_method {
name: "relu"
- argspec: "args=[\'x\', \'alpha\', \'max_value\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\'], "
+ argspec: "args=[\'x\', \'alpha\', \'max_value\', \'threshold\'], varargs=None, keywords=None, defaults=[\'0.0\', \'None\', \'0\'], "
}
member_method {
name: "repeat"
@@ -462,7 +462,7 @@ tf_module {
}
member_method {
name: "sparse_categorical_crossentropy"
- argspec: "args=[\'target\', \'output\', \'from_logits\'], varargs=None, keywords=None, defaults=[\'False\'], "
+ argspec: "args=[\'target\', \'output\', \'from_logits\', \'axis\'], varargs=None, keywords=None, defaults=[\'False\', \'-1\'], "
}
member_method {
name: "spatial_2d_padding"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt
index 2f52464315..e58ba18c1c 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.callbacks.-tensor-board.pbtxt
@@ -5,7 +5,7 @@ tf_class {
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\'], "
+ argspec: "args=[\'self\', \'log_dir\', \'histogram_freq\', \'batch_size\', \'write_graph\', \'write_grads\', \'write_images\', \'embeddings_freq\', \'embeddings_layer_names\', \'embeddings_metadata\', \'embeddings_data\'], varargs=None, keywords=None, defaults=[\'./logs\', \'0\', \'32\', \'True\', \'False\', \'False\', \'0\', \'None\', \'None\', \'None\'], "
}
member_method {
name: "on_batch_begin"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.layers.-re-l-u.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.layers.-re-l-u.pbtxt
index c00fa79adf..4d3de58bd1 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.layers.-re-l-u.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.layers.-re-l-u.pbtxt
@@ -82,7 +82,7 @@ tf_class {
}
member_method {
name: "__init__"
- argspec: "args=[\'self\', \'max_value\'], varargs=None, keywords=kwargs, defaults=[\'None\'], "
+ argspec: "args=[\'self\', \'max_value\', \'negative_slope\', \'threshold\'], varargs=None, keywords=kwargs, defaults=[\'None\', \'0\', \'0\'], "
}
member_method {
name: "add_loss"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.metrics.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.metrics.pbtxt
index a97a9b5758..73b577da37 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.metrics.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.metrics.pbtxt
@@ -22,7 +22,7 @@ tf_module {
}
member_method {
name: "binary_accuracy"
- argspec: "args=[\'y_true\', \'y_pred\'], varargs=None, keywords=None, defaults=None"
+ argspec: "args=[\'y_true\', \'y_pred\', \'threshold\'], varargs=None, keywords=None, defaults=[\'0.5\'], "
}
member_method {
name: "binary_crossentropy"
diff --git a/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt b/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt
index 5211657414..6a83129f7d 100644
--- a/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.keras.models.-sequential.pbtxt
@@ -267,6 +267,10 @@ tf_class {
argspec: "args=[\'self\', \'line_length\', \'positions\', \'print_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
}
member_method {
+ name: "symbolic_set_inputs"
+ argspec: "args=[\'self\', \'inputs\'], varargs=None, keywords=None, defaults=None"
+ }
+ member_method {
name: "test_on_batch"
argspec: "args=[\'self\', \'x\', \'y\', \'sample_weight\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-zeros.__metaclass__.pbtxt b/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-zeros.__metaclass__.pbtxt
new file mode 100644
index 0000000000..49ff85728f
--- /dev/null
+++ b/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-zeros.__metaclass__.pbtxt
@@ -0,0 +1,14 @@
+path: "tensorflow.linalg.LinearOperatorZeros.__metaclass__"
+tf_class {
+ is_instance: "<class \'abc.ABCMeta\'>"
+ member_method {
+ name: "__init__"
+ }
+ member_method {
+ name: "mro"
+ }
+ member_method {
+ name: "register"
+ argspec: "args=[\'cls\', \'subclass\'], varargs=None, keywords=None, defaults=None"
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-zeros.pbtxt b/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-zeros.pbtxt
new file mode 100644
index 0000000000..a1b0e06b47
--- /dev/null
+++ b/tensorflow/tools/api/golden/tensorflow.linalg.-linear-operator-zeros.pbtxt
@@ -0,0 +1,130 @@
+path: "tensorflow.linalg.LinearOperatorZeros"
+tf_class {
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator_zeros.LinearOperatorZeros\'>"
+ is_instance: "<class \'tensorflow.python.ops.linalg.linear_operator.LinearOperator\'>"
+ is_instance: "<type \'object\'>"
+ member {
+ name: "batch_shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "domain_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "dtype"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "graph_parents"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_non_singular"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_positive_definite"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_self_adjoint"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "is_square"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "name"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "range_dimension"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "shape"
+ mtype: "<type \'property\'>"
+ }
+ member {
+ name: "tensor_rank"
+ mtype: "<type \'property\'>"
+ }
+ member_method {
+ name: "__init__"
+ argspec: "args=[\'self\', \'num_rows\', \'num_columns\', \'batch_shape\', \'dtype\', \'is_non_singular\', \'is_self_adjoint\', \'is_positive_definite\', \'is_square\', \'assert_proper_shapes\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'False\', \'True\', \'False\', \'True\', \'False\', \'LinearOperatorZeros\'], "
+ }
+ member_method {
+ name: "add_to_tensor"
+ argspec: "args=[\'self\', \'mat\', \'name\'], varargs=None, keywords=None, defaults=[\'add_to_tensor\'], "
+ }
+ member_method {
+ name: "assert_non_singular"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_non_singular\'], "
+ }
+ member_method {
+ name: "assert_positive_definite"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_positive_definite\'], "
+ }
+ member_method {
+ name: "assert_self_adjoint"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'assert_self_adjoint\'], "
+ }
+ member_method {
+ name: "batch_shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'batch_shape_tensor\'], "
+ }
+ member_method {
+ name: "determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'det\'], "
+ }
+ member_method {
+ name: "diag_part"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'diag_part\'], "
+ }
+ member_method {
+ name: "domain_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'domain_dimension_tensor\'], "
+ }
+ member_method {
+ name: "log_abs_determinant"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'log_abs_det\'], "
+ }
+ member_method {
+ name: "matmul"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'matmul\'], "
+ }
+ member_method {
+ name: "matvec"
+ argspec: "args=[\'self\', \'x\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'matvec\'], "
+ }
+ member_method {
+ name: "range_dimension_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'range_dimension_tensor\'], "
+ }
+ member_method {
+ name: "shape_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'shape_tensor\'], "
+ }
+ member_method {
+ name: "solve"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'adjoint_arg\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'False\', \'solve\'], "
+ }
+ member_method {
+ name: "solvevec"
+ argspec: "args=[\'self\', \'rhs\', \'adjoint\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'solve\'], "
+ }
+ member_method {
+ name: "tensor_rank_tensor"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'tensor_rank_tensor\'], "
+ }
+ member_method {
+ name: "to_dense"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'to_dense\'], "
+ }
+ member_method {
+ name: "trace"
+ argspec: "args=[\'self\', \'name\'], varargs=None, keywords=None, defaults=[\'trace\'], "
+ }
+}
diff --git a/tensorflow/tools/api/golden/tensorflow.linalg.pbtxt b/tensorflow/tools/api/golden/tensorflow.linalg.pbtxt
index 3b5845f99a..d979116887 100644
--- a/tensorflow/tools/api/golden/tensorflow.linalg.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.linalg.pbtxt
@@ -52,6 +52,10 @@ tf_module {
name: "LinearOperatorScaledIdentity"
mtype: "<class \'abc.ABCMeta\'>"
}
+ member {
+ name: "LinearOperatorZeros"
+ mtype: "<class \'abc.ABCMeta\'>"
+ }
member_method {
name: "adjoint"
argspec: "args=[\'matrix\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
diff --git a/tensorflow/tools/api/golden/tensorflow.pbtxt b/tensorflow/tools/api/golden/tensorflow.pbtxt
index 4f90743fec..5eb42b4db3 100644
--- a/tensorflow/tools/api/golden/tensorflow.pbtxt
+++ b/tensorflow/tools/api/golden/tensorflow.pbtxt
@@ -258,7 +258,7 @@ tf_module {
}
member {
name: "Variable"
- mtype: "<type \'type\'>"
+ mtype: "<class \'tensorflow.python.ops.variables.VariableMetaclass\'>"
}
member {
name: "VariableAggregation"
diff --git a/tensorflow/tools/api/tests/api_compatibility_test.py b/tensorflow/tools/api/tests/api_compatibility_test.py
index 90375a794f..d1b34fb242 100644
--- a/tensorflow/tools/api/tests/api_compatibility_test.py
+++ b/tensorflow/tools/api/tests/api_compatibility_test.py
@@ -34,6 +34,13 @@ import sys
import unittest
import tensorflow as tf
+# pylint: disable=g-import-not-at-top
+try:
+ from tensorflow.compat import v1 as tf_v1
+ # We import compat.v1 as tf_v1 instead.
+ del tf.compat.v1
+except ImportError:
+ tf_v1 = None
from google.protobuf import message
from google.protobuf import text_format
@@ -46,6 +53,7 @@ from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
+# pylint: enable=g-import-not-at-top
# FLAGS defined at the bottom:
@@ -215,25 +223,19 @@ class ApiCompatibilityTest(test.TestCase):
visitor.do_not_descend_map['tf'].append('contrib')
traverse.traverse(tf, visitor)
- @unittest.skipUnless(
- sys.version_info.major == 2,
- 'API compabitility test goldens are generated using python2.')
- def testAPIBackwardsCompatibility(self):
- # Extract all API stuff.
+ def checkBackwardsCompatibility(self, root, golden_file_pattern):
+ # Extract all API stuff.
visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
public_api_visitor = public_api.PublicAPIVisitor(visitor)
public_api_visitor.do_not_descend_map['tf'].append('contrib')
public_api_visitor.do_not_descend_map['tf.GPUOptions'] = ['Experimental']
- traverse.traverse(tf, public_api_visitor)
+ traverse.traverse(root, public_api_visitor)
proto_dict = visitor.GetProtos()
# Read all golden files.
- expression = os.path.join(
- resource_loader.get_root_dir_with_all_resources(),
- _KeyToFilePath('*'))
- golden_file_list = file_io.get_matching_files(expression)
+ golden_file_list = file_io.get_matching_files(golden_file_pattern)
def _ReadFileToProto(filename):
"""Read a filename, create a protobuf from its contents."""
@@ -254,6 +256,26 @@ class ApiCompatibilityTest(test.TestCase):
verbose=FLAGS.verbose_diffs,
update_goldens=FLAGS.update_goldens)
+ @unittest.skipUnless(
+ sys.version_info.major == 2,
+ 'API compatibility test goldens are generated using python2.')
+ def testAPIBackwardsCompatibility(self):
+ golden_file_pattern = os.path.join(
+ resource_loader.get_root_dir_with_all_resources(),
+ _KeyToFilePath('*'))
+ self.checkBackwardsCompatibility(tf, golden_file_pattern)
+
+ @unittest.skipUnless(
+ sys.version_info.major == 2,
+ 'API compatibility test goldens are generated using python2.')
+ def testAPIBackwardsCompatibilityV1(self):
+ if not tf_v1:
+ return
+ golden_file_pattern = os.path.join(
+ resource_loader.get_root_dir_with_all_resources(),
+ _KeyToFilePath('*'))
+ self.checkBackwardsCompatibility(tf_v1, golden_file_pattern)
+
if __name__ == '__main__':
parser = argparse.ArgumentParser()
diff --git a/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le b/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le
index e879c34bbd..ada2c63880 100644
--- a/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le
+++ b/tensorflow/tools/ci_build/Dockerfile.cpu.ppc64le
@@ -7,7 +7,7 @@ COPY install/*.sh /install/
RUN /install/install_bootstrap_deb_packages.sh
RUN add-apt-repository -y ppa:openjdk-r/ppa
RUN /install/install_deb_packages.sh
-RUN apt-get update && apt-get install -y libopenblas-dev
+RUN /install/install_openblas_ppc64le.sh
RUN /install/install_hdf5_ppc64le.sh
RUN /install/install_pip_packages.sh
RUN /install/install_bazel_from_source.sh
diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu b/tensorflow/tools/ci_build/Dockerfile.gpu
index 7591ecc04e..33d0425918 100644
--- a/tensorflow/tools/ci_build/Dockerfile.gpu
+++ b/tensorflow/tools/ci_build/Dockerfile.gpu
@@ -14,6 +14,11 @@ RUN /install/install_bootstrap_deb_packages.sh
RUN add-apt-repository -y ppa:openjdk-r/ppa && \
add-apt-repository -y ppa:george-edison55/cmake-3.x
RUN /install/install_deb_packages.sh
+
+# Install NCCL
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ libnccl2=2.2.13-1+cuda9.0
+
RUN /install/install_pip_packages.sh
RUN /install/install_bazel.sh
RUN /install/install_golang.sh
@@ -22,6 +27,11 @@ RUN /install/install_golang.sh
COPY install/.bazelrc /etc/bazel.bazelrc
ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH
+# Link NCCL library and header where the build script expects them.
+RUN mkdir /usr/local/cuda-9.0/lib && \
+ ln -s /usr/lib/x86_64-linux-gnu/libnccl.so.2 /usr/local/cuda/lib/libnccl.so.2 && \
+ ln -s /usr/include/nccl.h /usr/local/cuda/include/nccl.h
+
# Configure the build for our CUDA configuration.
ENV TF_NEED_CUDA 1
ENV TF_CUDA_COMPUTE_CAPABILITIES 3.0
diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le b/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le
index 8967138747..a404f129ab 100644
--- a/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le
+++ b/tensorflow/tools/ci_build/Dockerfile.gpu.ppc64le
@@ -13,7 +13,7 @@ ARG DEBIAN_FRONTEND=noninteractive
RUN /install/install_bootstrap_deb_packages.sh
RUN add-apt-repository -y ppa:openjdk-r/ppa
RUN /install/install_deb_packages.sh
-RUN apt-get update && apt-get install -y libopenblas-dev
+RUN /install/install_openblas_ppc64le.sh
RUN /install/install_hdf5_ppc64le.sh
RUN /install/install_pip_packages.sh
RUN /install/install_bazel_from_source.sh
diff --git a/tensorflow/tools/ci_build/ci_parameterized_build.sh b/tensorflow/tools/ci_build/ci_parameterized_build.sh
index 08e2c3edd2..5115be8c6d 100755
--- a/tensorflow/tools/ci_build/ci_parameterized_build.sh
+++ b/tensorflow/tools/ci_build/ci_parameterized_build.sh
@@ -150,36 +150,7 @@ BAZEL_TARGET="//tensorflow/... -//tensorflow/compiler/..."
if [[ -n "$TF_SKIP_CONTRIB_TESTS" ]]; then
BAZEL_TARGET="$BAZEL_TARGET -//tensorflow/contrib/..."
else
- BAZEL_TARGET="${BAZEL_TARGET} -//tensorflow/contrib/lite/..."
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite:context_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite:framework"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite:interpreter_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite:model_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/toco:toco"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite:simple_memory_arena_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite:string_util_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:activations_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:add_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:basic_rnn_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:concatenation_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:conv_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:depthwise_conv_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:embedding_lookup_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:embedding_lookup_sparse_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:fully_connected_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:hashtable_lookup_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:local_response_norm_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:lsh_projection_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:lstm_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:l2norm_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:mul_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:pooling_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:reshape_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:resize_bilinear_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:skip_gram_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:softmax_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:space_to_depth_test"
- BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/kernels:svdf_test"
+ BAZEL_TARGET="${BAZEL_TARGET} //tensorflow/contrib/lite/..."
fi
TUT_TEST_DATA_DIR="/tmp/tf_tutorial_test_data"
diff --git a/tensorflow/tools/ci_build/ci_sanity.sh b/tensorflow/tools/ci_build/ci_sanity.sh
index db37edf809..866fe95d2b 100755
--- a/tensorflow/tools/ci_build/ci_sanity.sh
+++ b/tensorflow/tools/ci_build/ci_sanity.sh
@@ -354,7 +354,7 @@ do_external_licenses_check(){
# Whitelist
echo ${EXTRA_LICENSE_FILE}
- grep -e "@bazel_tools//src" -e "@bazel_tools//tools/" -e "@com_google_absl//" -e "//external" -e "@local" -e "@com_github_googlecloudplatform_google_cloud_cpp//" -v ${EXTRA_LICENSES_FILE} > temp.txt
+ grep -e "@bazel_tools//src" -e "@bazel_tools//tools/" -e "@com_google_absl//" -e "//external" -e "@local" -e "@com_github_googlecloudplatform_google_cloud_cpp//" -e "@embedded_jdk//" -v ${EXTRA_LICENSES_FILE} > temp.txt
mv temp.txt ${EXTRA_LICENSES_FILE}
diff --git a/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh b/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh
index d0816c92b7..75da9bb835 100755
--- a/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh
+++ b/tensorflow/tools/ci_build/gpu_build/parallel_gpu_execute.sh
@@ -35,6 +35,30 @@ elif [[ ${BASH_VER_MAJOR} -eq 4 ]] && [[ ${BASH_VER_MINOR} -lt 2 ]]; then
exit 1
fi
+function is_absolute {
+ [[ "$1" = /* ]] || [[ "$1" =~ ^[a-zA-Z]:[/\\].* ]]
+}
+
+RUNFILES_MANIFEST_FILE="${TEST_SRCDIR}/MANIFEST"
+function rlocation() {
+ if is_absolute "$1" ; then
+ # If the file path is already fully specified, simply return it.
+ echo "$1"
+ elif [[ -e "$TEST_SRCDIR/$1" ]]; then
+ # If the file exists in the $TEST_SRCDIR then just use it.
+ echo "$TEST_SRCDIR/$1"
+ elif [[ -e "$RUNFILES_MANIFEST_FILE" ]]; then
+ # If a runfiles manifest file exists then use it.
+ echo "$(grep "^$1 " "$RUNFILES_MANIFEST_FILE" | sed 's/[^ ]* //')"
+ fi
+}
+
+TEST_BINARY="$(rlocation $TEST_WORKSPACE/${1#./})"
+shift
+
+# Make sure /var/lock exists; this may not be true under MSYS
+mkdir -p /var/lock
+
TF_GPU_COUNT=${TF_GPU_COUNT:-8}
for i in `seq 0 $((TF_GPU_COUNT-1))`; do
@@ -45,8 +69,8 @@ for i in `seq 0 $((TF_GPU_COUNT-1))`; do
# This export only works within the brackets, so it is isolated to one
# single command.
export CUDA_VISIBLE_DEVICES=$i
- echo "Running test $* on GPU $CUDA_VISIBLE_DEVICES"
- $@
+ echo "Running test $TEST_BINARY $* on GPU $CUDA_VISIBLE_DEVICES"
+ "$TEST_BINARY" $@
)
return_code=$?
flock -u "$lock_fd"
diff --git a/tensorflow/tools/ci_build/install/install_bazel.sh b/tensorflow/tools/ci_build/install/install_bazel.sh
index 3e27a94cf2..e284401b8a 100755
--- a/tensorflow/tools/ci_build/install/install_bazel.sh
+++ b/tensorflow/tools/ci_build/install/install_bazel.sh
@@ -15,7 +15,7 @@
# ==============================================================================
# Select bazel version.
-BAZEL_VERSION="0.11.0"
+BAZEL_VERSION="0.15.0"
set +e
local_bazel_ver=$(bazel version 2>&1 | grep -i label | awk '{print $3}')
diff --git a/tensorflow/tools/ci_build/install/install_bazel_from_source.sh b/tensorflow/tools/ci_build/install/install_bazel_from_source.sh
index ddad00c5f0..87be81577d 100755
--- a/tensorflow/tools/ci_build/install/install_bazel_from_source.sh
+++ b/tensorflow/tools/ci_build/install/install_bazel_from_source.sh
@@ -18,7 +18,7 @@
# It will compile bazel from source and install it in /usr/local/bin
# Select bazel version.
-BAZEL_VERSION="0.11.0"
+BAZEL_VERSION="0.15.0"
set +e
local_bazel_ver=$(bazel version 2>&1 | grep -i label | awk '{print $3}')
diff --git a/tensorflow/tools/ci_build/install/install_openblas_ppc64le.sh b/tensorflow/tools/ci_build/install/install_openblas_ppc64le.sh
new file mode 100755
index 0000000000..107cc61ff5
--- /dev/null
+++ b/tensorflow/tools/ci_build/install/install_openblas_ppc64le.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+OPENBLAS_SRC_PATH=/tmp/openblas_src/
+POWER="POWER8"
+USE_OPENMP="USE_OPENMP=1"
+OPENBLAS_INSTALL_PATH="/usr"
+apt-get update
+apt-get install -y gfortran gfortran-5
+rm -rf ${OPENBLAS_SRC_PATH}
+git clone -b release-0.3.0 https://github.com/xianyi/OpenBLAS ${OPENBLAS_SRC_PATH}
+cd ${OPENBLAS_SRC_PATH}
+# Pick up fix for OpenBLAS issue 1571
+git cherry-pick -X theirs 961d25e9c7e4a1758adb1dbeaa15187de69dd052
+make TARGET=${POWER} ${USE_OPENMP} FC=gfortran
+make PREFIX=${OPENBLAS_INSTALL_PATH} install
diff --git a/tensorflow/tools/ci_build/linux/cpu/run_py3_contrib.sh b/tensorflow/tools/ci_build/linux/cpu/run_py3_contrib.sh
index 2b68de3c5b..f6fa9251d4 100755
--- a/tensorflow/tools/ci_build/linux/cpu/run_py3_contrib.sh
+++ b/tensorflow/tools/ci_build/linux/cpu/run_py3_contrib.sh
@@ -34,35 +34,4 @@ yes "" | $PYTHON_BIN_PATH configure.py
bazel test --test_tag_filters=-no_oss,-oss_serial,-gpu,-benchmark-test -k \
--jobs=${N_JOBS} --test_timeout 300,450,1200,3600 --config=opt \
--test_size_filters=small,medium --test_output=errors -- \
- //tensorflow/contrib/... \
- -//tensorflow/contrib/lite/... \
- //tensorflow/contrib/lite:context_test \
- //tensorflow/contrib/lite:framework \
- //tensorflow/contrib/lite:interpreter_test \
- //tensorflow/contrib/lite:model_test \
- //tensorflow/contrib/lite/toco:toco \
- //tensorflow/contrib/lite:simple_memory_arena_test \
- //tensorflow/contrib/lite:string_util_test \
- //tensorflow/contrib/lite/kernels:activations_test \
- //tensorflow/contrib/lite/kernels:add_test \
- //tensorflow/contrib/lite/kernels:basic_rnn_test \
- //tensorflow/contrib/lite/kernels:concatenation_test \
- //tensorflow/contrib/lite/kernels:conv_test \
- //tensorflow/contrib/lite/kernels:depthwise_conv_test \
- //tensorflow/contrib/lite/kernels:embedding_lookup_test \
- //tensorflow/contrib/lite/kernels:embedding_lookup_sparse_test \
- //tensorflow/contrib/lite/kernels:fully_connected_test \
- //tensorflow/contrib/lite/testing:generated_zip_tests \
- //tensorflow/contrib/lite/kernels:hashtable_lookup_test \
- //tensorflow/contrib/lite/kernels:local_response_norm_test \
- //tensorflow/contrib/lite/kernels:lsh_projection_test \
- //tensorflow/contrib/lite/kernels:lstm_test \
- //tensorflow/contrib/lite/kernels:l2norm_test \
- //tensorflow/contrib/lite/kernels:mul_test \
- //tensorflow/contrib/lite/kernels:pooling_test \
- //tensorflow/contrib/lite/kernels:reshape_test \
- //tensorflow/contrib/lite/kernels:resize_bilinear_test \
- //tensorflow/contrib/lite/kernels:skip_gram_test \
- //tensorflow/contrib/lite/kernels:softmax_test \
- //tensorflow/contrib/lite/kernels:space_to_depth_test \
- //tensorflow/contrib/lite/kernels:svdf_test
+ //tensorflow/contrib/...
diff --git a/tensorflow/tools/ci_build/linux/mkl/build-dev-container.sh b/tensorflow/tools/ci_build/linux/mkl/build-dev-container.sh
index ad22ebe4eb..a1d91a6123 100755
--- a/tensorflow/tools/ci_build/linux/mkl/build-dev-container.sh
+++ b/tensorflow/tools/ci_build/linux/mkl/build-dev-container.sh
@@ -34,12 +34,17 @@ echo "TF_DOCKER_BUILD_DEVEL_BRANCH=${TF_DOCKER_BUILD_DEVEL_BRANCH}"
echo "TF_DOCKER_BUILD_IMAGE_NAME=${TF_DOCKER_BUILD_IMAGE_NAME}"
echo "TF_DOCKER_BUILD_VERSION=${TF_DOCKER_BUILD_VERSION}"
+# Build containers for AVX
+# Include the instructions for sandybridge and later, but tune for ivybridge
+TF_BAZEL_BUILD_OPTIONS="--config=mkl --copt=-march=sandybridge --copt=-mtune=ivybridge --copt=-O3 --cxxopt=-D_GLIBCXX_USE_CXX11_ABI=0"
+
# build the python 2 container and whl
TF_DOCKER_BUILD_TYPE="MKL" \
TF_DOCKER_BUILD_IS_DEVEL="YES" \
TF_DOCKER_BUILD_DEVEL_BRANCH="${TF_DOCKER_BUILD_DEVEL_BRANCH}" \
TF_DOCKER_BUILD_IMAGE_NAME="${TF_DOCKER_BUILD_IMAGE_NAME}" \
TF_DOCKER_BUILD_VERSION="${TF_DOCKER_BUILD_VERSION}" \
+ TF_BAZEL_BUILD_OPTIONS="${TF_BAZEL_BUILD_OPTIONS}" \
${WORKSPACE}/tensorflow/tools/docker/parameterized_docker_build.sh
# build the python 3 container and whl
@@ -49,5 +54,29 @@ TF_DOCKER_BUILD_TYPE="MKL" \
TF_DOCKER_BUILD_IMAGE_NAME="${TF_DOCKER_BUILD_IMAGE_NAME}" \
TF_DOCKER_BUILD_VERSION="${TF_DOCKER_BUILD_VERSION}" \
TF_DOCKER_BUILD_PYTHON_VERSION="PYTHON3" \
+ TF_BAZEL_BUILD_OPTIONS="${TF_BAZEL_BUILD_OPTIONS}" \
+ ${WORKSPACE}/tensorflow/tools/docker/parameterized_docker_build.sh
+
+# Build containers for AVX2
+# Include the instructions for haswell and later, but tune for broadwell
+TF_BAZEL_BUILD_OPTIONS="--config=mkl --copt=-march=haswell --copt=-mtune=broadwell --copt=-O3 --cxxopt=-D_GLIBCXX_USE_CXX11_ABI=0"
+
+# build the python 2 container and whl
+TF_DOCKER_BUILD_TYPE="MKL" \
+ TF_DOCKER_BUILD_IS_DEVEL="YES" \
+ TF_DOCKER_BUILD_DEVEL_BRANCH="${TF_DOCKER_BUILD_DEVEL_BRANCH}" \
+ TF_DOCKER_BUILD_IMAGE_NAME="${TF_DOCKER_BUILD_IMAGE_NAME}" \
+ TF_DOCKER_BUILD_VERSION="${TF_DOCKER_BUILD_VERSION}-avx2" \
+ TF_BAZEL_BUILD_OPTIONS="${TF_BAZEL_BUILD_OPTIONS}" \
${WORKSPACE}/tensorflow/tools/docker/parameterized_docker_build.sh
+# build the python 3 container and whl
+TF_DOCKER_BUILD_TYPE="MKL" \
+ TF_DOCKER_BUILD_IS_DEVEL="YES" \
+ TF_DOCKER_BUILD_DEVEL_BRANCH="${TF_DOCKER_BUILD_DEVEL_BRANCH}" \
+ TF_DOCKER_BUILD_IMAGE_NAME="${TF_DOCKER_BUILD_IMAGE_NAME}" \
+ TF_DOCKER_BUILD_VERSION="${TF_DOCKER_BUILD_VERSION}-avx2" \
+ TF_DOCKER_BUILD_PYTHON_VERSION="PYTHON3" \
+ TF_BAZEL_BUILD_OPTIONS="${TF_BAZEL_BUILD_OPTIONS}" \
+ ${WORKSPACE}/tensorflow/tools/docker/parameterized_docker_build.sh
+
diff --git a/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh b/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
index c03cbd9c66..0482cf619a 100644
--- a/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
+++ b/tensorflow/tools/ci_build/windows/bazel/bazel_test_lib.sh
@@ -33,10 +33,10 @@ function set_remote_cache_options {
echo "build --tls_enabled=true" >> "${TMP_BAZELRC}"
echo "build --remote_timeout=3600" >> "${TMP_BAZELRC}"
echo "build --auth_enabled=true" >> "${TMP_BAZELRC}"
- echo "build --spawn_strategy=remote" >> "${TMP_BAZELRC}"
- echo "build --strategy=Javac=remote" >> "${TMP_BAZELRC}"
- echo "build --strategy=Closure=remote" >> "${TMP_BAZELRC}"
- echo "build --genrule_strategy=remote" >> "${TMP_BAZELRC}"
+ echo "build --spawn_strategy=standalone" >> "${TMP_BAZELRC}"
+ echo "build --strategy=Javac=standalone" >> "${TMP_BAZELRC}"
+ echo "build --strategy=Closure=standalone" >> "${TMP_BAZELRC}"
+ echo "build --genrule_strategy=standalone" >> "${TMP_BAZELRC}"
echo "build --google_credentials=$GOOGLE_CLOUD_CREDENTIAL" >> "${TMP_BAZELRC}"
}
diff --git a/tensorflow/tools/ci_build/windows/bazel/common_env.sh b/tensorflow/tools/ci_build/windows/bazel/common_env.sh
index 3af132217e..333a89d3f5 100644
--- a/tensorflow/tools/ci_build/windows/bazel/common_env.sh
+++ b/tensorflow/tools/ci_build/windows/bazel/common_env.sh
@@ -26,7 +26,8 @@
# * Bazel windows executable copied as "bazel.exe" and included in PATH.
# Use a temporary directory with a short name.
-export TMPDIR="C:/tmp"
+export TMPDIR=${TMPDIR:-"C:/tmp"}
+export TMPDIR=$(cygpath -m "$TMPDIR")
mkdir -p "$TMPDIR"
# Set bash path
diff --git a/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh b/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
index ed73401467..47e0e5dd59 100644
--- a/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
+++ b/tensorflow/tools/ci_build/windows/cpu/pip/build_tf_windows.sh
@@ -53,30 +53,39 @@ function cleanup {
}
trap cleanup EXIT
-skip_test=0
-release_build=0
+PY_TEST_DIR="py_test_dir"
+SKIP_TEST=0
+RELEASE_BUILD=0
+TEST_TARGET="//${PY_TEST_DIR}/tensorflow/python/... \
+ //${PY_TEST_DIR}/tensorflow/contrib/... "
+
+# --skip_test Skip running tests
+# --enable_remote_cache Add options to enable remote cache for build and test
+# --release_build Build for release; compilation time will be longer to
+# ensure performance
+# --test_core_only Use tensorflow/python/... as test target
+# --test_contrib_only Use tensorflow/contrib/... as test target
for ARG in "$@"; do
- if [[ "$ARG" == --skip_test ]]; then
- skip_test=1
- elif [[ "$ARG" == --enable_remote_cache ]]; then
- set_remote_cache_options
- elif [[ "$ARG" == --release_build ]]; then
- release_build=1
- fi
+ case "$ARG" in
+ --skip_test) SKIP_TEST=1 ;;
+ --enable_remote_cache) set_remote_cache_options ;;
+ --release_build) RELEASE_BUILD=1 ;;
+ --test_core_only) TEST_TARGET="//${PY_TEST_DIR}/tensorflow/python/..." ;;
+ --test_contrib_only) TEST_TARGET="//${PY_TEST_DIR}/tensorflow/contrib/..." ;;
+ *)
+ esac
done
-if [[ "$release_build" != 1 ]]; then
- # --define=override_eigen_strong_inline=true speeds up the compiling of conv_grad_ops_3d.cc and conv_ops_3d.cc
+if [[ "$RELEASE_BUILD" == 1 ]]; then
+ # Overriding Eigen strong inline speeds up compilation of conv_grad_ops_3d.cc and conv_ops_3d.cc
# by 20 minutes. See https://github.com/tensorflow/tensorflow/issues/10521
- # Because this hurts the performance of TF, we don't enable it in release build.
- echo "build --define=override_eigen_strong_inline=true" >> "${TMP_BAZELRC}"
+ # Because this hurts the performance of TF, we don't override it in release builds.
+ export TF_OVERRIDE_EIGEN_STRONG_INLINE=0
+else
+ export TF_OVERRIDE_EIGEN_STRONG_INLINE=1
fi
-# The host and target platforms are the same in Windows build. So we don't have
-# to distinct them. This helps avoid building the same targets twice.
-echo "build --distinct_host_configuration=false" >> "${TMP_BAZELRC}"
-
# Enable short object file path to avoid long path issue on Windows.
echo "startup --output_user_root=${TMPDIR}" >> "${TMP_BAZELRC}"
@@ -88,12 +97,11 @@ run_configure_for_cpu_build
bazel build --announce_rc --config=opt tensorflow/tools/pip_package:build_pip_package || exit $?
-if [[ "$skip_test" == 1 ]]; then
+if [[ "$SKIP_TEST" == 1 ]]; then
exit 0
fi
# Create a python test directory to avoid package name conflict
-PY_TEST_DIR="py_test_dir"
create_python_test_dir "${PY_TEST_DIR}"
./bazel-bin/tensorflow/tools/pip_package/build_pip_package "$PWD/${PY_TEST_DIR}"
@@ -111,7 +119,7 @@ bazel test --announce_rc --config=opt -k --test_output=errors \
--define=no_tensorflow_py_deps=true --test_lang_filters=py \
--test_tag_filters=-no_pip,-no_windows,-no_oss \
--build_tag_filters=-no_pip,-no_windows,-no_oss --build_tests_only \
+ --test_size_filters=small,medium \
--jobs="${N_JOBS}" --test_timeout="300,450,1200,3600" \
--flaky_test_attempts=3 \
- //${PY_TEST_DIR}/tensorflow/python/... \
- //${PY_TEST_DIR}/tensorflow/contrib/...
+ ${TEST_TARGET}
diff --git a/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh b/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
index fe3bce428f..e3eee11080 100644
--- a/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
+++ b/tensorflow/tools/ci_build/windows/gpu/pip/build_tf_windows.sh
@@ -53,30 +53,39 @@ function cleanup {
}
trap cleanup EXIT
-skip_test=0
-release_build=0
+PY_TEST_DIR="py_test_dir"
+SKIP_TEST=0
+RELEASE_BUILD=0
+TEST_TARGET="//${PY_TEST_DIR}/tensorflow/python/... \
+ //${PY_TEST_DIR}/tensorflow/contrib/... "
+
+# --skip_test Skip running tests
+# --enable_remote_cache Add options to enable remote cache for build and test
+# --release_build Build for release; compilation time will be longer to
+# ensure performance
+# --test_core_only Use tensorflow/python/... as test target
+# --test_contrib_only Use tensorflow/contrib/... as test target
for ARG in "$@"; do
- if [[ "$ARG" == --skip_test ]]; then
- skip_test=1
- elif [[ "$ARG" == --enable_remote_cache ]]; then
- set_remote_cache_options
- elif [[ "$ARG" == --release_build ]]; then
- release_build=1
- fi
+ case "$ARG" in
+ --skip_test) SKIP_TEST=1 ;;
+ --enable_remote_cache) set_remote_cache_options ;;
+ --release_build) RELEASE_BUILD=1 ;;
+ --test_core_only) TEST_TARGET="//${PY_TEST_DIR}/tensorflow/python/..." ;;
+ --test_contrib_only) TEST_TARGET="//${PY_TEST_DIR}/tensorflow/contrib/..." ;;
+ *)
+ esac
done
-if [[ "$release_build" != 1 ]]; then
- # --define=override_eigen_strong_inline=true speeds up the compiling of conv_grad_ops_3d.cc and conv_ops_3d.cc
+if [[ "$RELEASE_BUILD" == 1 ]]; then
+ # Overriding Eigen strong inline speeds up compilation of conv_grad_ops_3d.cc and conv_ops_3d.cc
# by 20 minutes. See https://github.com/tensorflow/tensorflow/issues/10521
- # Because this hurts the performance of TF, we don't enable it in release build.
- echo "build --define=override_eigen_strong_inline=true" >> "${TMP_BAZELRC}"
+ # Because this hurts the performance of TF, we don't override it in release builds.
+ export TF_OVERRIDE_EIGEN_STRONG_INLINE=0
+else
+ export TF_OVERRIDE_EIGEN_STRONG_INLINE=1
fi
-# The host and target platforms are the same in Windows build. So we don't have
-# to distinct them. This helps avoid building the same targets twice.
-echo "build --distinct_host_configuration=false" >> "${TMP_BAZELRC}"
-
# Enable short object file path to avoid long path issue on Windows.
echo "startup --output_user_root=${TMPDIR}" >> "${TMP_BAZELRC}"
@@ -91,12 +100,11 @@ run_configure_for_gpu_build
bazel build --announce_rc --config=opt tensorflow/tools/pip_package:build_pip_package || exit $?
-if [[ "$skip_test" == 1 ]]; then
+if [[ "$SKIP_TEST" == 1 ]]; then
exit 0
fi
# Create a python test directory to avoid package name conflict
-PY_TEST_DIR="py_test_dir"
create_python_test_dir "${PY_TEST_DIR}"
./bazel-bin/tensorflow/tools/pip_package/build_pip_package "$PWD/${PY_TEST_DIR}"
@@ -105,14 +113,18 @@ create_python_test_dir "${PY_TEST_DIR}"
PIP_NAME=$(ls ${PY_TEST_DIR}/tensorflow-*.whl)
reinstall_tensorflow_pip ${PIP_NAME}
+TF_GPU_COUNT=${TF_GPU_COUNT:-8}
+
# Define no_tensorflow_py_deps=true so that every py_test has no deps anymore,
# which will result testing system installed tensorflow
# GPU tests are very flaky when running concurrently, so set local_test_jobs=1
bazel test --announce_rc --config=opt -k --test_output=errors \
+ --test_env=TF_GPU_COUNT \
+ --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \
--define=no_tensorflow_py_deps=true --test_lang_filters=py \
--test_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,-no_oss \
--build_tag_filters=-no_pip,-no_windows,-no_windows_gpu,-no_gpu,-no_pip_gpu,-no_oss --build_tests_only \
- --local_test_jobs=1 --test_timeout="300,450,1200,3600" \
+ --test_size_filters=small,medium \
+ --local_test_jobs=$TF_GPU_COUNT --test_timeout="300,450,1200,3600" \
--flaky_test_attempts=3 \
- //${PY_TEST_DIR}/tensorflow/python/... \
- //${PY_TEST_DIR}/tensorflow/contrib/...
+ ${TEST_TARGET}
diff --git a/tensorflow/tools/compatibility/BUILD b/tensorflow/tools/compatibility/BUILD
index b7bfb29aae..55792c51fe 100644
--- a/tensorflow/tools/compatibility/BUILD
+++ b/tensorflow/tools/compatibility/BUILD
@@ -8,10 +8,17 @@ load(
"tf_cc_test", # @unused
)
+py_library(
+ name = "ast_edits",
+ srcs = ["ast_edits.py"],
+ srcs_version = "PY2AND3",
+)
+
py_binary(
name = "tf_upgrade",
srcs = ["tf_upgrade.py"],
srcs_version = "PY2AND3",
+ deps = [":ast_edits"],
)
py_test(
@@ -26,6 +33,28 @@ py_test(
],
)
+py_binary(
+ name = "tf_upgrade_v2",
+ srcs = [
+ "renames_v2.py",
+ "tf_upgrade_v2.py",
+ ],
+ srcs_version = "PY2AND3",
+ deps = [":ast_edits"],
+)
+
+py_test(
+ name = "tf_upgrade_v2_test",
+ srcs = ["tf_upgrade_v2_test.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ ":tf_upgrade_v2",
+ "//tensorflow/python:client_testlib",
+ "//tensorflow/python:framework_test_lib",
+ "@six_archive//:six",
+ ],
+)
+
# Keep for reference, this test will succeed in 0.11 but fail in 1.0
# py_test(
# name = "test_file_v0_11",
@@ -62,9 +91,37 @@ py_test(
],
)
+genrule(
+ name = "generate_upgraded_file_v2",
+ testonly = 1,
+ srcs = ["testdata/test_file_v1_10.py"],
+ outs = [
+ "test_file_v2_0.py",
+ "report_v2.txt",
+ ],
+ cmd = ("$(location :tf_upgrade_v2)" +
+ " --infile $(location testdata/test_file_v1_10.py)" +
+ " --outfile $(location test_file_v2_0.py)" +
+ " --reportfile $(location report_v2.txt)"),
+ tools = [":tf_upgrade_v2"],
+)
+
+py_test(
+ name = "test_file_v2_0",
+ size = "small",
+ srcs = ["test_file_v2_0.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow:tensorflow_py",
+ ],
+)
+
exports_files(
[
+ "ast_edits.py",
"tf_upgrade.py",
+ "renames_v2.py",
"testdata/test_file_v0_11.py",
+ "testdata/test_file_v1_10.py",
],
)
diff --git a/tensorflow/tools/compatibility/renames_v2.py b/tensorflow/tools/compatibility/renames_v2.py
new file mode 100644
index 0000000000..216aa41b60
--- /dev/null
+++ b/tensorflow/tools/compatibility/renames_v2.py
@@ -0,0 +1,134 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# pylint: disable=line-too-long
+"""List of renames to apply when converting from TF 1.0 to TF 2.0.
+
+THIS FILE IS AUTOGENERATED: To update, please run:
+ bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
+ bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
+This file should be updated whenever endpoints are deprecated.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+renames = {
+ 'tf.acos': 'tf.math.acos',
+ 'tf.acosh': 'tf.math.acosh',
+ 'tf.add': 'tf.math.add',
+ 'tf.as_string': 'tf.dtypes.as_string',
+ 'tf.asin': 'tf.math.asin',
+ 'tf.asinh': 'tf.math.asinh',
+ 'tf.atan': 'tf.math.atan',
+ 'tf.atan2': 'tf.math.atan2',
+ 'tf.atanh': 'tf.math.atanh',
+ 'tf.batch_to_space_nd': 'tf.manip.batch_to_space_nd',
+ 'tf.betainc': 'tf.math.betainc',
+ 'tf.ceil': 'tf.math.ceil',
+ 'tf.check_numerics': 'tf.debugging.check_numerics',
+ 'tf.cholesky': 'tf.linalg.cholesky',
+ 'tf.cos': 'tf.math.cos',
+ 'tf.cosh': 'tf.math.cosh',
+ 'tf.cross': 'tf.linalg.cross',
+ 'tf.decode_base64': 'tf.io.decode_base64',
+ 'tf.decode_compressed': 'tf.io.decode_compressed',
+ 'tf.decode_json_example': 'tf.io.decode_json_example',
+ 'tf.decode_raw': 'tf.io.decode_raw',
+ 'tf.dequantize': 'tf.quantization.dequantize',
+ 'tf.diag': 'tf.linalg.tensor_diag',
+ 'tf.diag_part': 'tf.linalg.tensor_diag_part',
+ 'tf.digamma': 'tf.math.digamma',
+ 'tf.encode_base64': 'tf.io.encode_base64',
+ 'tf.equal': 'tf.math.equal',
+ 'tf.erfc': 'tf.math.erfc',
+ 'tf.exp': 'tf.math.exp',
+ 'tf.expm1': 'tf.math.expm1',
+ 'tf.extract_image_patches': 'tf.image.extract_image_patches',
+ 'tf.fake_quant_with_min_max_args': 'tf.quantization.fake_quant_with_min_max_args',
+ 'tf.fake_quant_with_min_max_args_gradient': 'tf.quantization.fake_quant_with_min_max_args_gradient',
+ 'tf.fake_quant_with_min_max_vars': 'tf.quantization.fake_quant_with_min_max_vars',
+ 'tf.fake_quant_with_min_max_vars_gradient': 'tf.quantization.fake_quant_with_min_max_vars_gradient',
+ 'tf.fake_quant_with_min_max_vars_per_channel': 'tf.quantization.fake_quant_with_min_max_vars_per_channel',
+ 'tf.fake_quant_with_min_max_vars_per_channel_gradient': 'tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient',
+ 'tf.fft': 'tf.spectral.fft',
+ 'tf.floor': 'tf.math.floor',
+ 'tf.gather_nd': 'tf.manip.gather_nd',
+ 'tf.greater': 'tf.math.greater',
+ 'tf.greater_equal': 'tf.math.greater_equal',
+ 'tf.ifft': 'tf.spectral.ifft',
+ 'tf.igamma': 'tf.math.igamma',
+ 'tf.igammac': 'tf.math.igammac',
+ 'tf.invert_permutation': 'tf.math.invert_permutation',
+ 'tf.is_finite': 'tf.debugging.is_finite',
+ 'tf.is_inf': 'tf.debugging.is_inf',
+ 'tf.is_nan': 'tf.debugging.is_nan',
+ 'tf.less': 'tf.math.less',
+ 'tf.less_equal': 'tf.math.less_equal',
+ 'tf.lgamma': 'tf.math.lgamma',
+ 'tf.log': 'tf.math.log',
+ 'tf.log1p': 'tf.math.log1p',
+ 'tf.logical_and': 'tf.math.logical_and',
+ 'tf.logical_not': 'tf.math.logical_not',
+ 'tf.logical_or': 'tf.math.logical_or',
+ 'tf.matching_files': 'tf.io.matching_files',
+ 'tf.matrix_band_part': 'tf.linalg.band_part',
+ 'tf.matrix_determinant': 'tf.linalg.det',
+ 'tf.matrix_diag': 'tf.linalg.diag',
+ 'tf.matrix_diag_part': 'tf.linalg.diag_part',
+ 'tf.matrix_inverse': 'tf.linalg.inv',
+ 'tf.matrix_set_diag': 'tf.linalg.set_diag',
+ 'tf.matrix_solve': 'tf.linalg.solve',
+ 'tf.matrix_triangular_solve': 'tf.linalg.triangular_solve',
+ 'tf.maximum': 'tf.math.maximum',
+ 'tf.minimum': 'tf.math.minimum',
+ 'tf.not_equal': 'tf.math.not_equal',
+ 'tf.parse_tensor': 'tf.io.parse_tensor',
+ 'tf.polygamma': 'tf.math.polygamma',
+ 'tf.qr': 'tf.linalg.qr',
+ 'tf.quantized_concat': 'tf.quantization.quantized_concat',
+ 'tf.read_file': 'tf.io.read_file',
+ 'tf.reciprocal': 'tf.math.reciprocal',
+ 'tf.regex_replace': 'tf.strings.regex_replace',
+ 'tf.reshape': 'tf.manip.reshape',
+ 'tf.reverse': 'tf.manip.reverse',
+ 'tf.reverse_v2': 'tf.manip.reverse',
+ 'tf.rint': 'tf.math.rint',
+ 'tf.rsqrt': 'tf.math.rsqrt',
+ 'tf.scatter_nd': 'tf.manip.scatter_nd',
+ 'tf.segment_max': 'tf.math.segment_max',
+ 'tf.segment_mean': 'tf.math.segment_mean',
+ 'tf.segment_min': 'tf.math.segment_min',
+ 'tf.segment_prod': 'tf.math.segment_prod',
+ 'tf.segment_sum': 'tf.math.segment_sum',
+ 'tf.sin': 'tf.math.sin',
+ 'tf.sinh': 'tf.math.sinh',
+ 'tf.space_to_batch_nd': 'tf.manip.space_to_batch_nd',
+ 'tf.squared_difference': 'tf.math.squared_difference',
+ 'tf.string_join': 'tf.strings.join',
+ 'tf.string_strip': 'tf.strings.strip',
+ 'tf.string_to_hash_bucket': 'tf.strings.to_hash_bucket',
+ 'tf.string_to_hash_bucket_fast': 'tf.strings.to_hash_bucket_fast',
+ 'tf.string_to_hash_bucket_strong': 'tf.strings.to_hash_bucket_strong',
+ 'tf.string_to_number': 'tf.strings.to_number',
+ 'tf.substr': 'tf.strings.substr',
+ 'tf.tan': 'tf.math.tan',
+ 'tf.tile': 'tf.manip.tile',
+ 'tf.unsorted_segment_max': 'tf.math.unsorted_segment_max',
+ 'tf.unsorted_segment_min': 'tf.math.unsorted_segment_min',
+ 'tf.unsorted_segment_prod': 'tf.math.unsorted_segment_prod',
+ 'tf.unsorted_segment_sum': 'tf.math.unsorted_segment_sum',
+ 'tf.write_file': 'tf.io.write_file',
+ 'tf.zeta': 'tf.math.zeta'
+}
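The renames map above is plain data. As a minimal sketch (not part of the patch), assuming the module is importable from a TensorFlow checkout, a consumer can look up the new name for a deprecated endpoint the same way tf_upgrade_v2.py (added later in this patch) does via its function_renames field:

from tensorflow.tools.compatibility import renames_v2

old_name = 'tf.rsqrt'
# Fall back to the original name when no rename is recorded.
new_name = renames_v2.renames.get(old_name, old_name)
print('%s -> %s' % (old_name, new_name))  # tf.rsqrt -> tf.math.rsqrt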
diff --git a/tensorflow/contrib/autograph/utils/type_hints.py b/tensorflow/tools/compatibility/testdata/test_file_v1_10.py
index aeb9e54561..a49035a1a0 100644
--- a/tensorflow/contrib/autograph/utils/type_hints.py
+++ b/tensorflow/tools/compatibility/testdata/test_file_v1_10.py
@@ -1,4 +1,4 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,30 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""No-op utilities that provide static type hints.
-
-These are used when the data type is not known at creation, for instance in the
-case of empty lists.
-"""
+"""Tests for tf upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+import tensorflow as tf
+from tensorflow.python.framework import test_util
+from tensorflow.python.platform import test as test_lib
-def set_element_type(entity, dtype, shape=None):
- """Indicates that the entity is expected hold items of specified type.
+class TestUpgrade(test_util.TensorFlowTestCase):
+ """Test various APIs that have been changed in 2.0."""
- This function is a no-op. Its presence merely marks the data type of its
- argument. The staged TensorFlow ops will reflect and assert this data type.
+ def testRenames(self):
+ with self.test_session():
+ self.assertAllClose(1.04719755, tf.acos(0.5).eval())
+ self.assertAllClose(0.5, tf.rsqrt(4.0).eval())
- Args:
- entity: A Tensor or TensorArray.
- dtype: TensorFlow dtype value to assert for entity.
- shape: Optional shape to assert for entity.
- Returns:
- The value of entity, unchanged.
- """
- del dtype
- del shape
- return entity
+if __name__ == "__main__":
+ test_lib.main()
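For orientation, a small sketch (illustrative only, assuming a TensorFlow build where the tf.math endpoints already exist) of what the converted counterpart of the renamed calls above computes; the values match the assertions in the test data file:

import tensorflow as tf

with tf.Session() as sess:
  print(sess.run(tf.math.acos(0.5)))   # ~1.04719755, same value as tf.acos(0.5)
  print(sess.run(tf.math.rsqrt(4.0)))  # 0.5, same value as tf.rsqrt(4.0)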
diff --git a/tensorflow/tools/compatibility/tf_upgrade.py b/tensorflow/tools/compatibility/tf_upgrade.py
index 1f8833582a..96705b1a4c 100644
--- a/tensorflow/tools/compatibility/tf_upgrade.py
+++ b/tensorflow/tools/compatibility/tf_upgrade.py
@@ -19,491 +19,11 @@ from __future__ import division
from __future__ import print_function
import argparse
-import ast
-import collections
-import os
-import shutil
-import sys
-import tempfile
-import traceback
+from tensorflow.tools.compatibility import ast_edits
-class APIChangeSpec(object):
- """This class defines the transformations that need to happen.
- This class must provide the following fields:
-
- * `function_keyword_renames`: maps function names to a map of old -> new
- argument names
- * `function_renames`: maps function names to new function names
- * `change_to_function`: a set of function names that have changed (for
- notifications)
- * `function_reorders`: maps functions whose argument order has changed to the
- list of arguments in the new order
- * `function_handle`: maps function names to custom handlers for the function
-
- For an example, see `TFAPIChangeSpec`.
- """
-
-
-class _FileEditTuple(
- collections.namedtuple("_FileEditTuple",
- ["comment", "line", "start", "old", "new"])):
- """Each edit that is recorded by a _FileEditRecorder.
-
- Fields:
- comment: A description of the edit and why it was made.
- line: The line number in the file where the edit occurs (1-indexed).
- start: The line number in the file where the edit occurs (0-indexed).
- old: text string to remove (this must match what was in file).
- new: text string to add in place of `old`.
- """
-
- __slots__ = ()
-
-
-class _FileEditRecorder(object):
- """Record changes that need to be done to the file."""
-
- def __init__(self, filename):
- # all edits are lists of chars
- self._filename = filename
-
- self._line_to_edit = collections.defaultdict(list)
- self._errors = []
-
- def process(self, text):
- """Process a list of strings, each corresponding to the recorded changes.
-
- Args:
- text: A list of lines of text (assumed to contain newlines)
- Returns:
- A tuple of the modified text and a textual description of what is done.
- Raises:
- ValueError: if substitution source location does not have expected text.
- """
-
- change_report = ""
-
- # Iterate of each line
- for line, edits in self._line_to_edit.items():
- offset = 0
- # sort by column so that edits are processed in order in order to make
- # indexing adjustments cumulative for changes that change the string
- # length
- edits.sort(key=lambda x: x.start)
-
- # Extract each line to a list of characters, because mutable lists
- # are editable, unlike immutable strings.
- char_array = list(text[line - 1])
-
- # Record a description of the change
- change_report += "%r Line %d\n" % (self._filename, line)
- change_report += "-" * 80 + "\n\n"
- for e in edits:
- change_report += "%s\n" % e.comment
- change_report += "\n Old: %s" % (text[line - 1])
-
- # Make underscore buffers for underlining where in the line the edit was
- change_list = [" "] * len(text[line - 1])
- change_list_new = [" "] * len(text[line - 1])
-
- # Iterate for each edit
- for e in edits:
- # Create effective start, end by accounting for change in length due
- # to previous edits
- start_eff = e.start + offset
- end_eff = start_eff + len(e.old)
-
- # Make sure the edit is changing what it should be changing
- old_actual = "".join(char_array[start_eff:end_eff])
- if old_actual != e.old:
- raise ValueError("Expected text %r but got %r" %
- ("".join(e.old), "".join(old_actual)))
- # Make the edit
- char_array[start_eff:end_eff] = list(e.new)
-
- # Create the underline highlighting of the before and after
- change_list[e.start:e.start + len(e.old)] = "~" * len(e.old)
- change_list_new[start_eff:end_eff] = "~" * len(e.new)
-
- # Keep track of how to generate effective ranges
- offset += len(e.new) - len(e.old)
-
- # Finish the report comment
- change_report += " %s\n" % "".join(change_list)
- text[line - 1] = "".join(char_array)
- change_report += " New: %s" % (text[line - 1])
- change_report += " %s\n\n" % "".join(change_list_new)
- return "".join(text), change_report, self._errors
-
- def add(self, comment, line, start, old, new, error=None):
- """Add a new change that is needed.
-
- Args:
- comment: A description of what was changed
- line: Line number (1 indexed)
- start: Column offset (0 indexed)
- old: old text
- new: new text
- error: this "edit" is something that cannot be fixed automatically
- Returns:
- None
- """
-
- self._line_to_edit[line].append(
- _FileEditTuple(comment, line, start, old, new))
- if error:
- self._errors.append("%s:%d: %s" % (self._filename, line, error))
-
-
-class _ASTCallVisitor(ast.NodeVisitor):
- """AST Visitor that processes function calls.
-
- Updates function calls from old API version to new API version using a given
- change spec.
- """
-
- def __init__(self, filename, lines, api_change_spec):
- self._filename = filename
- self._file_edit = _FileEditRecorder(filename)
- self._lines = lines
- self._api_change_spec = api_change_spec
-
- def process(self, lines):
- return self._file_edit.process(lines)
-
- def generic_visit(self, node):
- ast.NodeVisitor.generic_visit(self, node)
-
- def _rename_functions(self, node, full_name):
- function_renames = self._api_change_spec.function_renames
- try:
- new_name = function_renames[full_name]
- self._file_edit.add("Renamed function %r to %r" % (full_name, new_name),
- node.lineno, node.col_offset, full_name, new_name)
- except KeyError:
- pass
-
- def _get_attribute_full_path(self, node):
- """Traverse an attribute to generate a full name e.g. tf.foo.bar.
-
- Args:
- node: A Node of type Attribute.
-
- Returns:
- a '.'-delimited full-name or None if the tree was not a simple form.
- i.e. `foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
- """
- curr = node
- items = []
- while not isinstance(curr, ast.Name):
- if not isinstance(curr, ast.Attribute):
- return None
- items.append(curr.attr)
- curr = curr.value
- items.append(curr.id)
- return ".".join(reversed(items))
-
- def _find_true_position(self, node):
- """Return correct line number and column offset for a given node.
-
- This is necessary mainly because ListComp's location reporting reports
- the next token after the list comprehension list opening.
-
- Args:
- node: Node for which we wish to know the lineno and col_offset
- """
- import re
- find_open = re.compile("^\s*(\\[).*$")
- find_string_chars = re.compile("['\"]")
-
- if isinstance(node, ast.ListComp):
- # Strangely, ast.ListComp returns the col_offset of the first token
- # after the '[' token which appears to be a bug. Workaround by
- # explicitly finding the real start of the list comprehension.
- line = node.lineno
- col = node.col_offset
- # loop over lines
- while 1:
- # Reverse the text to and regular expression search for whitespace
- text = self._lines[line - 1]
- reversed_preceding_text = text[:col][::-1]
- # First find if a [ can be found with only whitespace between it and
- # col.
- m = find_open.match(reversed_preceding_text)
- if m:
- new_col_offset = col - m.start(1) - 1
- return line, new_col_offset
- else:
- if (reversed_preceding_text == "" or
- reversed_preceding_text.isspace()):
- line = line - 1
- prev_line = self._lines[line - 1]
- # TODO(aselle):
- # this is poor comment detection, but it is good enough for
- # cases where the comment does not contain string literal starting/
- # ending characters. If ast gave us start and end locations of the
- # ast nodes rather than just start, we could use string literal
- # node ranges to filter out spurious #'s that appear in string
- # literals.
- comment_start = prev_line.find("#")
- if comment_start == -1:
- col = len(prev_line) - 1
- elif find_string_chars.search(prev_line[comment_start:]) is None:
- col = comment_start
- else:
- return None, None
- else:
- return None, None
- # Most other nodes return proper locations (with notably does not), but
- # it is not possible to use that in an argument.
- return node.lineno, node.col_offset
-
- def visit_Call(self, node): # pylint: disable=invalid-name
- """Handle visiting a call node in the AST.
-
- Args:
- node: Current Node
- """
-
- # Find a simple attribute name path e.g. "tf.foo.bar"
- full_name = self._get_attribute_full_path(node.func)
-
- # Make sure the func is marked as being part of a call
- node.func.is_function_for_call = True
-
- if full_name:
- # Call special handlers
- function_handles = self._api_change_spec.function_handle
- if full_name in function_handles:
- function_handles[full_name](self._file_edit, node)
-
- # Examine any non-keyword argument and make it into a keyword argument
- # if reordering required.
- function_reorders = self._api_change_spec.function_reorders
- function_keyword_renames = (
- self._api_change_spec.function_keyword_renames)
-
- if full_name in function_reorders:
- reordered = function_reorders[full_name]
- for idx, arg in enumerate(node.args):
- lineno, col_offset = self._find_true_position(arg)
- if lineno is None or col_offset is None:
- self._file_edit.add(
- "Failed to add keyword %r to reordered function %r" %
- (reordered[idx], full_name),
- arg.lineno,
- arg.col_offset,
- "",
- "",
- error="A necessary keyword argument failed to be inserted.")
- else:
- keyword_arg = reordered[idx]
- if (full_name in function_keyword_renames and
- keyword_arg in function_keyword_renames[full_name]):
- keyword_arg = function_keyword_renames[full_name][keyword_arg]
- self._file_edit.add("Added keyword %r to reordered function %r" %
- (reordered[idx], full_name), lineno, col_offset,
- "", keyword_arg + "=")
-
- # Examine each keyword argument and convert it to the final renamed form
- renamed_keywords = ({} if full_name not in function_keyword_renames else
- function_keyword_renames[full_name])
- for keyword in node.keywords:
- argkey = keyword.arg
- argval = keyword.value
-
- if argkey in renamed_keywords:
- argval_lineno, argval_col_offset = self._find_true_position(argval)
- if argval_lineno is not None and argval_col_offset is not None:
- # TODO(aselle): We should scan backward to find the start of the
- # keyword key. Unfortunately ast does not give you the location of
- # keyword keys, so we are forced to infer it from the keyword arg
- # value.
- key_start = argval_col_offset - len(argkey) - 1
- key_end = key_start + len(argkey) + 1
- if (self._lines[argval_lineno - 1][key_start:key_end] == argkey +
- "="):
- self._file_edit.add("Renamed keyword argument from %r to %r" %
- (argkey,
- renamed_keywords[argkey]), argval_lineno,
- argval_col_offset - len(argkey) - 1,
- argkey + "=", renamed_keywords[argkey] + "=")
- continue
- self._file_edit.add(
- "Failed to rename keyword argument from %r to %r" %
- (argkey, renamed_keywords[argkey]),
- argval.lineno,
- argval.col_offset - len(argkey) - 1,
- "",
- "",
- error="Failed to find keyword lexographically. Fix manually.")
-
- ast.NodeVisitor.generic_visit(self, node)
-
- def visit_Attribute(self, node): # pylint: disable=invalid-name
- """Handle bare Attributes i.e. [tf.foo, tf.bar].
-
- Args:
- node: Node that is of type ast.Attribute
- """
- full_name = self._get_attribute_full_path(node)
- if full_name:
- self._rename_functions(node, full_name)
- if full_name in self._api_change_spec.change_to_function:
- if not hasattr(node, "is_function_for_call"):
- new_text = full_name + "()"
- self._file_edit.add("Changed %r to %r" % (full_name, new_text),
- node.lineno, node.col_offset, full_name, new_text)
-
- ast.NodeVisitor.generic_visit(self, node)
-
-
-class ASTCodeUpgrader(object):
- """Handles upgrading a set of Python files using a given API change spec."""
-
- def __init__(self, api_change_spec):
- if not isinstance(api_change_spec, APIChangeSpec):
- raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
- type(api_change_spec))
- self._api_change_spec = api_change_spec
-
- def process_file(self, in_filename, out_filename):
- """Process the given python file for incompatible changes.
-
- Args:
- in_filename: filename to parse
- out_filename: output file to write to
- Returns:
- A tuple representing number of files processed, log of actions, errors
- """
-
- # Write to a temporary file, just in case we are doing an implace modify.
- with open(in_filename, "r") as in_file, \
- tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
- ret = self.process_opened_file(in_filename, in_file, out_filename,
- temp_file)
-
- shutil.move(temp_file.name, out_filename)
- return ret
-
- # Broad exceptions are required here because ast throws whatever it wants.
- # pylint: disable=broad-except
- def process_opened_file(self, in_filename, in_file, out_filename, out_file):
- """Process the given python file for incompatible changes.
-
- This function is split out to facilitate StringIO testing from
- tf_upgrade_test.py.
-
- Args:
- in_filename: filename to parse
- in_file: opened file (or StringIO)
- out_filename: output file to write to
- out_file: opened file (or StringIO)
- Returns:
- A tuple representing number of files processed, log of actions, errors
- """
- process_errors = []
- text = "-" * 80 + "\n"
- text += "Processing file %r\n outputting to %r\n" % (in_filename,
- out_filename)
- text += "-" * 80 + "\n\n"
-
- parsed_ast = None
- lines = in_file.readlines()
- try:
- parsed_ast = ast.parse("".join(lines))
- except Exception:
- text += "Failed to parse %r\n\n" % in_filename
- text += traceback.format_exc()
- if parsed_ast:
- visitor = _ASTCallVisitor(in_filename, lines, self._api_change_spec)
- visitor.visit(parsed_ast)
- out_text, new_text, process_errors = visitor.process(lines)
- text += new_text
- if out_file:
- out_file.write(out_text)
- text += "\n"
- return 1, text, process_errors
-
- # pylint: enable=broad-except
-
- def process_tree(self, root_directory, output_root_directory,
- copy_other_files):
- """Processes upgrades on an entire tree of python files in place.
-
- Note that only Python files. If you have custom code in other languages,
- you will need to manually upgrade those.
-
- Args:
- root_directory: Directory to walk and process.
- output_root_directory: Directory to use as base.
- copy_other_files: Copy files that are not touched by this converter.
-
- Returns:
- A tuple of files processed, the report string ofr all files, and errors
- """
-
- # make sure output directory doesn't exist
- if output_root_directory and os.path.exists(output_root_directory):
- print("Output directory %r must not already exist." %
- (output_root_directory))
- sys.exit(1)
-
- # make sure output directory does not overlap with root_directory
- norm_root = os.path.split(os.path.normpath(root_directory))
- norm_output = os.path.split(os.path.normpath(output_root_directory))
- if norm_root == norm_output:
- print("Output directory %r same as input directory %r" %
- (root_directory, output_root_directory))
- sys.exit(1)
-
- # Collect list of files to process (we do this to correctly handle if the
- # user puts the output directory in some sub directory of the input dir)
- files_to_process = []
- files_to_copy = []
- for dir_name, _, file_list in os.walk(root_directory):
- py_files = [f for f in file_list if f.endswith(".py")]
- copy_files = [f for f in file_list if not f.endswith(".py")]
- for filename in py_files:
- fullpath = os.path.join(dir_name, filename)
- fullpath_output = os.path.join(output_root_directory,
- os.path.relpath(fullpath,
- root_directory))
- files_to_process.append((fullpath, fullpath_output))
- if copy_other_files:
- for filename in copy_files:
- fullpath = os.path.join(dir_name, filename)
- fullpath_output = os.path.join(output_root_directory,
- os.path.relpath(
- fullpath, root_directory))
- files_to_copy.append((fullpath, fullpath_output))
-
- file_count = 0
- tree_errors = []
- report = ""
- report += ("=" * 80) + "\n"
- report += "Input tree: %r\n" % root_directory
- report += ("=" * 80) + "\n"
-
- for input_path, output_path in files_to_process:
- output_directory = os.path.dirname(output_path)
- if not os.path.isdir(output_directory):
- os.makedirs(output_directory)
- file_count += 1
- _, l_report, l_errors = self.process_file(input_path, output_path)
- tree_errors += l_errors
- report += l_report
- for input_path, output_path in files_to_copy:
- output_directory = os.path.dirname(output_path)
- if not os.path.isdir(output_directory):
- os.makedirs(output_directory)
- shutil.copy(input_path, output_path)
- return file_count, report, tree_errors
-
-
-class TFAPIChangeSpec(APIChangeSpec):
+class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"""List of maps that describe what changed in the API."""
def __init__(self):
@@ -718,7 +238,7 @@ Simple usage:
default="report.txt")
args = parser.parse_args()
- upgrade = ASTCodeUpgrader(TFAPIChangeSpec())
+ upgrade = ast_edits.ASTCodeUpgrader(TFAPIChangeSpec())
report_text = None
report_filename = args.report_filename
files_processed = 0
diff --git a/tensorflow/tools/compatibility/tf_upgrade_test.py b/tensorflow/tools/compatibility/tf_upgrade_test.py
index 3d02eacba6..66325ea2ad 100644
--- a/tensorflow/tools/compatibility/tf_upgrade_test.py
+++ b/tensorflow/tools/compatibility/tf_upgrade_test.py
@@ -22,6 +22,7 @@ import tempfile
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
+from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade
@@ -36,7 +37,7 @@ class TestUpgrade(test_util.TensorFlowTestCase):
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
- upgrader = tf_upgrade.ASTCodeUpgrader(tf_upgrade.TFAPIChangeSpec())
+ upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
@@ -139,7 +140,7 @@ class TestUpgradeFiles(test_util.TensorFlowTestCase):
upgraded = "tf.multiply(a, b)\n"
temp_file.write(original)
temp_file.close()
- upgrader = tf_upgrade.ASTCodeUpgrader(tf_upgrade.TFAPIChangeSpec())
+ upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
diff --git a/tensorflow/tools/compatibility/tf_upgrade_v2.py b/tensorflow/tools/compatibility/tf_upgrade_v2.py
new file mode 100644
index 0000000000..9702430a12
--- /dev/null
+++ b/tensorflow/tools/compatibility/tf_upgrade_v2.py
@@ -0,0 +1,115 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+
+from tensorflow.tools.compatibility import ast_edits
+from tensorflow.tools.compatibility import renames_v2
+
+
+class TFAPIChangeSpec(ast_edits.APIChangeSpec):
+ """List of maps that describe what changed in the API."""
+
+ def __init__(self):
+ # Maps from a function name to a dictionary that describes how to
+ # map from an old argument keyword to the new argument keyword.
+ self.function_keyword_renames = {}
+
+ # Mapping from function to the new name of the function
+ self.function_renames = renames_v2.renames
+
+ # Variables that should be changed to functions.
+ self.change_to_function = {}
+
+ # Functions that were reordered should be changed to the new keyword args
+ # for safety, if positional arguments are used. If you have reversed the
+ # positional arguments yourself, this could do the wrong thing.
+ self.function_reorders = {}
+
+ # Specially handled functions.
+ self.function_handle = {}
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description="""Convert a TensorFlow Python file to 2.0
+
+Simple usage:
+ tf_upgrade_v2.py --infile foo.py --outfile bar.py
+ tf_upgrade_v2.py --intree ~/code/old --outtree ~/code/new
+""")
+ parser.add_argument(
+ "--infile",
+ dest="input_file",
+ help="If converting a single file, the name of the file "
+ "to convert")
+ parser.add_argument(
+ "--outfile",
+ dest="output_file",
+ help="If converting a single file, the output filename.")
+ parser.add_argument(
+ "--intree",
+ dest="input_tree",
+ help="If converting a whole tree of files, the directory "
+ "to read from (relative or absolute).")
+ parser.add_argument(
+ "--outtree",
+ dest="output_tree",
+ help="If converting a whole tree of files, the output "
+ "directory (relative or absolute).")
+ parser.add_argument(
+ "--copyotherfiles",
+ dest="copy_other_files",
+ help=("If converting a whole tree of files, whether to "
+ "copy the other files."),
+ type=bool,
+ default=False)
+ parser.add_argument(
+ "--reportfile",
+ dest="report_filename",
+ help=("The name of the file where the report log is "
+ "stored. "
+ "(default: %(default)s)"),
+ default="report.txt")
+ args = parser.parse_args()
+
+ upgrade = ast_edits.ASTCodeUpgrader(TFAPIChangeSpec())
+ report_text = None
+ report_filename = args.report_filename
+ files_processed = 0
+ if args.input_file:
+ files_processed, report_text, errors = upgrade.process_file(
+ args.input_file, args.output_file)
+ files_processed = 1
+ elif args.input_tree:
+ files_processed, report_text, errors = upgrade.process_tree(
+ args.input_tree, args.output_tree, args.copy_other_files)
+ else:
+ parser.print_help()
+ if report_text:
+ open(report_filename, "w").write(report_text)
+ print("TensorFlow 2.0 Upgrade Script")
+ print("-----------------------------")
+ print("Converted %d files\n" % files_processed)
+ print("Detected %d errors that require attention" % len(errors))
+ print("-" * 80)
+ print("\n".join(errors))
+ print("\nMake sure to read the detailed log %r\n" % report_filename)
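Beyond the command-line entry point above, the upgrader can also be driven in memory. A minimal sketch (mirroring the helper used in tf_upgrade_v2_test.py below) of converting a snippet without touching the file system:

import six
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2

upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
in_file, out_file = six.StringIO("tf.acos(0.5)\n"), six.StringIO()
# Returns a (files processed, report text, errors) tuple.
count, report, errors = upgrader.process_opened_file(
    "test.py", in_file, "test_out.py", out_file)
print(out_file.getvalue())  # tf.math.acos(0.5)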
diff --git a/tensorflow/tools/compatibility/tf_upgrade_v2_test.py b/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
new file mode 100644
index 0000000000..57ac04de06
--- /dev/null
+++ b/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
@@ -0,0 +1,83 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tf 2.0 upgrader."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+import os
+import tempfile
+import six
+from tensorflow.python.framework import test_util
+from tensorflow.python.platform import test as test_lib
+from tensorflow.tools.compatibility import ast_edits
+from tensorflow.tools.compatibility import tf_upgrade_v2
+
+
+class TestUpgrade(test_util.TensorFlowTestCase):
+ """Test various APIs that have been changed in 2.0.
+
+ We also test whether a converted file is executable. test_file_v1_10.py
+ aims to exhaustively test that API changes are convertible and actually
+ work when run with current TensorFlow.
+ """
+
+ def _upgrade(self, old_file_text):
+ in_file = six.StringIO(old_file_text)
+ out_file = six.StringIO()
+ upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
+ count, report, errors = (
+ upgrader.process_opened_file("test.py", in_file,
+ "test_out.py", out_file))
+ return count, report, errors, out_file.getvalue()
+
+ def testParseError(self):
+ _, report, unused_errors, unused_new_text = self._upgrade(
+ "import tensorflow as tf\na + \n")
+ self.assertTrue(report.find("Failed to parse") != -1)
+
+ def testReport(self):
+ text = "tf.acos(a)\n"
+ _, report, unused_errors, unused_new_text = self._upgrade(text)
+ # This is not a complete test, but it is a sanity check that the report
+ # contains information about the rename.
+ self.assertNotEqual(
+ -1, report.find("Renamed function %r to %r" % ("tf.acos", "tf.math.acos")))
+
+ def testRename(self):
+ text = "tf.acos(a)\n"
+ _, unused_report, unused_errors, new_text = self._upgrade(text)
+ self.assertEqual(new_text, "tf.math.acos(a)\n")
+ text = "tf.rsqrt(tf.log(3.8))\n"
+ _, unused_report, unused_errors, new_text = self._upgrade(text)
+ self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log(3.8))\n")
+
+
+class TestUpgradeFiles(test_util.TensorFlowTestCase):
+
+ def testInplace(self):
+ """Check to make sure we don't have a file system race."""
+ temp_file = tempfile.NamedTemporaryFile("w", delete=False)
+ original = "tf.acos(a, b)\n"
+ upgraded = "tf.math.acos(a, b)\n"
+ temp_file.write(original)
+ temp_file.close()
+ upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
+ upgrader.process_file(temp_file.name, temp_file.name)
+ self.assertAllEqual(open(temp_file.name).read(), upgraded)
+ os.unlink(temp_file.name)
+
+
+if __name__ == "__main__":
+ test_lib.main()
diff --git a/tensorflow/tools/compatibility/update/BUILD b/tensorflow/tools/compatibility/update/BUILD
new file mode 100644
index 0000000000..feb37c902e
--- /dev/null
+++ b/tensorflow/tools/compatibility/update/BUILD
@@ -0,0 +1,15 @@
+licenses(["notice"]) # Apache 2.0
+
+package(default_visibility = ["//visibility:private"])
+
+py_binary(
+ name = "generate_v2_renames_map",
+ srcs = ["generate_v2_renames_map.py"],
+ srcs_version = "PY2AND3",
+ deps = [
+ "//tensorflow:tensorflow_py",
+ "//tensorflow/python:lib",
+ "//tensorflow/tools/common:public_api",
+ "//tensorflow/tools/common:traverse",
+ ],
+)
diff --git a/tensorflow/tools/compatibility/update/generate_v2_renames_map.py b/tensorflow/tools/compatibility/update/generate_v2_renames_map.py
new file mode 100644
index 0000000000..567eceb0b6
--- /dev/null
+++ b/tensorflow/tools/compatibility/update/generate_v2_renames_map.py
@@ -0,0 +1,103 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# pylint: disable=line-too-long
+"""Script for updating tensorflow/tools/compatibility/renames_v2.py.
+
+To update renames_v2.py, run:
+ bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
+ bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
+"""
+# pylint: enable=line-too-long
+
+import tensorflow as tf
+
+from tensorflow.python.lib.io import file_io
+from tensorflow.python.util import tf_decorator
+from tensorflow.python.util import tf_export
+from tensorflow.tools.common import public_api
+from tensorflow.tools.common import traverse
+
+
+_OUTPUT_FILE_PATH = 'third_party/tensorflow/tools/compatibility/renames_v2.py'
+_FILE_HEADER = """# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# pylint: disable=line-too-long
+\"\"\"List of renames to apply when converting from TF 1.0 to TF 2.0.
+
+THIS FILE IS AUTOGENERATED: To update, please run:
+ bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
+ bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
+This file should be updated whenever endpoints are deprecated.
+\"\"\"
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+"""
+
+
+def update_renames_v2(output_file_path):
+ """Writes a Python dictionary mapping deprecated to canonical API names.
+
+ Args:
+ output_file_path: File path to write output to. Any existing contents
+ would be replaced.
+ """
+ # Set of rename lines to write to output file in the form:
+ # 'tf.deprecated_name': 'tf.canonical_name'
+ rename_line_set = set()
+ # _tf_api_names attribute name
+ tensorflow_api_attr = tf_export.API_ATTRS[tf_export.TENSORFLOW_API_NAME].names
+
+ def visit(unused_path, unused_parent, children):
+ """Visitor that collects rename strings to add to rename_line_set."""
+ for child in children:
+ _, attr = tf_decorator.unwrap(child[1])
+ if not hasattr(attr, '__dict__'):
+ continue
+ api_names = attr.__dict__.get(tensorflow_api_attr, [])
+ deprecated_api_names = attr.__dict__.get('_tf_deprecated_api_names', [])
+ canonical_name = tf_export.get_canonical_name(
+ api_names, deprecated_api_names)
+ for name in deprecated_api_names:
+ rename_line_set.add(' \'tf.%s\': \'tf.%s\'' % (name, canonical_name))
+
+ visitor = public_api.PublicAPIVisitor(visit)
+ visitor.do_not_descend_map['tf'].append('contrib')
+ traverse.traverse(tf, visitor)
+
+ renames_file_text = '%srenames = {\n%s\n}\n' % (
+ _FILE_HEADER, ',\n'.join(sorted(rename_line_set)))
+ file_io.write_string_to_file(output_file_path, renames_file_text)
+
+
+def main(unused_argv):
+ update_renames_v2(_OUTPUT_FILE_PATH)
+
+
+if __name__ == '__main__':
+ tf.app.run(main=main)
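For reference, a tiny sketch (names illustrative) of the entry format that visit() adds to rename_line_set before the sorted set is joined into the renames dictionary written to renames_v2.py:

name, canonical_name = 'rsqrt', 'math.rsqrt'
entry = "'tf.%s': 'tf.%s'" % (name, canonical_name)
print(entry)  # 'tf.rsqrt': 'tf.math.rsqrt'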
diff --git a/tensorflow/tools/docker/Dockerfile.devel b/tensorflow/tools/docker/Dockerfile.devel
index fd94d64268..f7fe4119da 100644
--- a/tensorflow/tools/docker/Dockerfile.devel
+++ b/tensorflow/tools/docker/Dockerfile.devel
@@ -63,7 +63,7 @@ RUN echo "startup --batch" >>/etc/bazel.bazelrc
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/etc/bazel.bazelrc
# Install the most recent bazel release.
-ENV BAZEL_VERSION 0.14.1
+ENV BAZEL_VERSION 0.15.0
WORKDIR /
RUN mkdir /bazel && \
cd /bazel && \
diff --git a/tensorflow/tools/docker/Dockerfile.devel-gpu b/tensorflow/tools/docker/Dockerfile.devel-gpu
index 5ec43b8cb8..340f96df48 100644
--- a/tensorflow/tools/docker/Dockerfile.devel-gpu
+++ b/tensorflow/tools/docker/Dockerfile.devel-gpu
@@ -15,6 +15,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
git \
libcudnn7=7.1.4.18-1+cuda9.0 \
libcudnn7-dev=7.1.4.18-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 \
+ libnccl-dev=2.2.13-1+cuda9.0 \
libcurl3-dev \
libfreetype6-dev \
libhdf5-serial-dev \
@@ -33,6 +35,11 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
find /usr/local/cuda-9.0/lib64/ -type f -name 'lib*_static.a' -not -name 'libcudart_static.a' -delete && \
rm /usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a
+# Link NCCL libray and header where the build script expects them.
+RUN mkdir /usr/local/cuda-9.0/lib && \
+ ln -s /usr/lib/x86_64-linux-gnu/libnccl.so.2 /usr/local/cuda/lib/libnccl.so.2 && \
+ ln -s /usr/include/nccl.h /usr/local/cuda/include/nccl.h
+
RUN curl -fSsL -O https://bootstrap.pypa.io/get-pip.py && \
python get-pip.py && \
rm get-pip.py
@@ -72,7 +79,7 @@ RUN echo "startup --batch" >>/etc/bazel.bazelrc
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
>>/etc/bazel.bazelrc
# Install the most recent bazel release.
-ENV BAZEL_VERSION 0.14.1
+ENV BAZEL_VERSION 0.15.0
WORKDIR /
RUN mkdir /bazel && \
cd /bazel && \
@@ -91,10 +98,13 @@ RUN git clone --branch=r1.9 --depth=1 https://github.com/tensorflow/tensorflow.g
ENV CI_BUILD_PYTHON python
ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH
ENV TF_NEED_CUDA 1
-ENV TF_CUDA_COMPUTE_CAPABILITIES=3.0,3.5,5.2,6.0,6.1
+ENV TF_CUDA_COMPUTE_CAPABILITIES=3.5,5.2,6.0,6.1,7.0
ENV TF_CUDA_VERSION=9.0
ENV TF_CUDNN_VERSION=7
+# NCCL 2.x
+ENV TF_NCCL_VERSION=2
+
RUN ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1 && \
LD_LIBRARY_PATH=/usr/local/cuda/lib64/stubs:${LD_LIBRARY_PATH} \
tensorflow/tools/ci_build/builds/configured GPU \
diff --git a/tensorflow/tools/docker/Dockerfile.devel-gpu-cuda9-cudnn7 b/tensorflow/tools/docker/Dockerfile.devel-gpu-cuda9-cudnn7
index 3bedc8cf34..30bc2d2806 100644
--- a/tensorflow/tools/docker/Dockerfile.devel-gpu-cuda9-cudnn7
+++ b/tensorflow/tools/docker/Dockerfile.devel-gpu-cuda9-cudnn7
@@ -4,7 +4,7 @@ LABEL maintainer="Gunhan Gulsoy <gunan@google.com>"
# It is possible to override these for releases.
ARG TF_BRANCH=master
-ARG BAZEL_VERSION=0.5.4
+ARG BAZEL_VERSION=0.15.0
ARG TF_AVAILABLE_CPUS=32
RUN apt-get update && apt-get install -y --no-install-recommends \
diff --git a/tensorflow/tools/docker/Dockerfile.gpu b/tensorflow/tools/docker/Dockerfile.gpu
index 9197651ff4..28d4371da3 100644
--- a/tensorflow/tools/docker/Dockerfile.gpu
+++ b/tensorflow/tools/docker/Dockerfile.gpu
@@ -13,6 +13,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
cuda-cusparse-9-0 \
curl \
libcudnn7=7.1.4.18-1+cuda9.0 \
+ libnccl2=2.2.13-1+cuda9.0 \
libfreetype6-dev \
libhdf5-serial-dev \
libpng12-dev \
diff --git a/tensorflow/tools/docker/notebooks/1_hello_tensorflow.ipynb b/tensorflow/tools/docker/notebooks/1_hello_tensorflow.ipynb
index 0633b03259..8fa871ef77 100644
--- a/tensorflow/tools/docker/notebooks/1_hello_tensorflow.ipynb
+++ b/tensorflow/tools/docker/notebooks/1_hello_tensorflow.ipynb
@@ -665,7 +665,7 @@
"source": [
"## What's next?\n",
"\n",
- "This has been a gentle introduction to TensorFlow, focused on what TensorFlow is and the very basics of doing anything in TensorFlow. If you'd like more, the next tutorial in the series is Getting Started with TensorFlow, also available in the [notebooks directory](..)."
+ "This has been a gentle introduction to TensorFlow, focused on what TensorFlow is and the very basics of doing anything in TensorFlow. If you'd like more, the next tutorial in the series is Getting Started with TensorFlow, also available in the [notebooks directory](../notebooks)."
]
}
],
diff --git a/tensorflow/tools/docs/doc_generator_visitor.py b/tensorflow/tools/docs/doc_generator_visitor.py
index 259a4694fd..c090dbd8da 100644
--- a/tensorflow/tools/docs/doc_generator_visitor.py
+++ b/tensorflow/tools/docs/doc_generator_visitor.py
@@ -20,6 +20,7 @@ from __future__ import print_function
import six
+from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
@@ -201,7 +202,6 @@ class DocGeneratorVisitor(object):
raw_duplicates[master_name] = [master_name, full_name]
else:
reverse_index[object_id] = full_name
-
# Decide on master names, rewire duplicates and make a duplicate_of map
# mapping all non-master duplicates to the master name. The master symbol
# does not have an entry in this map.
@@ -211,10 +211,15 @@ class DocGeneratorVisitor(object):
duplicates = {}
for names in raw_duplicates.values():
names = sorted(names)
-
- # Choose the lexicographically first name with the minimum number of
- # submodules. This will prefer highest level namespace for any symbol.
- master_name = min(names, key=lambda name: name.count('.'))
+ master_name = (
+ tf_export.get_canonical_name_for_symbol(self._index[names[0]])
+ if names else None)
+ if master_name:
+ master_name = 'tf.%s' % master_name
+ else:
+ # Choose the lexicographically first name with the minimum number of
+ # submodules. This will prefer highest level namespace for any symbol.
+ master_name = min(names, key=lambda name: name.count('.'))
duplicates[master_name] = names
for name in names:
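To make the fallback rule concrete, a small sketch (values illustrative) of how the minimum-submodule heuristic above picks a master name when no canonical exported name is available:

names = sorted(['tf.math.acos', 'tf.acos'])
# Fewest '.' separators wins, preferring the highest-level namespace.
master_name = min(names, key=lambda name: name.count('.'))
print(master_name)  # tf.acos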
diff --git a/tensorflow/tools/docs/generate.py b/tensorflow/tools/docs/generate.py
index fc93085e3e..f96887e4c7 100644
--- a/tensorflow/tools/docs/generate.py
+++ b/tensorflow/tools/docs/generate.py
@@ -31,6 +31,11 @@ if __name__ == '__main__':
doc_generator = generate_lib.DocGenerator()
doc_generator.add_output_dir_argument()
doc_generator.add_src_dir_argument()
+ doc_generator.argument_parser.add_argument(
+ '--site_api_path',
+ type=str, default='api_docs/python',
+ help='The path from the site-root to the api_docs '
+ 'directory for this project')
# This doc generator works on the TensorFlow codebase. Since this script lives
# at tensorflow/tools/docs, and all code is defined somewhere inside
diff --git a/tensorflow/tools/docs/generate_lib.py b/tensorflow/tools/docs/generate_lib.py
index e7634cd5dc..4f70a69364 100644
--- a/tensorflow/tools/docs/generate_lib.py
+++ b/tensorflow/tools/docs/generate_lib.py
@@ -55,7 +55,8 @@ def write_docs(output_dir,
parser_config,
yaml_toc,
root_title='TensorFlow',
- search_hints=True):
+ search_hints=True,
+ site_api_path=None):
"""Write previously extracted docs to disk.
Write a docs page for each symbol included in the indices of parser_config to
@@ -73,6 +74,8 @@ def write_docs(output_dir,
root_title: The title name for the root level index.md.
search_hints: (bool) include meta-data search hints at the top of each
output file.
+ site_api_path: Used to write the api-duplicates _redirects.yaml file. If
+ None (the default), the file is not generated.
Raises:
ValueError: if `output_dir` is not an absolute path
@@ -92,6 +95,9 @@ def write_docs(output_dir,
# - symbol name(string):pathname (string)
symbol_to_file = {}
+ # Collect redirects for an api _redirects.yaml file.
+ redirects = ['redirects:\n']
+
# Parse and write Markdown pages, resolving cross-links (@{symbol}).
for full_name, py_object in six.iteritems(parser_config.index):
parser_config.reference_resolver.current_doc_full_name = full_name
@@ -150,6 +156,25 @@ def write_docs(output_dir,
raise OSError(
'Cannot write documentation for %s to %s' % (full_name, directory))
+ if site_api_path:
+ duplicates = parser_config.duplicates.get(full_name, [])
+ if not duplicates:
+ continue
+
+ duplicates = [item for item in duplicates if item != full_name]
+ template = ('- from: /{}\n'
+ ' to: /{}\n')
+ for dup in duplicates:
+ from_path = os.path.join(site_api_path, dup.replace('.', '/'))
+ to_path = os.path.join(site_api_path, full_name.replace('.', '/'))
+ redirects.append(
+ template.format(from_path, to_path))
+
+ if site_api_path:
+ api_redirects_path = os.path.join(output_dir, '_redirects.yaml')
+ with open(api_redirects_path, 'w') as redirect_file:
+ redirect_file.write(''.join(redirects))
+
if yaml_toc:
# Generate table of contents
@@ -608,7 +633,8 @@ class DocGenerator(object):
parser_config,
yaml_toc=self.yaml_toc,
root_title=root_title,
- search_hints=getattr(flags, 'search_hints', True))
+ search_hints=getattr(flags, 'search_hints', True),
+ site_api_path=getattr(flags, 'site_api_path', None))
# Replace all the @{} references in files under `FLAGS.src_dir`
replace_refs(flags.src_dir, flags.output_dir, reference_resolver, '*.md')
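As a rough sketch of the output this produces (paths follow the expectation in generate_lib_test.py below), one duplicate symbol yields one from/to pair in the generated _redirects.yaml:

import os

site_api_path = 'api_docs/python'
full_name, dup = 'tf.TestModule.test_function', 'tf.test_function'
template = ('- from: /{}\n'
            '  to: /{}\n')
print('redirects:\n' + template.format(
    os.path.join(site_api_path, dup.replace('.', '/')),
    os.path.join(site_api_path, full_name.replace('.', '/'))))
# redirects:
# - from: /api_docs/python/tf/test_function
#   to: /api_docs/python/tf/TestModule/test_function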
diff --git a/tensorflow/tools/docs/generate_lib_test.py b/tensorflow/tools/docs/generate_lib_test.py
index 7a6f9fd9f7..de18b13254 100644
--- a/tensorflow/tools/docs/generate_lib_test.py
+++ b/tensorflow/tools/docs/generate_lib_test.py
@@ -107,7 +107,18 @@ class GenerateTest(googletest.TestCase):
output_dir = googletest.GetTempDir()
- generate_lib.write_docs(output_dir, parser_config, yaml_toc=True)
+ generate_lib.write_docs(output_dir, parser_config, yaml_toc=True,
+ site_api_path='api_docs/python')
+
+ # Check redirects
+ redirects_file = os.path.join(output_dir, '_redirects.yaml')
+ self.assertTrue(os.path.exists(redirects_file))
+ with open(redirects_file) as f:
+ redirects = f.read()
+ self.assertEqual(redirects.split(), [
+ 'redirects:', '-', 'from:', '/api_docs/python/tf/test_function', 'to:',
+ '/api_docs/python/tf/TestModule/test_function'
+ ])
# Make sure that the right files are written to disk.
self.assertTrue(os.path.exists(os.path.join(output_dir, 'index.md')))
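
For orientation, here is a minimal standalone sketch of the redirect generation that the `write_docs` change above performs; the `duplicates` mapping is a made-up example, but the output matches what the test asserts:

```python
# Minimal sketch of the _redirects.yaml generation added to write_docs above.
# The `duplicates` mapping below is a hypothetical example, not real API data.
import os

site_api_path = 'api_docs/python'
duplicates = {'tf.TestModule.test_function': ['tf.test_function']}

redirects = ['redirects:\n']
template = ('- from: /{}\n'
            '  to: /{}\n')
for full_name, dup_names in duplicates.items():
    for dup in dup_names:
        from_path = os.path.join(site_api_path, dup.replace('.', '/'))
        to_path = os.path.join(site_api_path, full_name.replace('.', '/'))
        redirects.append(template.format(from_path, to_path))

print(''.join(redirects))
# redirects:
# - from: /api_docs/python/tf/test_function
#   to: /api_docs/python/tf/TestModule/test_function
```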
diff --git a/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc b/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
index f1d361e07d..156636ab82 100644
--- a/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
+++ b/tensorflow/tools/graph_transforms/fold_old_batch_norms.cc
@@ -159,7 +159,7 @@ Status FuseScaleOffsetToConvWeights(const std::vector<float>& scale_values,
NodeDef bias_add_node;
bias_add_node.set_op("BiasAdd");
bias_add_node.set_name(conv_output_name);
- if (!conv_node.attr().count("data_format")) {
+ if (conv_node.attr().count("data_format") > 0) {
CopyNodeAttr(conv_node, "data_format", "data_format", &bias_add_node);
}
CopyNodeAttr(conv_node, "T", "T", &bias_add_node);
diff --git a/tensorflow/tools/graph_transforms/transform_utils.cc b/tensorflow/tools/graph_transforms/transform_utils.cc
index af17fd75bc..cb084e49b7 100644
--- a/tensorflow/tools/graph_transforms/transform_utils.cc
+++ b/tensorflow/tools/graph_transforms/transform_utils.cc
@@ -247,9 +247,16 @@ Status SortByExecutionOrder(const GraphDef& input_graph_def,
}
}
- if (processed < input_graph_def.node_size()) {
- return errors::InvalidArgument(input_graph_def.node_size() - processed,
- " nodes in a cycle");
+ if (processed < num_nodes) {
+ LOG(WARNING) << "IN " << __func__ << (num_nodes - processed)
+ << " NODES IN A CYCLE";
+ for (int64 i = 0; i < num_nodes; i++) {
+ if (pending_count[i] != 0) {
+ LOG(WARNING) << "PENDING: " << SummarizeNodeDef(input_graph_def.node(i))
+ << "WITH PENDING COUNT = " << pending_count[i];
+ }
+ }
+ return errors::InvalidArgument(num_nodes - processed, " nodes in a cycle");
}
return Status::OK();
}
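
The extra logging above lists every node whose pending count stayed non-zero, i.e. the nodes trapped in (or downstream of) a cycle. A short illustrative Python sketch of the same Kahn's-algorithm diagnosis, on a toy three-node graph rather than a real GraphDef:

```python
# Illustrative sketch of the cycle diagnosis pattern used in
# SortByExecutionOrder: run Kahn's topological sort and report nodes whose
# pending (in-degree) count never reached zero. The graph is a toy example.
from collections import deque

def find_cycle_nodes(edges, num_nodes):
    pending = [0] * num_nodes
    adj = [[] for _ in range(num_nodes)]
    for src, dst in edges:
        adj[src].append(dst)
        pending[dst] += 1
    ready = deque(i for i in range(num_nodes) if pending[i] == 0)
    processed = 0
    while ready:
        node = ready.popleft()
        processed += 1
        for nxt in adj[node]:
            pending[nxt] -= 1
            if pending[nxt] == 0:
                ready.append(nxt)
    # Nodes still pending after the sort are part of (or behind) a cycle.
    return [i for i in range(num_nodes) if pending[i] != 0]

# 0 -> 1 -> 2 -> 1: nodes 1 and 2 form a cycle.
assert find_cycle_nodes([(0, 1), (1, 2), (2, 1)], 3) == [1, 2]
```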
diff --git a/tensorflow/tools/lib_package/BUILD b/tensorflow/tools/lib_package/BUILD
index 173f418dc8..44d8a37a8f 100644
--- a/tensorflow/tools/lib_package/BUILD
+++ b/tensorflow/tools/lib_package/BUILD
@@ -143,6 +143,7 @@ genrule(
"@zlib_archive//:zlib.h",
] + if_mkl([
"//third_party/mkl:LICENSE",
+ "//third_party/mkl_dnn:LICENSE",
]),
outs = ["include/tensorflow/c/LICENSE"],
cmd = "$(location :concat_licenses.sh) $(SRCS) >$@",
@@ -182,6 +183,7 @@ genrule(
"@zlib_archive//:zlib.h",
] + if_mkl([
"//third_party/mkl:LICENSE",
+ "//third_party/mkl_dnn:LICENSE",
]),
outs = ["include/tensorflow/jni/LICENSE"],
cmd = "$(location :concat_licenses.sh) $(SRCS) >$@",
diff --git a/tensorflow/tools/pip_package/BUILD b/tensorflow/tools/pip_package/BUILD
index c9d53f46c3..ab39ed8d69 100644
--- a/tensorflow/tools/pip_package/BUILD
+++ b/tensorflow/tools/pip_package/BUILD
@@ -11,7 +11,7 @@ load(
)
load("//third_party/mkl:build_defs.bzl", "if_mkl")
load("//tensorflow:tensorflow.bzl", "if_cuda")
-load("@local_config_tensorrt//:build_defs.bzl", "if_tensorrt")
+load("@local_config_syslibs//:build_defs.bzl", "if_not_system_lib")
load("//tensorflow/core:platform/default/build_config_root.bzl", "tf_additional_license_deps")
# This returns a list of headers of all public header libraries (e.g.,
@@ -78,7 +78,7 @@ COMMON_PIP_DEPS = [
"//tensorflow/contrib/labeled_tensor:labeled_tensor_pip",
"//tensorflow/contrib/nn:nn_py",
"//tensorflow/contrib/predictor:predictor_pip",
- "//tensorflow/contrib/proto:proto_pip",
+ "//tensorflow/contrib/proto:proto",
"//tensorflow/contrib/receptive_field:receptive_field_pip",
"//tensorflow/contrib/rpc:rpc_pip",
"//tensorflow/contrib/session_bundle:session_bundle_pip",
@@ -104,6 +104,7 @@ COMMON_PIP_DEPS = [
"//tensorflow/python/kernel_tests/testdata:self_adjoint_eig_op_test_files",
"//tensorflow/python/saved_model:saved_model",
"//tensorflow/python/tools:tools_pip",
+ "//tensorflow/python/tools/api/generator:create_python_api",
"//tensorflow/python:test_ops",
"//tensorflow/tools/dist_test/server:grpc_tensorflow_server",
]
@@ -144,7 +145,6 @@ filegroup(
"@gast_archive//:PKG-INFO",
"@gemmlowp//:LICENSE",
"@gif_archive//:COPYING",
- "@grpc//:LICENSE",
"@highwayhash//:LICENSE",
"@jemalloc//:COPYING",
"@jpeg//:LICENSE.md",
@@ -153,8 +153,6 @@ filegroup(
"@lmdb//:LICENSE",
"@local_config_nccl//:LICENSE",
"@local_config_sycl//sycl:LICENSE.text",
- "@grpc//third_party/nanopb:LICENSE.txt",
- "@grpc//third_party/address_sorting:LICENSE",
"@nasm//:LICENSE",
"@nsync//:LICENSE",
"@pcre//:LICENCE",
@@ -168,7 +166,15 @@ filegroup(
"@org_python_pypi_backports_weakref//:LICENSE",
] + if_mkl([
"//third_party/mkl:LICENSE",
- ]) + tf_additional_license_deps(),
+ "//third_party/mkl_dnn:LICENSE",
+ ]) + if_not_system_lib(
+ "grpc",
+ [
+ "@grpc//:LICENSE",
+ "@grpc//third_party/nanopb:LICENSE.txt",
+ "@grpc//third_party/address_sorting:LICENSE",
+ ],
+ ) + tf_additional_license_deps(),
)
sh_binary(
@@ -183,9 +189,7 @@ sh_binary(
"//tensorflow/contrib/lite/python:tflite_convert",
"//tensorflow/contrib/lite/toco/python:toco_from_protos",
],
- }) + if_mkl(["//third_party/mkl:intel_binary_blob"]) + if_tensorrt([
- "//tensorflow/contrib/tensorrt:init_py",
- ]),
+ }) + if_mkl(["//third_party/mkl:intel_binary_blob"]),
)
# A genrule for generating a marker file for the pip package on Windows
diff --git a/tensorflow/tools/pip_package/build_pip_package.sh b/tensorflow/tools/pip_package/build_pip_package.sh
index b0089d3360..ca40f2eaa8 100755
--- a/tensorflow/tools/pip_package/build_pip_package.sh
+++ b/tensorflow/tools/pip_package/build_pip_package.sh
@@ -17,8 +17,12 @@
set -e
+function is_absolute {
+ [[ "$1" = /* ]] || [[ "$1" =~ ^[a-zA-Z]:[/\\].* ]]
+}
+
function real_path() {
- [[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"
+ is_absolute "$1" && echo "$1" || echo "$PWD/${1#./}"
}
function cp_external() {
@@ -27,7 +31,7 @@ function cp_external() {
pushd .
cd "$src_dir"
- for f in `find . ! -type d ! -name '*.py' ! -path '*local_config_cuda*' ! -path '*local_config_tensorrt*' ! -path '*org_tensorflow*'`; do
+ for f in `find . ! -type d ! -name '*.py' ! -path '*local_config_cuda*' ! -path '*local_config_tensorrt*' ! -path '*local_config_syslibs*' ! -path '*org_tensorflow*'`; do
mkdir -p "${dest_dir}/$(dirname ${f})"
cp "${f}" "${dest_dir}/$(dirname ${f})/"
done
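
The new `is_absolute` helper above extends the POSIX check with Windows drive paths (e.g. `C:\` or `C:/`). A Python rendering of that test, shown only to make the regex explicit:

```python
# Python rendering of the bash is_absolute() check above, for illustration.
# The Windows-drive pattern mirrors the regex ^[a-zA-Z]:[/\\].*
import re

def is_absolute(path):
    # POSIX absolute path, or a Windows drive path such as C:/ or C:\
    return path.startswith('/') or bool(re.match(r'^[a-zA-Z]:[/\\]', path))

assert is_absolute('/tmp/pkg')
assert is_absolute('C:\\tmp\\pkg')
assert not is_absolute('./relative/dir')
```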
diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py
index c630ca04b8..1f4c3d47bf 100644
--- a/tensorflow/tools/pip_package/setup.py
+++ b/tensorflow/tools/pip_package/setup.py
@@ -45,7 +45,7 @@ DOCLINES = __doc__.split('\n')
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
-_VERSION = '1.9.0-rc0'
+_VERSION = '1.9.0'
REQUIRED_PACKAGES = [
'absl-py >= 0.1.6',
@@ -55,7 +55,7 @@ REQUIRED_PACKAGES = [
'six >= 1.10.0',
'protobuf >= 3.6.0',
'setuptools <= 39.1.0',
- 'tensorboard >= 1.8.0, < 1.9.0',
+ 'tensorboard >= 1.10.0, < 1.11.0',
'termcolor >= 1.1.0',
]
diff --git a/tensorflow/workspace.bzl b/tensorflow/workspace.bzl
index 6cb8742df0..f4eac0a9a1 100644
--- a/tensorflow/workspace.bzl
+++ b/tensorflow/workspace.bzl
@@ -8,6 +8,7 @@ load("//third_party/git:git_configure.bzl", "git_configure")
load("//third_party/py:python_configure.bzl", "python_configure")
load("//third_party/sycl:sycl_configure.bzl", "sycl_configure")
+load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
load("//third_party/toolchains/clang6:repo.bzl", "clang6_configure")
load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
load("//third_party:repo.bzl", "tf_http_archive")
@@ -35,6 +36,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
nccl_configure(name="local_config_nccl")
git_configure(name="local_config_git")
sycl_configure(name="local_config_sycl")
+ syslibs_configure(name="local_config_syslibs")
python_configure(name="local_config_python")
# For windows bazel build
@@ -142,13 +144,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
tf_http_archive(
name = "ortools_archive",
urls = [
- "https://mirror.bazel.build/github.com/google/or-tools/archive/253f7955c6a1fd805408fba2e42ac6d45b312d15.tar.gz",
- # Please uncomment me, when the next upgrade happens. Then
- # remove the whitelist entry in third_party/repo.bzl.
- # "https://github.com/google/or-tools/archive/253f7955c6a1fd805408fba2e42ac6d45b312d15.tar.gz",
+ "https://mirror.bazel.build/github.com/google/or-tools/archive/v6.7.2.tar.gz",
+ "https://github.com/google/or-tools/archive/v6.7.2.tar.gz",
],
- sha256 = "932075525642b04ac6f1b50589f1df5cd72ec2f448b721fd32234cf183f0e755",
- strip_prefix = "or-tools-253f7955c6a1fd805408fba2e42ac6d45b312d15/src",
+ sha256 = "d025a95f78b5fc5eaa4da5f395f23d11c23cf7dbd5069f1f627f002de87b86b9",
+ strip_prefix = "or-tools-6.7.2/src",
build_file = clean_dep("//third_party:ortools.BUILD"),
)
@@ -161,6 +161,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
],
sha256 = "2f945446b71336e7f5a2bcace1abcf0b23fbba368266c6a1be33de3de3b3c912",
strip_prefix = "re2-2018-04-01",
+ system_build_file = clean_dep("//third_party/systemlibs:re2.BUILD"),
)
tf_http_archive(
@@ -226,6 +227,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "63ec86477ad3f0f6292325fd89e1d93aea2e2fd490070863f17d48f7cd387011",
strip_prefix = "nasm-2.13.03",
build_file = clean_dep("//third_party:nasm.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:nasm.BUILD"),
)
tf_http_archive(
@@ -237,6 +239,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "1a17020f859cb12711175a67eab5c71fc1904e04b587046218e36106e07eabde",
strip_prefix = "libjpeg-turbo-1.5.3",
build_file = clean_dep("//third_party/jpeg:jpeg.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:jpeg.BUILD"),
)
tf_http_archive(
@@ -249,6 +252,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
strip_prefix = "libpng-1.6.34",
build_file = clean_dep("//third_party:png.BUILD"),
patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
+ system_build_file = clean_dep("//third_party/systemlibs:png.BUILD"),
)
tf_http_archive(
@@ -260,6 +264,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "ad68c1216c3a474cf360c7581a4001e952515b3649342100f2d7ca7c8e313da6",
strip_prefix = "sqlite-amalgamation-3240000",
build_file = clean_dep("//third_party:sqlite.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"),
)
tf_http_archive(
@@ -271,6 +276,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "34a7377ba834397db019e8eb122e551a49c98f49df75ec3fcc92b9a794a4f6d1",
strip_prefix = "giflib-5.1.4",
build_file = clean_dep("//third_party:gif.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:gif.BUILD"),
)
tf_http_archive(
@@ -282,6 +288,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a",
strip_prefix = "six-1.10.0",
build_file = clean_dep("//third_party:six.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:six.BUILD"),
)
tf_http_archive(
@@ -293,6 +300,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "ff6d2e2962d834acb125cc4dcc80c54a8c17c253f4cc9d9c43b5102a560bb75d",
strip_prefix = "astor-0.6.2",
build_file = clean_dep("//third_party:astor.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:astor.BUILD"),
)
tf_http_archive(
@@ -315,6 +323,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
strip_prefix = "termcolor-1.1.0",
build_file = clean_dep("//third_party:termcolor.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:termcolor.BUILD"),
)
tf_http_archive(
@@ -421,6 +430,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
],
strip_prefix = "pcre-8.42",
build_file = clean_dep("//third_party:pcre.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"),
)
tf_http_archive(
@@ -433,6 +443,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
],
strip_prefix = "swig-3.0.8",
build_file = clean_dep("//third_party:swig.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:swig.BUILD"),
)
tf_http_archive(
@@ -444,6 +455,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
],
strip_prefix = "curl-7.60.0",
build_file = clean_dep("//third_party:curl.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"),
)
tf_http_archive(
@@ -454,6 +466,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
],
sha256 = "50db9cf2221354485eb7c3bd55a4c27190caef7048a2a1a15fbe60a498f98b44",
strip_prefix = "grpc-1.13.0",
+ system_build_file = clean_dep("//third_party/systemlibs:grpc.BUILD"),
)
tf_http_archive(
@@ -472,11 +485,11 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
tf_http_archive(
name = "llvm",
urls = [
- "https://mirror.bazel.build/github.com/llvm-mirror/llvm/archive/d5d94ca3a7f8526c2e4e5f663f9dc79ae5d39d93.tar.gz",
- "https://github.com/llvm-mirror/llvm/archive/d5d94ca3a7f8526c2e4e5f663f9dc79ae5d39d93.tar.gz",
+ "https://mirror.bazel.build/github.com/llvm-mirror/llvm/archive/35ffbe6bcf3b755f30633d834534a892b4c5fb29.tar.gz",
+ "https://github.com/llvm-mirror/llvm/archive/35ffbe6bcf3b755f30633d834534a892b4c5fb29.tar.gz",
],
- sha256 = "280fdc888e2eb88a3a8cc4e7d3034fffc87f98e3e686be31f8c719c6e5b67d2d",
- strip_prefix = "llvm-d5d94ca3a7f8526c2e4e5f663f9dc79ae5d39d93",
+ sha256 = "42b3924b56339bb953b587f3e55788c8fefa51068756e6ac2ee4aed9c187bbb8",
+ strip_prefix = "llvm-35ffbe6bcf3b755f30633d834534a892b4c5fb29",
build_file = clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"),
)
@@ -489,6 +502,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
build_file = clean_dep("//third_party:lmdb.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:lmdb.BUILD"),
)
tf_http_archive(
@@ -500,6 +514,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6",
strip_prefix = "jsoncpp-1.8.4",
build_file = clean_dep("//third_party:jsoncpp.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"),
)
tf_http_archive(
@@ -521,6 +536,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
build_file = clean_dep("//third_party:zlib.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:zlib.BUILD"),
)
tf_http_archive(
@@ -542,6 +558,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4",
strip_prefix = "snappy-1.1.7",
build_file = clean_dep("//third_party:snappy.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:snappy.BUILD"),
)
tf_http_archive(
@@ -612,6 +629,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
sha256 = "3c8f25c02e806c3ce0ab5fb7da1817f89fc9732709024e2a81b6b82f7cc792a8",
strip_prefix = "jemalloc-4.4.0",
build_file = clean_dep("//third_party:jemalloc.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:jemalloc.BUILD"),
)
java_import_external(
@@ -690,6 +708,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
strip_prefix = "cython-0.28.4",
build_file = clean_dep("//third_party:cython.BUILD"),
delete = ["BUILD.bazel"],
+ system_build_file = clean_dep("//third_party/systemlibs:cython.BUILD"),
)
tf_http_archive(
@@ -722,6 +741,7 @@ def tf_workspace(path_prefix="", tf_repo_name=""):
"https://github.com/google/flatbuffers/archive/v1.9.0.tar.gz",
],
build_file = clean_dep("//third_party/flatbuffers:flatbuffers.BUILD"),
+ system_build_file = clean_dep("//third_party/systemlibs:flatbuffers.BUILD"),
)
native.new_http_archive(
diff --git a/third_party/examples/eager/spinn/spinn.py b/third_party/examples/eager/spinn/spinn.py
index 67456a5bdf..de63ebe9e6 100644
--- a/third_party/examples/eager/spinn/spinn.py
+++ b/third_party/examples/eager/spinn/spinn.py
@@ -419,7 +419,7 @@ class SNLIClassifierTrainer(tfe.Checkpointable):
# Create a custom learning rate Variable for the RMSProp optimizer, because
# the learning rate needs to be manually decayed later (see
# decay_learning_rate()).
- self._learning_rate = tfe.Variable(lr, name="learning_rate")
+ self._learning_rate = tf.Variable(lr, name="learning_rate")
self._optimizer = tf.train.RMSPropOptimizer(self._learning_rate,
epsilon=1e-6)
@@ -626,7 +626,7 @@ def train_or_infer_spinn(embed,
model = SNLIClassifier(config, embed)
global_step = tf.train.get_or_create_global_step()
trainer = SNLIClassifierTrainer(model, config.lr)
- checkpoint = tfe.Checkpoint(trainer=trainer, global_step=global_step)
+ checkpoint = tf.train.Checkpoint(trainer=trainer, global_step=global_step)
checkpoint.restore(tf.train.latest_checkpoint(config.logdir))
if inference_sentence_pair:
diff --git a/third_party/llvm/llvm.autogenerated.BUILD b/third_party/llvm/llvm.autogenerated.BUILD
index bf9f9ca9cf..c3b9ec4c25 100644
--- a/third_party/llvm/llvm.autogenerated.BUILD
+++ b/third_party/llvm/llvm.autogenerated.BUILD
@@ -28,9 +28,7 @@ llvm_host_triple = "x86_64-unknown-linux_gnu"
llvm_targets = [
"AArch64",
- # Uncomment to enable the AMDGPU backend.
- # TODO(phawkins): use a configure-time test.
- # "AMDGPU",
+ "AMDGPU",
"ARM",
"NVPTX",
"PowerPC",
@@ -256,13 +254,31 @@ llvm_target_list = [
("-gen-dag-isel", "lib/Target/AMDGPU/AMDGPUGenDAGISel.inc"),
("-gen-callingconv", "lib/Target/AMDGPU/AMDGPUGenCallingConv.inc"),
("-gen-subtarget", "lib/Target/AMDGPU/AMDGPUGenSubtargetInfo.inc"),
- ("-gen-tgt-intrinsic", "lib/Target/AMDGPU/AMDGPUGenIntrinsics.inc"),
+ ("-gen-tgt-intrinsic-impl", "lib/Target/AMDGPU/AMDGPUGenIntrinsicImpl.inc"),
+ ("-gen-tgt-intrinsic-enums", "lib/Target/AMDGPU/AMDGPUGenIntrinsicEnums.inc"),
("-gen-emitter", "lib/Target/AMDGPU/AMDGPUGenMCCodeEmitter.inc"),
("-gen-dfa-packetizer", "lib/Target/AMDGPU/AMDGPUGenDFAPacketizer.inc"),
("-gen-asm-writer", "lib/Target/AMDGPU/AMDGPUGenAsmWriter.inc"),
("-gen-asm-matcher", "lib/Target/AMDGPU/AMDGPUGenAsmMatcher.inc"),
("-gen-disassembler", "lib/Target/AMDGPU/AMDGPUGenDisassemblerTables.inc"),
("-gen-pseudo-lowering", "lib/Target/AMDGPU/AMDGPUGenMCPseudoLowering.inc"),
+ ("-gen-searchable-tables", "lib/Target/AMDGPU/AMDGPUGenSearchableTables.inc"),
+ ("-gen-global-isel", "lib/Target/AMDGPU/AMDGPUGenGlobalISel.inc"),
+ ],
+ },
+ {
+ "name": "AMDGPU",
+ "lower_name": "amdgpu_r600",
+ "short_name": "R600",
+ "tbl_outs": [
+ ("-gen-asm-writer", "lib/Target/AMDGPU/R600GenAsmWriter.inc"),
+ ("-gen-callingconv", "lib/Target/AMDGPU/R600GenCallingConv.inc"),
+ ("-gen-dag-isel", "lib/Target/AMDGPU/R600GenDAGISel.inc"),
+ ("-gen-dfa-packetizer", "lib/Target/AMDGPU/R600GenDFAPacketizer.inc"),
+ ("-gen-instr-info", "lib/Target/AMDGPU/R600GenInstrInfo.inc"),
+ ("-gen-emitter", "lib/Target/AMDGPU/R600GenMCCodeEmitter.inc"),
+ ("-gen-register-info", "lib/Target/AMDGPU/R600GenRegisterInfo.inc"),
+ ("-gen-subtarget", "lib/Target/AMDGPU/R600GenSubtargetInfo.inc"),
],
},
{
@@ -670,6 +686,7 @@ cc_library(
]),
copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AMDGPU"],
deps = [
+ ":amdgpu_r600_target_gen",
":amdgpu_target_gen",
":config",
":core",
@@ -692,6 +709,7 @@ cc_library(
]),
copts = llvm_copts + ["-Iexternal/llvm/lib/Target/AMDGPU"],
deps = [
+ ":amdgpu_r600_target_gen",
":amdgpu_target_gen",
":config",
":core",
diff --git a/third_party/mkl_dnn/BUILD b/third_party/mkl_dnn/BUILD
index 5b01f6e3e4..d075809ee9 100644
--- a/third_party/mkl_dnn/BUILD
+++ b/third_party/mkl_dnn/BUILD
@@ -1 +1,11 @@
licenses(["notice"])
+
+exports_files(["LICENSE"])
+
+config_setting(
+ name = "using_mkl_dnn_only",
+ values = {
+ "define": "using_mkl_dnn_only=true",
+ },
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/mkl_dnn/LICENSE b/third_party/mkl_dnn/LICENSE
new file mode 100644
index 0000000000..8dada3edaf
--- /dev/null
+++ b/third_party/mkl_dnn/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/mkl_dnn/build_defs.bzl b/third_party/mkl_dnn/build_defs.bzl
new file mode 100644
index 0000000000..7ce2a7d9b0
--- /dev/null
+++ b/third_party/mkl_dnn/build_defs.bzl
@@ -0,0 +1,13 @@
+def if_mkl_open_source_only(if_true, if_false = []):
+ """Shorthand for select()'ing on whether we're building with
+ MKL-DNN open source lib only, without depending on MKL binary form.
+
+ Returns a select statement which evaluates to if_true if we're building
+ with MKL-DNN open source lib only; otherwise the select statement
+ evaluates to if_false.
+ """
+ return select({
+ str(Label("//third_party/mkl_dnn:using_mkl_dnn_only")): if_true,
+ "//conditions:default": if_false,
+ })
diff --git a/third_party/mkl_dnn/mkldnn.BUILD b/third_party/mkl_dnn/mkldnn.BUILD
index 68f24aabae..57d2e1292b 100644
--- a/third_party/mkl_dnn/mkldnn.BUILD
+++ b/third_party/mkl_dnn/mkldnn.BUILD
@@ -1,5 +1,10 @@
exports_files(["LICENSE"])
+load(
+ "@org_tensorflow//third_party/mkl_dnn:build_defs.bzl",
+ "if_mkl_open_source_only",
+)
+
config_setting(
name = "clang_linux_x86_64",
values = {
@@ -15,7 +20,14 @@ cc_library(
"src/cpu/*.cpp",
]),
hdrs = glob(["include/*"]),
- copts = ["-fexceptions"] + select({
+ copts = [
+ "-fexceptions",
+ "-DUSE_MKL",
+ "-DUSE_CBLAS",
+ ] + if_mkl_open_source_only([
+ "-UUSE_MKL",
+ "-UUSE_CBLAS",
+ ]) + select({
"@org_tensorflow//tensorflow:linux_x86_64": [
"-fopenmp", # only works with gcc
],
@@ -33,4 +45,19 @@ cc_library(
],
nocopts = "-fno-exceptions",
visibility = ["//visibility:public"],
+ deps = select({
+ "@org_tensorflow//tensorflow:linux_x86_64": [
+ "@mkl_linux//:mkl_headers",
+ "@mkl_linux//:mkl_libs_linux",
+ ],
+ "@org_tensorflow//tensorflow:darwin": [
+ "@mkl_darwin//:mkl_headers",
+ "@mkl_darwin//:mkl_libs_darwin",
+ ],
+ "@org_tensorflow//tensorflow:windows": [
+ "@mkl_windows//:mkl_headers",
+ "@mkl_windows//:mkl_libs_windows",
+ ],
+ "//conditions:default": [],
+ }),
)
diff --git a/third_party/nccl/nccl_configure.bzl b/third_party/nccl/nccl_configure.bzl
index 9dfcb18369..5d1ebf0686 100644
--- a/third_party/nccl/nccl_configure.bzl
+++ b/third_party/nccl/nccl_configure.bzl
@@ -47,10 +47,10 @@ alias(
)
"""
+# A local build results in dynamic linking, so the license should not be included.
_NCCL_LOCAL_BUILD_TEMPLATE = """
filegroup(
name = "LICENSE",
- data = ["nccl/NCCL-SLA.txt"],
visibility = ["//visibility:public"],
)
diff --git a/third_party/repo.bzl b/third_party/repo.bzl
index 9cee1fcc4b..5cb42691c5 100644
--- a/third_party/repo.bzl
+++ b/third_party/repo.bzl
@@ -35,6 +35,15 @@ def _get_env_var(ctx, name):
else:
return None
+# Checks if we should use the system lib instead of the bundled one
+def _use_system_lib(ctx, name):
+ syslibenv = _get_env_var(ctx, "TF_SYSTEM_LIBS")
+ if syslibenv:
+ for n in syslibenv.strip().split(","):
+ if n.strip() == name:
+ return True
+ return False
+
# Executes specified command with arguments and calls 'fail' if it exited with
# non-zero code
def _execute_and_check_ret_code(repo_ctx, cmd_and_args):
@@ -75,17 +84,28 @@ def _tf_http_archive(ctx):
"Even if you don't have permission to mirror the file, please " +
"put the correctly formatted mirror URL there anyway, because " +
"someone will come along shortly thereafter and mirror the file.")
- ctx.download_and_extract(
- ctx.attr.urls,
- "",
- ctx.attr.sha256,
- ctx.attr.type,
- ctx.attr.strip_prefix)
- if ctx.attr.delete:
- _apply_delete(ctx, ctx.attr.delete)
- if ctx.attr.patch_file != None:
- _apply_patch(ctx, ctx.attr.patch_file)
- if ctx.attr.build_file != None:
+
+ use_syslib = _use_system_lib(ctx, ctx.attr.name)
+ if not use_syslib:
+ ctx.download_and_extract(
+ ctx.attr.urls,
+ "",
+ ctx.attr.sha256,
+ ctx.attr.type,
+ ctx.attr.strip_prefix)
+ if ctx.attr.delete:
+ _apply_delete(ctx, ctx.attr.delete)
+ if ctx.attr.patch_file != None:
+ _apply_patch(ctx, ctx.attr.patch_file)
+
+ if use_syslib and ctx.attr.system_build_file != None:
+ # Use BUILD.bazel to avoid conflict with third party projects with
+ # BUILD or build (directory) underneath.
+ ctx.template("BUILD.bazel", ctx.attr.system_build_file, {
+ "%prefix%": ".." if _repos_are_siblings() else "external",
+ }, False)
+
+ elif ctx.attr.build_file != None:
# Use BUILD.bazel to avoid conflict with third party projects with
# BUILD or build (directory) underneath.
ctx.template("BUILD.bazel", ctx.attr.build_file, {
@@ -102,7 +122,11 @@ tf_http_archive = repository_rule(
"delete": attr.string_list(),
"patch_file": attr.label(),
"build_file": attr.label(),
- })
+ "system_build_file": attr.label(),
+ },
+ environ=[
+ "TF_SYSTEM_LIBS",
+ ])
"""Downloads and creates Bazel repos for dependencies.
This is a swappable replacement for both http_archive() and
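
The repository rule now consults the `TF_SYSTEM_LIBS` environment variable before downloading an archive. A plain-Python sketch of how that comma-separated value is interpreted by `_use_system_lib`; the sample value is hypothetical:

```python
# Illustrative sketch of the TF_SYSTEM_LIBS handling in _use_system_lib above.
# The env value is a comma-separated list of repository names; the sample
# string here is a hypothetical configuration, not a recommended one.
def use_system_lib(tf_system_libs, name):
    if not tf_system_libs:
        return False
    return name in [n.strip() for n in tf_system_libs.strip().split(',')]

env = 'zlib_archive, grpc'  # e.g. exported before running configure/bazel
assert use_system_lib(env, 'grpc')
assert use_system_lib(env, 'zlib_archive')
assert not use_system_lib(env, 'jpeg')
```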
diff --git a/third_party/systemlibs/BUILD b/third_party/systemlibs/BUILD
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/systemlibs/BUILD
diff --git a/third_party/systemlibs/BUILD.tpl b/third_party/systemlibs/BUILD.tpl
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/systemlibs/BUILD.tpl
diff --git a/third_party/systemlibs/astor.BUILD b/third_party/systemlibs/astor.BUILD
new file mode 100644
index 0000000000..497ec4bcea
--- /dev/null
+++ b/third_party/systemlibs/astor.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # New BSD
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+py_library(
+ name = "astor",
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/build_defs.bzl.tpl b/third_party/systemlibs/build_defs.bzl.tpl
new file mode 100644
index 0000000000..3faa46c581
--- /dev/null
+++ b/third_party/systemlibs/build_defs.bzl.tpl
@@ -0,0 +1,32 @@
+# -*- Python -*-
+"""Skylark macros for system libraries.
+"""
+
+SYSTEM_LIBS_ENABLED = %{syslibs_enabled}
+
+SYSTEM_LIBS_LIST = [
+%{syslibs_list}
+]
+
+
+def if_any_system_libs(a, b=[]):
+ """Conditional which evaluates to 'a' if any system libraries are configured."""
+ if SYSTEM_LIBS_ENABLED:
+ return a
+ else:
+ return b
+
+
+def if_system_lib(lib, a, b=[]):
+ """Conditional which evaluates to 'a' if we're using the system version of lib"""
+
+ if SYSTEM_LIBS_ENABLED and lib in SYSTEM_LIBS_LIST:
+ return a
+ else:
+ return b
+
+
+def if_not_system_lib(lib, a, b=[]):
+ """Conditional which evaluates to 'a' if we're using the system version of lib"""
+
+ return if_system_lib(lib, b, a)
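
For clarity, a standalone sketch of how these template macros behave once the `%{...}` placeholders are filled in; the configuration values below are hypothetical:

```python
# Standalone sketch of the build_defs.bzl.tpl conditionals above, with a
# hypothetical configuration substituted for the %{...} placeholders.
SYSTEM_LIBS_ENABLED = True
SYSTEM_LIBS_LIST = ['grpc', 'zlib_archive']

def if_system_lib(lib, a, b=[]):
    return a if SYSTEM_LIBS_ENABLED and lib in SYSTEM_LIBS_LIST else b

def if_not_system_lib(lib, a, b=[]):
    return if_system_lib(lib, b, a)

# With grpc taken from the system, the bundled gRPC license deps drop out,
# matching the if_not_system_lib("grpc", [...]) use in tools/pip_package/BUILD.
assert if_not_system_lib('grpc', ['@grpc//:LICENSE']) == []
assert if_not_system_lib('jpeg', ['@jpeg//:LICENSE.md']) == ['@jpeg//:LICENSE.md']
```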
diff --git a/third_party/systemlibs/curl.BUILD b/third_party/systemlibs/curl.BUILD
new file mode 100644
index 0000000000..c5f125caa9
--- /dev/null
+++ b/third_party/systemlibs/curl.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # MIT/X derivative license
+
+filegroup(
+ name = "COPYING",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "curl",
+ linkopts = ["-lcurl"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/cython.BUILD b/third_party/systemlibs/cython.BUILD
new file mode 100644
index 0000000000..1d52587676
--- /dev/null
+++ b/third_party/systemlibs/cython.BUILD
@@ -0,0 +1,13 @@
+licenses(["notice"]) # Apache-2.0
+
+genrule(
+ name = "lncython",
+ outs = ["cython"],
+ cmd = "ln -s $$(which cython) $@",
+)
+
+sh_binary(
+ name = "cython_binary",
+ srcs = ["cython"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/flatbuffers.BUILD b/third_party/systemlibs/flatbuffers.BUILD
new file mode 100644
index 0000000000..14fceada82
--- /dev/null
+++ b/third_party/systemlibs/flatbuffers.BUILD
@@ -0,0 +1,38 @@
+licenses(["notice"]) # Apache 2.0
+
+filegroup(
+ name = "LICENSE.txt",
+ visibility = ["//visibility:public"],
+)
+
+# Public flatc library to compile flatbuffer files at runtime.
+cc_library(
+ name = "flatbuffers",
+ linkopts = ["-lflatbuffers"],
+ visibility = ["//visibility:public"],
+)
+
+# Public flatc compiler library.
+cc_library(
+ name = "flatc_library",
+ linkopts = ["-lflatbuffers"],
+ visibility = ["//visibility:public"],
+)
+
+genrule(
+ name = "lnflatc",
+ outs = ["flatc.bin"],
+ cmd = "ln -s $$(which flatc) $@",
+)
+
+# Public flatc compiler.
+sh_binary(
+ name = "flatc",
+ srcs = ["flatc.bin"],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "runtime_cc",
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/gif.BUILD b/third_party/systemlibs/gif.BUILD
new file mode 100644
index 0000000000..5eb2c918ba
--- /dev/null
+++ b/third_party/systemlibs/gif.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # MIT
+
+filegroup(
+ name = "COPYING",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "gif",
+ linkopts = ["-lgif"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/grpc.BUILD b/third_party/systemlibs/grpc.BUILD
new file mode 100644
index 0000000000..fd90eb0dd3
--- /dev/null
+++ b/third_party/systemlibs/grpc.BUILD
@@ -0,0 +1,54 @@
+licenses(["notice"]) # Apache v2
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "grpc",
+ linkopts = ["-lgrpc"],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "grpc++",
+ linkopts = ["-lgrpc++"],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "grpc_unsecure",
+ linkopts = ["-lgrpc_unsecure"],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "grpc++_unsecure",
+ linkopts = ["-lgrpc++_unsecure"],
+ visibility = ["//visibility:public"],
+)
+
+genrule(
+ name = "ln_grpc_cpp_plugin",
+ outs = ["grpc_cpp_plugin.bin"],
+ cmd = "ln -s $$(which grpc_cpp_plugin) $@",
+)
+
+sh_binary(
+ name = "grpc_cpp_plugin",
+ srcs = ["grpc_cpp_plugin.bin"],
+ visibility = ["//visibility:public"],
+)
+
+genrule(
+ name = "ln_grpc_python_plugin",
+ outs = ["grpc_python_plugin.bin"],
+ cmd = "ln -s $$(which grpc_python_plugin) $@",
+)
+
+sh_binary(
+ name = "grpc_python_plugin",
+ srcs = ["grpc_python_plugin.bin"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/jemalloc.BUILD b/third_party/systemlibs/jemalloc.BUILD
new file mode 100644
index 0000000000..6a48d582ba
--- /dev/null
+++ b/third_party/systemlibs/jemalloc.BUILD
@@ -0,0 +1,30 @@
+licenses(["notice"]) # BSD
+
+filegroup(
+ name = "COPYING",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "jemalloc_headers",
+ defines = [
+ "jemalloc_posix_memalign=posix_memalign",
+ "jemalloc_malloc=malloc",
+ "jemalloc_realloc=realloc",
+ "jemalloc_free=free",
+ ],
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "jemalloc_impl",
+ linkopts = ["-ljemalloc"],
+ defines = [
+ "jemalloc_posix_memalign=posix_memalign",
+ "jemalloc_malloc=malloc",
+ "jemalloc_realloc=realloc",
+ "jemalloc_free=free",
+ ],
+ visibility = ["//visibility:public"],
+ deps = [":jemalloc_headers"],
+)
diff --git a/third_party/systemlibs/jpeg.BUILD b/third_party/systemlibs/jpeg.BUILD
new file mode 100644
index 0000000000..f4f52da9bd
--- /dev/null
+++ b/third_party/systemlibs/jpeg.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # custom notice-style license, see LICENSE.md
+
+filegroup(
+ name = "LICENSE.md",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "jpeg",
+ linkopts = ["-ljpeg"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/jsoncpp.BUILD b/third_party/systemlibs/jsoncpp.BUILD
new file mode 100644
index 0000000000..cf91917cfb
--- /dev/null
+++ b/third_party/systemlibs/jsoncpp.BUILD
@@ -0,0 +1,37 @@
+licenses(["unencumbered"]) # Public Domain or MIT
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+HEADERS = [
+ "include/json/autolink.h",
+ "include/json/config.h",
+ "include/json/features.h",
+ "include/json/forwards.h",
+ "include/json/json.h",
+ "include/json/reader.h",
+ "include/json/value.h",
+ "include/json/version.h",
+ "include/json/writer.h",
+]
+
+genrule(
+ name = "link_headers",
+ outs = HEADERS,
+ cmd = """
+ for i in $(OUTS); do
+ i=$${i##*/}
+ ln -vsf /usr/include/jsoncpp/json/$$i $(@D)/include/json/$$i
+ done
+ """,
+)
+
+cc_library(
+ name = "jsoncpp",
+ hdrs = HEADERS,
+ includes = ["."],
+ linkopts = ["-ljsoncpp"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/lmdb.BUILD b/third_party/systemlibs/lmdb.BUILD
new file mode 100644
index 0000000000..6177b095ec
--- /dev/null
+++ b/third_party/systemlibs/lmdb.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # OpenLDAP Public License
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "lmdb",
+ linkopts = ["-llmdb"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/nasm.BUILD b/third_party/systemlibs/nasm.BUILD
new file mode 100644
index 0000000000..10ef8d8832
--- /dev/null
+++ b/third_party/systemlibs/nasm.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # BSD 2-clause
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+sh_binary(
+ name = "nasm",
+ srcs = ["nasm"],
+ visibility = ["@jpeg//:__pkg__"],
+)
diff --git a/third_party/systemlibs/pcre.BUILD b/third_party/systemlibs/pcre.BUILD
new file mode 100644
index 0000000000..df74238847
--- /dev/null
+++ b/third_party/systemlibs/pcre.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # BSD
+
+filegroup(
+ name = "LICENCE",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "pcre",
+ linkopts = ["-lpcre"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/png.BUILD b/third_party/systemlibs/png.BUILD
new file mode 100644
index 0000000000..fc6b6f2d8b
--- /dev/null
+++ b/third_party/systemlibs/png.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # BSD/MIT-like license
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "png",
+ linkopts = ["-lpng"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/re2.BUILD b/third_party/systemlibs/re2.BUILD
new file mode 100644
index 0000000000..c18e252dbc
--- /dev/null
+++ b/third_party/systemlibs/re2.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # BSD/MIT-like license
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "re2",
+ linkopts = ["-lre2"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/six.BUILD b/third_party/systemlibs/six.BUILD
new file mode 100644
index 0000000000..ff9b1a540b
--- /dev/null
+++ b/third_party/systemlibs/six.BUILD
@@ -0,0 +1,11 @@
+licenses(["notice"]) # MIT
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+py_library(
+ name = "six",
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/snappy.BUILD b/third_party/systemlibs/snappy.BUILD
new file mode 100644
index 0000000000..fd2db9e2df
--- /dev/null
+++ b/third_party/systemlibs/snappy.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # BSD 3-Clause
+
+filegroup(
+ name = "COPYING",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "snappy",
+ linkopts = ["-lsnappy"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/sqlite.BUILD b/third_party/systemlibs/sqlite.BUILD
new file mode 100644
index 0000000000..20ee1ebbef
--- /dev/null
+++ b/third_party/systemlibs/sqlite.BUILD
@@ -0,0 +1,15 @@
+licenses(["unencumbered"]) # Public Domain
+
+# Production build of SQLite library that's baked into TensorFlow.
+cc_library(
+ name = "org_sqlite",
+ linkopts = ["-lsqlite3"],
+ visibility = ["//visibility:public"],
+)
+
+# This is a Copybara sync helper for Google.
+py_library(
+ name = "python",
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/swig.BUILD b/third_party/systemlibs/swig.BUILD
new file mode 100644
index 0000000000..4c9b74dadb
--- /dev/null
+++ b/third_party/systemlibs/swig.BUILD
@@ -0,0 +1,23 @@
+licenses(["restricted"]) # GPLv3
+
+filegroup(
+ name = "LICENSE",
+ visibility = ["//visibility:public"],
+)
+
+filegroup(
+ name = "templates",
+ visibility = ["//visibility:public"],
+)
+
+genrule(
+ name = "lnswiglink",
+ outs = ["swiglink"],
+ cmd = "ln -s $$(which swig) $@",
+)
+
+sh_binary(
+ name = "swig",
+ srcs = ["swiglink"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/syslibs_configure.bzl b/third_party/systemlibs/syslibs_configure.bzl
new file mode 100644
index 0000000000..07a44c317e
--- /dev/null
+++ b/third_party/systemlibs/syslibs_configure.bzl
@@ -0,0 +1,160 @@
+# -*- Python -*-
+"""Repository rule for system library autoconfiguration.
+
+`syslibs_configure` depends on the following environment variables:
+
+ * `TF_SYSTEM_LIBS`: list of third party dependencies that should use
+ the system version instead
+"""
+
+_TF_SYSTEM_LIBS="TF_SYSTEM_LIBS"
+
+VALID_LIBS=[
+ "astor_archive",
+ "com_googlesource_code_re2",
+ "curl",
+ "cython",
+ "flatbuffers",
+ "gif_archive",
+ "grpc",
+ "jemalloc",
+ "jpeg",
+ "jsoncpp_git",
+ "lmdb",
+ "nasm",
+ "org_sqlite",
+ "pcre",
+ "png_archive",
+ "six_archive",
+ "snappy",
+ "swig",
+ "termcolor_archive",
+ "zlib_archive",
+]
+
+
+def auto_configure_fail(msg):
+ """Output failure message when syslibs configuration fails."""
+ red = "\033[0;31m"
+ no_color = "\033[0m"
+ fail("\n%sSystem Library Configuration Error:%s %s\n" % (red, no_color, msg))
+
+
+def _is_windows(repository_ctx):
+ """Returns true if the host operating system is windows."""
+ os_name = repository_ctx.os.name.lower()
+ if os_name.find("windows") != -1:
+ return True
+ return False
+
+
+def _enable_syslibs(repository_ctx):
+ s = repository_ctx.os.environ.get(_TF_SYSTEM_LIBS, '').strip()
+ if not _is_windows(repository_ctx) and s != None and s != '':
+ return True
+ return False
+
+
+def _get_system_lib_list(repository_ctx):
+ """Gets the list of deps that should use the system lib.
+
+ Args:
+ repository_ctx: The repository context.
+
+ Returns:
+ A string version of a python list
+ """
+ if _TF_SYSTEM_LIBS not in repository_ctx.os.environ:
+ return []
+
+ libenv = repository_ctx.os.environ[_TF_SYSTEM_LIBS].strip()
+ libs = []
+
+ for lib in list(libenv.split(',')):
+ lib = lib.strip()
+ if lib == "":
+ continue
+ if lib not in VALID_LIBS:
+ auto_configure_fail("Invalid system lib set: %s" % lib)
+ return []
+ libs.append(lib)
+
+ return libs
+
+
+def _format_system_lib_list(repository_ctx):
+ """Formats the list of deps that should use the system lib.
+
+ Args:
+ repository_ctx: The repository context.
+
+ Returns:
+ A string version of the python list of deps that should use the system lib.
+ """
+ libs = _get_system_lib_list(repository_ctx)
+ ret = ''
+ for lib in libs:
+ ret += "'%s',\n" % lib
+
+ return ret
+
+
+def _tpl(repository_ctx, tpl, substitutions={}, out=None):
+ if not out:
+ out = tpl.replace(":", "")
+ repository_ctx.template(
+ out,
+ Label("//third_party/systemlibs%s.tpl" % tpl),
+ substitutions,
+ False)
+
+
+def _create_dummy_repository(repository_ctx):
+ """Creates the dummy repository to build with all bundled libraries."""
+
+ _tpl(repository_ctx, ":BUILD")
+ _tpl(repository_ctx, ":build_defs.bzl",
+ {
+ "%{syslibs_enabled}": 'False',
+ "%{syslibs_list}": '',
+ })
+
+
+def _create_local_repository(repository_ctx):
+ """Creates the repository to build with system libraries."""
+
+ _tpl(repository_ctx, ":BUILD")
+ _tpl(repository_ctx, ":build_defs.bzl",
+ {
+ "%{syslibs_enabled}": 'True',
+ "%{syslibs_list}": _format_system_lib_list(repository_ctx),
+ })
+
+
+def _syslibs_autoconf_impl(repository_ctx):
+ """Implementation of the syslibs_configure repository rule."""
+ if not _enable_syslibs(repository_ctx):
+ _create_dummy_repository(repository_ctx)
+ else:
+ _create_local_repository(repository_ctx)
+
+
+syslibs_configure = repository_rule(
+ implementation = _syslibs_autoconf_impl,
+ environ = [
+ _TF_SYSTEM_LIBS,
+ ],
+)
+
+"""Configures the build to link to system libraries
+instead of using bundled versions.
+
+Add the following to your WORKSPACE file:
+
+```python
+syslibs_configure(name = "local_config_syslibs")
+```
+
+Args:
+ name: A unique name for this workspace rule.
+"""
diff --git a/third_party/systemlibs/termcolor.BUILD b/third_party/systemlibs/termcolor.BUILD
new file mode 100644
index 0000000000..915eb621d5
--- /dev/null
+++ b/third_party/systemlibs/termcolor.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # MIT
+
+filegroup(
+ name = "COPYING.txt",
+ visibility = ["//visibility:public"],
+)
+
+py_library(
+ name = "termcolor",
+ srcs_version = "PY2AND3",
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/systemlibs/zlib.BUILD b/third_party/systemlibs/zlib.BUILD
new file mode 100644
index 0000000000..69462ae6cb
--- /dev/null
+++ b/third_party/systemlibs/zlib.BUILD
@@ -0,0 +1,12 @@
+licenses(["notice"]) # BSD/MIT-like license (for zlib)
+
+filegroup(
+ name = "zlib.h",
+ visibility = ["//visibility:public"],
+)
+
+cc_library(
+ name = "zlib",
+ linkopts = ["-lz"],
+ visibility = ["//visibility:public"],
+)
diff --git a/third_party/toolchains/cpus/py/BUILD b/third_party/toolchains/cpus/py/BUILD
index c175742cbf..1235988abb 100644
--- a/third_party/toolchains/cpus/py/BUILD
+++ b/third_party/toolchains/cpus/py/BUILD
@@ -6,18 +6,24 @@ licenses(["restricted"])
package(default_visibility = ["//visibility:public"])
+# To build a Python C/C++ extension on Windows, we need to link to the python import library pythonXY.lib
+# See https://docs.python.org/3/extending/windows.html
+cc_import(
+ name = "python_lib",
+ interface_library = select({
+ ":windows": ":python_import_lib",
+ # A placeholder for Unix platforms which makes --no_build happy.
+ "//conditions:default": "not-existing.lib",
+ }),
+ system_provided = 1,
+)
+
cc_library(
name = "python_headers",
hdrs = [":python_include"],
- data = select({
- ":windows": [":python_import_lib"],
- "//conditions:default": [],
- }),
includes = ["python_include"],
- linkopts = select({
- # TODO(pcloudy): Ideally, this should just go into deps after resolving
- # https://github.com/bazelbuild/bazel/issues/3237,
- ":windows": ["$(locations :python_import_lib)"],
+ deps = select({
+ ":windows": [":python_lib"],
"//conditions:default": [],
}),
)
@@ -37,161 +43,135 @@ config_setting(
genrule(
name = "python_include",
outs = [
+ "python_include/Python-ast.h",
+ "python_include/Python.h",
+ "python_include/abstract.h",
+ "python_include/asdl.h",
+ "python_include/ast.h",
+ "python_include/bitset.h",
+ "python_include/boolobject.h",
+ "python_include/bufferobject.h",
+ "python_include/bytearrayobject.h",
+ "python_include/bytes_methods.h",
+ "python_include/bytesobject.h",
+ "python_include/cStringIO.h",
+ "python_include/cellobject.h",
+ "python_include/ceval.h",
+ "python_include/classobject.h",
+ "python_include/cobject.h",
"python_include/code.h",
+ "python_include/codecs.h",
+ "python_include/compile.h",
+ "python_include/complexobject.h",
+ "python_include/datetime.h",
+ "python_include/descrobject.h",
+ "python_include/dictobject.h",
"python_include/dtoa.h",
- "python_include/tupleobject.h",
- "python_include/object.h",
- "python_include/ast.h",
- "python_include/pymacconfig.h",
+ "python_include/enumobject.h",
"python_include/errcode.h",
+ "python_include/eval.h",
+ "python_include/fileobject.h",
+ "python_include/floatobject.h",
"python_include/frameobject.h",
- "python_include/pgenheaders.h",
- "python_include/cellobject.h",
+ "python_include/funcobject.h",
+ "python_include/genobject.h",
+ "python_include/graminit.h",
+ "python_include/grammar.h",
+ "python_include/import.h",
"python_include/intobject.h",
- "python_include/pythread.h",
- "python_include/cStringIO.h",
- "python_include/boolobject.h",
+ "python_include/intrcheck.h",
+ "python_include/iterobject.h",
+ "python_include/listobject.h",
+ "python_include/longintrepr.h",
+ "python_include/longobject.h",
+ "python_include/marshal.h",
+ "python_include/memoryobject.h",
+ "python_include/metagrammar.h",
+ "python_include/methodobject.h",
"python_include/modsupport.h",
- "python_include/import.h",
- "python_include/pymath.h",
+ "python_include/moduleobject.h",
"python_include/node.h",
- "python_include/funcobject.h",
- "python_include/eval.h",
- "python_include/longintrepr.h",
- "python_include/floatobject.h",
- "python_include/rangeobject.h",
- "python_include/pyfpe.h",
- "python_include/pystrcmp.h",
- "python_include/dictobject.h",
- "python_include/pyarena.h",
+ "python_include/object.h",
"python_include/objimpl.h",
- "python_include/bitset.h",
- "python_include/memoryobject.h",
- "python_include/bytearrayobject.h",
+ "python_include/opcode.h",
+ "python_include/osdefs.h",
+ "python_include/parsetok.h",
+ "python_include/patchlevel.h",
+ "python_include/pgen.h",
+ "python_include/pgenheaders.h",
+ "python_include/py_curses.h",
+ "python_include/pyarena.h",
+ "python_include/pycapsule.h",
+ "python_include/pyconfig.h",
+ "python_include/pyctype.h",
"python_include/pydebug.h",
"python_include/pyerrors.h",
- "python_include/weakrefobject.h",
- "python_include/grammar.h",
- "python_include/symtable.h",
- "python_include/longobject.h",
- "python_include/structmember.h",
- "python_include/enumobject.h",
- "python_include/classobject.h",
- "python_include/unicodeobject.h",
- "python_include/sliceobject.h",
- "python_include/pystrtod.h",
- "python_include/genobject.h",
- "python_include/pymactoolbox.h",
- "python_include/compile.h",
"python_include/pyexpat.h",
- "python_include/asdl.h",
- "python_include/codecs.h",
- "python_include/pyctype.h",
- "python_include/sysmodule.h",
- "python_include/methodobject.h",
- "python_include/graminit.h",
- "python_include/cobject.h",
- "python_include/intrcheck.h",
- "python_include/pyport.h",
- "python_include/warnings.h",
- "python_include/osdefs.h",
- "python_include/fileobject.h",
- "python_include/stringobject.h",
- "python_include/timefuncs.h",
- "python_include/traceback.h",
- "python_include/ceval.h",
- "python_include/bytes_methods.h",
- "python_include/pyconfig.h",
- "python_include/Python.h",
- "python_include/moduleobject.h",
- "python_include/pystate.h",
- "python_include/descrobject.h",
- "python_include/ucnhash.h",
+ "python_include/pyfpe.h",
"python_include/pygetopt.h",
+ "python_include/pymacconfig.h",
+ "python_include/pymactoolbox.h",
+ "python_include/pymath.h",
"python_include/pymem.h",
- "python_include/complexobject.h",
- "python_include/structseq.h",
- "python_include/datetime.h",
+ "python_include/pyport.h",
+ "python_include/pystate.h",
+ "python_include/pystrcmp.h",
+ "python_include/pystrtod.h",
"python_include/pythonrun.h",
- "python_include/numpy/oldnumeric.h",
- "python_include/numpy/npy_1_7_deprecated_api.h",
- "python_include/numpy/ufunc_api.txt",
- "python_include/numpy/multiarray_api.txt",
- "python_include/numpy/halffloat.h",
- "python_include/numpy/npy_common.h",
- "python_include/numpy/utils.h",
- "python_include/numpy/npy_interrupt.h",
- "python_include/numpy/npy_endian.h",
- "python_include/numpy/__ufunc_api.h",
- "python_include/numpy/_neighborhood_iterator_imp.h",
- "python_include/numpy/ufuncobject.h",
- "python_include/numpy/ndarraytypes.h",
- "python_include/numpy/npy_math.h",
- "python_include/numpy/noprefix.h",
- "python_include/numpy/npy_3kcompat.h",
- "python_include/numpy/arrayscalars.h",
- "python_include/numpy/npy_os.h",
- "python_include/numpy/ndarrayobject.h",
- "python_include/numpy/npy_no_deprecated_api.h",
- "python_include/numpy/arrayobject.h",
- "python_include/numpy/_numpyconfig.h",
- "python_include/numpy/__multiarray_api.h",
- "python_include/numpy/npy_cpu.h",
- "python_include/numpy/old_defines.h",
- "python_include/numpy/numpyconfig.h",
- "python_include/pycapsule.h",
+ "python_include/pythread.h",
+ "python_include/rangeobject.h",
"python_include/setobject.h",
- "python_include/listobject.h",
- "python_include/bytesobject.h",
- "python_include/pgen.h",
- "python_include/patchlevel.h",
- "python_include/opcode.h",
- "python_include/parsetok.h",
- "python_include/marshal.h",
+ "python_include/sliceobject.h",
+ "python_include/stringobject.h",
+ "python_include/structmember.h",
+ "python_include/structseq.h",
+ "python_include/symtable.h",
+ "python_include/sysmodule.h",
+ "python_include/timefuncs.h",
"python_include/token.h",
- "python_include/iterobject.h",
- "python_include/abstract.h",
- "python_include/py_curses.h",
- "python_include/metagrammar.h",
- "python_include/bufferobject.h",
- "python_include/Python-ast.h",
+ "python_include/traceback.h",
+ "python_include/tupleobject.h",
+ "python_include/ucnhash.h",
+ "python_include/unicodeobject.h",
+ "python_include/warnings.h",
+ "python_include/weakrefobject.h",
],
cmd = """
-cp "/usr/include/python2.7/code.h" "$(@D)/python_include/code.h" && cp "/usr/include/python2.7/dtoa.h" "$(@D)/python_include/dtoa.h" && cp "/usr/include/python2.7/tupleobject.h" "$(@D)/python_include/tupleobject.h" && cp "/usr/include/python2.7/object.h" "$(@D)/python_include/object.h" && cp "/usr/include/python2.7/ast.h" "$(@D)/python_include/ast.h" && cp "/usr/include/python2.7/pymacconfig.h" "$(@D)/python_include/pymacconfig.h" && cp "/usr/include/python2.7/errcode.h" "$(@D)/python_include/errcode.h" && cp "/usr/include/python2.7/frameobject.h" "$(@D)/python_include/frameobject.h" && cp "/usr/include/python2.7/pgenheaders.h" "$(@D)/python_include/pgenheaders.h" && cp "/usr/include/python2.7/cellobject.h" "$(@D)/python_include/cellobject.h" && cp "/usr/include/python2.7/intobject.h" "$(@D)/python_include/intobject.h" && cp "/usr/include/python2.7/pythread.h" "$(@D)/python_include/pythread.h" && cp "/usr/include/python2.7/cStringIO.h" "$(@D)/python_include/cStringIO.h" && cp "/usr/include/python2.7/boolobject.h" "$(@D)/python_include/boolobject.h" && cp "/usr/include/python2.7/modsupport.h" "$(@D)/python_include/modsupport.h" && cp "/usr/include/python2.7/import.h" "$(@D)/python_include/import.h" && cp "/usr/include/python2.7/pymath.h" "$(@D)/python_include/pymath.h" && cp "/usr/include/python2.7/node.h" "$(@D)/python_include/node.h" && cp "/usr/include/python2.7/funcobject.h" "$(@D)/python_include/funcobject.h" && cp "/usr/include/python2.7/eval.h" "$(@D)/python_include/eval.h" && cp "/usr/include/python2.7/longintrepr.h" "$(@D)/python_include/longintrepr.h" && cp "/usr/include/python2.7/floatobject.h" "$(@D)/python_include/floatobject.h" && cp "/usr/include/python2.7/rangeobject.h" "$(@D)/python_include/rangeobject.h" && cp "/usr/include/python2.7/pyfpe.h" "$(@D)/python_include/pyfpe.h" && cp "/usr/include/python2.7/pystrcmp.h" "$(@D)/python_include/pystrcmp.h" && cp "/usr/include/python2.7/dictobject.h" "$(@D)/python_include/dictobject.h" && cp "/usr/include/python2.7/pyarena.h" "$(@D)/python_include/pyarena.h" && cp "/usr/include/python2.7/objimpl.h" "$(@D)/python_include/objimpl.h" && cp "/usr/include/python2.7/bitset.h" "$(@D)/python_include/bitset.h" && cp "/usr/include/python2.7/memoryobject.h" "$(@D)/python_include/memoryobject.h" && cp "/usr/include/python2.7/bytearrayobject.h" "$(@D)/python_include/bytearrayobject.h" && cp "/usr/include/python2.7/pydebug.h" "$(@D)/python_include/pydebug.h" && cp "/usr/include/python2.7/pyerrors.h" "$(@D)/python_include/pyerrors.h" && cp "/usr/include/python2.7/weakrefobject.h" "$(@D)/python_include/weakrefobject.h" && cp "/usr/include/python2.7/grammar.h" "$(@D)/python_include/grammar.h" && cp "/usr/include/python2.7/symtable.h" "$(@D)/python_include/symtable.h" && cp "/usr/include/python2.7/longobject.h" "$(@D)/python_include/longobject.h" && cp "/usr/include/python2.7/structmember.h" "$(@D)/python_include/structmember.h" && cp "/usr/include/python2.7/enumobject.h" "$(@D)/python_include/enumobject.h" && cp "/usr/include/python2.7/classobject.h" "$(@D)/python_include/classobject.h" && cp "/usr/include/python2.7/unicodeobject.h" "$(@D)/python_include/unicodeobject.h" && cp "/usr/include/python2.7/sliceobject.h" "$(@D)/python_include/sliceobject.h" && cp "/usr/include/python2.7/pystrtod.h" "$(@D)/python_include/pystrtod.h" && cp "/usr/include/python2.7/genobject.h" "$(@D)/python_include/genobject.h" && cp "/usr/include/python2.7/pymactoolbox.h" "$(@D)/python_include/pymactoolbox.h" && cp "/usr/include/python2.7/compile.h" 
"$(@D)/python_include/compile.h" && cp "/usr/include/python2.7/pyexpat.h" "$(@D)/python_include/pyexpat.h" && cp "/usr/include/python2.7/asdl.h" "$(@D)/python_include/asdl.h" && cp "/usr/include/python2.7/codecs.h" "$(@D)/python_include/codecs.h" && cp "/usr/include/python2.7/pyctype.h" "$(@D)/python_include/pyctype.h" && cp "/usr/include/python2.7/sysmodule.h" "$(@D)/python_include/sysmodule.h" && cp "/usr/include/python2.7/methodobject.h" "$(@D)/python_include/methodobject.h" && cp "/usr/include/python2.7/graminit.h" "$(@D)/python_include/graminit.h" && cp "/usr/include/python2.7/cobject.h" "$(@D)/python_include/cobject.h" && cp "/usr/include/python2.7/intrcheck.h" "$(@D)/python_include/intrcheck.h" && cp "/usr/include/python2.7/pyport.h" "$(@D)/python_include/pyport.h" && cp "/usr/include/python2.7/warnings.h" "$(@D)/python_include/warnings.h" && cp "/usr/include/python2.7/osdefs.h" "$(@D)/python_include/osdefs.h" && cp "/usr/include/python2.7/fileobject.h" "$(@D)/python_include/fileobject.h" && cp "/usr/include/python2.7/stringobject.h" "$(@D)/python_include/stringobject.h" && cp "/usr/include/python2.7/timefuncs.h" "$(@D)/python_include/timefuncs.h" && cp "/usr/include/python2.7/traceback.h" "$(@D)/python_include/traceback.h" && cp "/usr/include/python2.7/ceval.h" "$(@D)/python_include/ceval.h" && cp "/usr/include/python2.7/bytes_methods.h" "$(@D)/python_include/bytes_methods.h" && cp "/usr/include/python2.7/pyconfig.h" "$(@D)/python_include/pyconfig.h" && cp "/usr/include/python2.7/Python.h" "$(@D)/python_include/Python.h" && cp "/usr/include/python2.7/moduleobject.h" "$(@D)/python_include/moduleobject.h" && cp "/usr/include/python2.7/pystate.h" "$(@D)/python_include/pystate.h" && cp "/usr/include/python2.7/descrobject.h" "$(@D)/python_include/descrobject.h" && cp "/usr/include/python2.7/ucnhash.h" "$(@D)/python_include/ucnhash.h" && cp "/usr/include/python2.7/pygetopt.h" "$(@D)/python_include/pygetopt.h" && cp "/usr/include/python2.7/pymem.h" "$(@D)/python_include/pymem.h" && cp "/usr/include/python2.7/complexobject.h" "$(@D)/python_include/complexobject.h" && cp "/usr/include/python2.7/structseq.h" "$(@D)/python_include/structseq.h" && cp "/usr/include/python2.7/datetime.h" "$(@D)/python_include/datetime.h" && cp "/usr/include/python2.7/pythonrun.h" "$(@D)/python_include/pythonrun.h" && cp "/usr/include/python2.7/numpy/oldnumeric.h" "$(@D)/python_include/numpy/oldnumeric.h" && cp "/usr/include/python2.7/numpy/npy_1_7_deprecated_api.h" "$(@D)/python_include/numpy/npy_1_7_deprecated_api.h" && cp "/usr/include/python2.7/numpy/ufunc_api.txt" "$(@D)/python_include/numpy/ufunc_api.txt" && cp "/usr/include/python2.7/numpy/multiarray_api.txt" "$(@D)/python_include/numpy/multiarray_api.txt" && cp "/usr/include/python2.7/numpy/halffloat.h" "$(@D)/python_include/numpy/halffloat.h" && cp "/usr/include/python2.7/numpy/npy_common.h" "$(@D)/python_include/numpy/npy_common.h" && cp "/usr/include/python2.7/numpy/utils.h" "$(@D)/python_include/numpy/utils.h" && cp "/usr/include/python2.7/numpy/npy_interrupt.h" "$(@D)/python_include/numpy/npy_interrupt.h" && cp "/usr/include/python2.7/numpy/npy_endian.h" "$(@D)/python_include/numpy/npy_endian.h" && cp "/usr/include/python2.7/numpy/__ufunc_api.h" "$(@D)/python_include/numpy/__ufunc_api.h" && cp "/usr/include/python2.7/numpy/_neighborhood_iterator_imp.h" "$(@D)/python_include/numpy/_neighborhood_iterator_imp.h" && cp "/usr/include/python2.7/numpy/ufuncobject.h" "$(@D)/python_include/numpy/ufuncobject.h" && cp 
"/usr/include/python2.7/numpy/ndarraytypes.h" "$(@D)/python_include/numpy/ndarraytypes.h" && cp "/usr/include/python2.7/numpy/npy_math.h" "$(@D)/python_include/numpy/npy_math.h" && cp "/usr/include/python2.7/numpy/noprefix.h" "$(@D)/python_include/numpy/noprefix.h" && cp "/usr/include/python2.7/numpy/npy_3kcompat.h" "$(@D)/python_include/numpy/npy_3kcompat.h" && cp "/usr/include/python2.7/numpy/arrayscalars.h" "$(@D)/python_include/numpy/arrayscalars.h" && cp "/usr/include/python2.7/numpy/npy_os.h" "$(@D)/python_include/numpy/npy_os.h" && cp "/usr/include/python2.7/numpy/ndarrayobject.h" "$(@D)/python_include/numpy/ndarrayobject.h" && cp "/usr/include/python2.7/numpy/npy_no_deprecated_api.h" "$(@D)/python_include/numpy/npy_no_deprecated_api.h" && cp "/usr/include/python2.7/numpy/arrayobject.h" "$(@D)/python_include/numpy/arrayobject.h" && cp "/usr/include/python2.7/numpy/_numpyconfig.h" "$(@D)/python_include/numpy/_numpyconfig.h" && cp "/usr/include/python2.7/numpy/__multiarray_api.h" "$(@D)/python_include/numpy/__multiarray_api.h" && cp "/usr/include/python2.7/numpy/npy_cpu.h" "$(@D)/python_include/numpy/npy_cpu.h" && cp "/usr/include/python2.7/numpy/old_defines.h" "$(@D)/python_include/numpy/old_defines.h" && cp "/usr/include/python2.7/numpy/numpyconfig.h" "$(@D)/python_include/numpy/numpyconfig.h" && cp "/usr/include/python2.7/pycapsule.h" "$(@D)/python_include/pycapsule.h" && cp "/usr/include/python2.7/setobject.h" "$(@D)/python_include/setobject.h" && cp "/usr/include/python2.7/listobject.h" "$(@D)/python_include/listobject.h" && cp "/usr/include/python2.7/bytesobject.h" "$(@D)/python_include/bytesobject.h" && cp "/usr/include/python2.7/pgen.h" "$(@D)/python_include/pgen.h" && cp "/usr/include/python2.7/patchlevel.h" "$(@D)/python_include/patchlevel.h" && cp "/usr/include/python2.7/opcode.h" "$(@D)/python_include/opcode.h" && cp "/usr/include/python2.7/parsetok.h" "$(@D)/python_include/parsetok.h" && cp "/usr/include/python2.7/marshal.h" "$(@D)/python_include/marshal.h" && cp "/usr/include/python2.7/token.h" "$(@D)/python_include/token.h" && cp "/usr/include/python2.7/iterobject.h" "$(@D)/python_include/iterobject.h" && cp "/usr/include/python2.7/abstract.h" "$(@D)/python_include/abstract.h" && cp "/usr/include/python2.7/py_curses.h" "$(@D)/python_include/py_curses.h" && cp "/usr/include/python2.7/metagrammar.h" "$(@D)/python_include/metagrammar.h" && cp "/usr/include/python2.7/bufferobject.h" "$(@D)/python_include/bufferobject.h" && cp "/usr/include/python2.7/Python-ast.h" "$(@D)/python_include/Python-ast.h"
+cp "/usr/include/python2.7/Python-ast.h" "$(@D)/python_include/Python-ast.h" && cp "/usr/include/python2.7/Python.h" "$(@D)/python_include/Python.h" && cp "/usr/include/python2.7/abstract.h" "$(@D)/python_include/abstract.h" && cp "/usr/include/python2.7/asdl.h" "$(@D)/python_include/asdl.h" && cp "/usr/include/python2.7/ast.h" "$(@D)/python_include/ast.h" && cp "/usr/include/python2.7/bitset.h" "$(@D)/python_include/bitset.h" && cp "/usr/include/python2.7/boolobject.h" "$(@D)/python_include/boolobject.h" && cp "/usr/include/python2.7/bufferobject.h" "$(@D)/python_include/bufferobject.h" && cp "/usr/include/python2.7/bytearrayobject.h" "$(@D)/python_include/bytearrayobject.h" && cp "/usr/include/python2.7/bytes_methods.h" "$(@D)/python_include/bytes_methods.h" && cp "/usr/include/python2.7/bytesobject.h" "$(@D)/python_include/bytesobject.h" && cp "/usr/include/python2.7/cStringIO.h" "$(@D)/python_include/cStringIO.h" && cp "/usr/include/python2.7/cellobject.h" "$(@D)/python_include/cellobject.h" && cp "/usr/include/python2.7/ceval.h" "$(@D)/python_include/ceval.h" && cp "/usr/include/python2.7/classobject.h" "$(@D)/python_include/classobject.h" && cp "/usr/include/python2.7/cobject.h" "$(@D)/python_include/cobject.h" && cp "/usr/include/python2.7/code.h" "$(@D)/python_include/code.h" && cp "/usr/include/python2.7/codecs.h" "$(@D)/python_include/codecs.h" && cp "/usr/include/python2.7/compile.h" "$(@D)/python_include/compile.h" && cp "/usr/include/python2.7/complexobject.h" "$(@D)/python_include/complexobject.h" && cp "/usr/include/python2.7/datetime.h" "$(@D)/python_include/datetime.h" && cp "/usr/include/python2.7/descrobject.h" "$(@D)/python_include/descrobject.h" && cp "/usr/include/python2.7/dictobject.h" "$(@D)/python_include/dictobject.h" && cp "/usr/include/python2.7/dtoa.h" "$(@D)/python_include/dtoa.h" && cp "/usr/include/python2.7/enumobject.h" "$(@D)/python_include/enumobject.h" && cp "/usr/include/python2.7/errcode.h" "$(@D)/python_include/errcode.h" && cp "/usr/include/python2.7/eval.h" "$(@D)/python_include/eval.h" && cp "/usr/include/python2.7/fileobject.h" "$(@D)/python_include/fileobject.h" && cp "/usr/include/python2.7/floatobject.h" "$(@D)/python_include/floatobject.h" && cp "/usr/include/python2.7/frameobject.h" "$(@D)/python_include/frameobject.h" && cp "/usr/include/python2.7/funcobject.h" "$(@D)/python_include/funcobject.h" && cp "/usr/include/python2.7/genobject.h" "$(@D)/python_include/genobject.h" && cp "/usr/include/python2.7/graminit.h" "$(@D)/python_include/graminit.h" && cp "/usr/include/python2.7/grammar.h" "$(@D)/python_include/grammar.h" && cp "/usr/include/python2.7/import.h" "$(@D)/python_include/import.h" && cp "/usr/include/python2.7/intobject.h" "$(@D)/python_include/intobject.h" && cp "/usr/include/python2.7/intrcheck.h" "$(@D)/python_include/intrcheck.h" && cp "/usr/include/python2.7/iterobject.h" "$(@D)/python_include/iterobject.h" && cp "/usr/include/python2.7/listobject.h" "$(@D)/python_include/listobject.h" && cp "/usr/include/python2.7/longintrepr.h" "$(@D)/python_include/longintrepr.h" && cp "/usr/include/python2.7/longobject.h" "$(@D)/python_include/longobject.h" && cp "/usr/include/python2.7/marshal.h" "$(@D)/python_include/marshal.h" && cp "/usr/include/python2.7/memoryobject.h" "$(@D)/python_include/memoryobject.h" && cp "/usr/include/python2.7/metagrammar.h" "$(@D)/python_include/metagrammar.h" && cp "/usr/include/python2.7/methodobject.h" "$(@D)/python_include/methodobject.h" && cp "/usr/include/python2.7/modsupport.h" 
"$(@D)/python_include/modsupport.h" && cp "/usr/include/python2.7/moduleobject.h" "$(@D)/python_include/moduleobject.h" && cp "/usr/include/python2.7/node.h" "$(@D)/python_include/node.h" && cp "/usr/include/python2.7/object.h" "$(@D)/python_include/object.h" && cp "/usr/include/python2.7/objimpl.h" "$(@D)/python_include/objimpl.h" && cp "/usr/include/python2.7/opcode.h" "$(@D)/python_include/opcode.h" && cp "/usr/include/python2.7/osdefs.h" "$(@D)/python_include/osdefs.h" && cp "/usr/include/python2.7/parsetok.h" "$(@D)/python_include/parsetok.h" && cp "/usr/include/python2.7/patchlevel.h" "$(@D)/python_include/patchlevel.h" && cp "/usr/include/python2.7/pgen.h" "$(@D)/python_include/pgen.h" && cp "/usr/include/python2.7/pgenheaders.h" "$(@D)/python_include/pgenheaders.h" && cp "/usr/include/python2.7/py_curses.h" "$(@D)/python_include/py_curses.h" && cp "/usr/include/python2.7/pyarena.h" "$(@D)/python_include/pyarena.h" && cp "/usr/include/python2.7/pycapsule.h" "$(@D)/python_include/pycapsule.h" && cp "/usr/include/python2.7/pyconfig.h" "$(@D)/python_include/pyconfig.h" && cp "/usr/include/python2.7/pyctype.h" "$(@D)/python_include/pyctype.h" && cp "/usr/include/python2.7/pydebug.h" "$(@D)/python_include/pydebug.h" && cp "/usr/include/python2.7/pyerrors.h" "$(@D)/python_include/pyerrors.h" && cp "/usr/include/python2.7/pyexpat.h" "$(@D)/python_include/pyexpat.h" && cp "/usr/include/python2.7/pyfpe.h" "$(@D)/python_include/pyfpe.h" && cp "/usr/include/python2.7/pygetopt.h" "$(@D)/python_include/pygetopt.h" && cp "/usr/include/python2.7/pymacconfig.h" "$(@D)/python_include/pymacconfig.h" && cp "/usr/include/python2.7/pymactoolbox.h" "$(@D)/python_include/pymactoolbox.h" && cp "/usr/include/python2.7/pymath.h" "$(@D)/python_include/pymath.h" && cp "/usr/include/python2.7/pymem.h" "$(@D)/python_include/pymem.h" && cp "/usr/include/python2.7/pyport.h" "$(@D)/python_include/pyport.h" && cp "/usr/include/python2.7/pystate.h" "$(@D)/python_include/pystate.h" && cp "/usr/include/python2.7/pystrcmp.h" "$(@D)/python_include/pystrcmp.h" && cp "/usr/include/python2.7/pystrtod.h" "$(@D)/python_include/pystrtod.h" && cp "/usr/include/python2.7/pythonrun.h" "$(@D)/python_include/pythonrun.h" && cp "/usr/include/python2.7/pythread.h" "$(@D)/python_include/pythread.h" && cp "/usr/include/python2.7/rangeobject.h" "$(@D)/python_include/rangeobject.h" && cp "/usr/include/python2.7/setobject.h" "$(@D)/python_include/setobject.h" && cp "/usr/include/python2.7/sliceobject.h" "$(@D)/python_include/sliceobject.h" && cp "/usr/include/python2.7/stringobject.h" "$(@D)/python_include/stringobject.h" && cp "/usr/include/python2.7/structmember.h" "$(@D)/python_include/structmember.h" && cp "/usr/include/python2.7/structseq.h" "$(@D)/python_include/structseq.h" && cp "/usr/include/python2.7/symtable.h" "$(@D)/python_include/symtable.h" && cp "/usr/include/python2.7/sysmodule.h" "$(@D)/python_include/sysmodule.h" && cp "/usr/include/python2.7/timefuncs.h" "$(@D)/python_include/timefuncs.h" && cp "/usr/include/python2.7/token.h" "$(@D)/python_include/token.h" && cp "/usr/include/python2.7/traceback.h" "$(@D)/python_include/traceback.h" && cp "/usr/include/python2.7/tupleobject.h" "$(@D)/python_include/tupleobject.h" && cp "/usr/include/python2.7/ucnhash.h" "$(@D)/python_include/ucnhash.h" && cp "/usr/include/python2.7/unicodeobject.h" "$(@D)/python_include/unicodeobject.h" && cp "/usr/include/python2.7/warnings.h" "$(@D)/python_include/warnings.h" && cp "/usr/include/python2.7/weakrefobject.h" 
"$(@D)/python_include/weakrefobject.h"
""",
)
genrule(
name = "numpy_include",
outs = [
- "numpy_include/numpy/oldnumeric.h",
- "numpy_include/numpy/npy_1_7_deprecated_api.h",
- "numpy_include/numpy/ufunc_api.txt",
- "numpy_include/numpy/multiarray_api.txt",
- "numpy_include/numpy/halffloat.h",
- "numpy_include/numpy/npy_common.h",
- "numpy_include/numpy/utils.h",
- "numpy_include/numpy/npy_interrupt.h",
- "numpy_include/numpy/npy_endian.h",
+ "numpy_include/numpy/__multiarray_api.h",
"numpy_include/numpy/__ufunc_api.h",
"numpy_include/numpy/_neighborhood_iterator_imp.h",
- "numpy_include/numpy/ufuncobject.h",
+ "numpy_include/numpy/_numpyconfig.h",
+ "numpy_include/numpy/arrayobject.h",
+ "numpy_include/numpy/arrayscalars.h",
+ "numpy_include/numpy/halffloat.h",
+ "numpy_include/numpy/multiarray_api.txt",
+ "numpy_include/numpy/ndarrayobject.h",
"numpy_include/numpy/ndarraytypes.h",
- "numpy_include/numpy/npy_math.h",
"numpy_include/numpy/noprefix.h",
+ "numpy_include/numpy/npy_1_7_deprecated_api.h",
"numpy_include/numpy/npy_3kcompat.h",
- "numpy_include/numpy/arrayscalars.h",
- "numpy_include/numpy/npy_os.h",
- "numpy_include/numpy/ndarrayobject.h",
- "numpy_include/numpy/npy_no_deprecated_api.h",
- "numpy_include/numpy/arrayobject.h",
- "numpy_include/numpy/_numpyconfig.h",
- "numpy_include/numpy/__multiarray_api.h",
+ "numpy_include/numpy/npy_common.h",
"numpy_include/numpy/npy_cpu.h",
- "numpy_include/numpy/old_defines.h",
+ "numpy_include/numpy/npy_endian.h",
+ "numpy_include/numpy/npy_interrupt.h",
+ "numpy_include/numpy/npy_math.h",
+ "numpy_include/numpy/npy_no_deprecated_api.h",
+ "numpy_include/numpy/npy_os.h",
"numpy_include/numpy/numpyconfig.h",
+ "numpy_include/numpy/old_defines.h",
+ "numpy_include/numpy/oldnumeric.h",
+ "numpy_include/numpy/ufunc_api.txt",
+ "numpy_include/numpy/ufuncobject.h",
+ "numpy_include/numpy/utils.h",
],
cmd = """
-cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/oldnumeric.h" "$(@D)/numpy_include/numpy/oldnumeric.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h" "$(@D)/numpy_include/numpy/npy_1_7_deprecated_api.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/ufunc_api.txt" "$(@D)/numpy_include/numpy/ufunc_api.txt" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/multiarray_api.txt" "$(@D)/numpy_include/numpy/multiarray_api.txt" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/halffloat.h" "$(@D)/numpy_include/numpy/halffloat.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_common.h" "$(@D)/numpy_include/numpy/npy_common.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/utils.h" "$(@D)/numpy_include/numpy/utils.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_interrupt.h" "$(@D)/numpy_include/numpy/npy_interrupt.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_endian.h" "$(@D)/numpy_include/numpy/npy_endian.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/__ufunc_api.h" "$(@D)/numpy_include/numpy/__ufunc_api.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h" "$(@D)/numpy_include/numpy/_neighborhood_iterator_imp.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/ufuncobject.h" "$(@D)/numpy_include/numpy/ufuncobject.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/ndarraytypes.h" "$(@D)/numpy_include/numpy/ndarraytypes.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_math.h" "$(@D)/numpy_include/numpy/npy_math.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/noprefix.h" "$(@D)/numpy_include/numpy/noprefix.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_3kcompat.h" "$(@D)/numpy_include/numpy/npy_3kcompat.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/arrayscalars.h" "$(@D)/numpy_include/numpy/arrayscalars.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_os.h" "$(@D)/numpy_include/numpy/npy_os.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/ndarrayobject.h" "$(@D)/numpy_include/numpy/ndarrayobject.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_no_deprecated_api.h" "$(@D)/numpy_include/numpy/npy_no_deprecated_api.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/arrayobject.h" "$(@D)/numpy_include/numpy/arrayobject.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/_numpyconfig.h" "$(@D)/numpy_include/numpy/_numpyconfig.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/__multiarray_api.h" "$(@D)/numpy_include/numpy/__multiarray_api.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_cpu.h" "$(@D)/numpy_include/numpy/npy_cpu.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/old_defines.h" "$(@D)/numpy_include/numpy/old_defines.h" && cp "/usr/lib/python2.7/dist-packages/numpy/core/include/numpy/numpyconfig.h" "$(@D)/numpy_include/numpy/numpyconfig.h"
+cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/__multiarray_api.h" "$(@D)/numpy_include/numpy/__multiarray_api.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/__ufunc_api.h" "$(@D)/numpy_include/numpy/__ufunc_api.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h" "$(@D)/numpy_include/numpy/_neighborhood_iterator_imp.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/_numpyconfig.h" "$(@D)/numpy_include/numpy/_numpyconfig.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/arrayobject.h" "$(@D)/numpy_include/numpy/arrayobject.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/arrayscalars.h" "$(@D)/numpy_include/numpy/arrayscalars.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/halffloat.h" "$(@D)/numpy_include/numpy/halffloat.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/multiarray_api.txt" "$(@D)/numpy_include/numpy/multiarray_api.txt" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/ndarrayobject.h" "$(@D)/numpy_include/numpy/ndarrayobject.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/ndarraytypes.h" "$(@D)/numpy_include/numpy/ndarraytypes.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/noprefix.h" "$(@D)/numpy_include/numpy/noprefix.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h" "$(@D)/numpy_include/numpy/npy_1_7_deprecated_api.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_3kcompat.h" "$(@D)/numpy_include/numpy/npy_3kcompat.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_common.h" "$(@D)/numpy_include/numpy/npy_common.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_cpu.h" "$(@D)/numpy_include/numpy/npy_cpu.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_endian.h" "$(@D)/numpy_include/numpy/npy_endian.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_interrupt.h" "$(@D)/numpy_include/numpy/npy_interrupt.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_math.h" "$(@D)/numpy_include/numpy/npy_math.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_no_deprecated_api.h" "$(@D)/numpy_include/numpy/npy_no_deprecated_api.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/npy_os.h" "$(@D)/numpy_include/numpy/npy_os.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/numpyconfig.h" "$(@D)/numpy_include/numpy/numpyconfig.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/old_defines.h" "$(@D)/numpy_include/numpy/old_defines.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/oldnumeric.h" "$(@D)/numpy_include/numpy/oldnumeric.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/ufunc_api.txt" "$(@D)/numpy_include/numpy/ufunc_api.txt" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/ufuncobject.h" "$(@D)/numpy_include/numpy/ufuncobject.h" && cp "/usr/local/lib/python2.7/dist-packages/numpy/core/include/numpy/utils.h" "$(@D)/numpy_include/numpy/utils.h"
""",
)
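(Editorial note: the two genrules above are machine-generated by TensorFlow's Python configuration step; the hunks simply re-emit the header lists and the cp chain in sorted order and move the numpy headers from /usr/lib to /usr/local/lib. As a rough, hypothetical illustration only — the real generator is not part of this diff — the cmd string can be assembled from a sorted header list like this Starlark helper:

def _copy_cmd(src_dir, out_dir, headers):
    # Build one 'cp src dst && cp src dst && ...' chain, mirroring the
    # generated genrule cmd above. Hypothetical helper, for illustration.
    cmds = ['cp "%s/%s" "$(@D)/%s/%s"' % (src_dir, h, out_dir, h) for h in sorted(headers)]
    return " && ".join(cmds)

# e.g. _copy_cmd("/usr/include/python2.7", "python_include", ["object.h", "Python.h"])
# yields the Python.h copy first, matching the alphabetical order seen above.)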
diff --git a/third_party/toolchains/cpus/py3/BUILD b/third_party/toolchains/cpus/py3/BUILD
index 932a25239f..d47256ebef 100644
--- a/third_party/toolchains/cpus/py3/BUILD
+++ b/third_party/toolchains/cpus/py3/BUILD
@@ -6,18 +6,24 @@ licenses(["restricted"])
package(default_visibility = ["//visibility:public"])
+# To build Python C/C++ extension on Windows, we need to link to python import library pythonXY.lib
+# See https://docs.python.org/3/extending/windows.html
+cc_import(
+ name = "python_lib",
+ interface_library = select({
+ ":windows": ":python_import_lib",
+ # A placeholder for Unix platforms which makes --no_build happy.
+ "//conditions:default": "not-existing.lib",
+ }),
+ system_provided = 1,
+)
+
cc_library(
name = "python_headers",
hdrs = [":python_include"],
- data = select({
- ":windows": [":python_import_lib"],
- "//conditions:default": [],
- }),
includes = ["python_include"],
- linkopts = select({
- # TODO(pcloudy): Ideally, this should just go into deps after resolving
- # https://github.com/bazelbuild/bazel/issues/3237,
- ":windows": ["$(locations :python_import_lib)"],
+ deps = select({
+ ":windows": [":python_lib"],
"//conditions:default": [],
}),
)
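(Editorial note: a minimal, hypothetical consumer sketch — target and source names invented — showing the effect of the hunk above: because :python_headers now lists :python_lib in deps, a Windows build of a Python extension links against the pythonXY.lib import library transitively, instead of through the "$(locations :python_import_lib)" linkopts workaround.

cc_binary(
    name = "my_extension.so",  # hypothetical extension module
    srcs = ["my_extension.cc"],
    linkshared = 1,
    deps = ["//third_party/toolchains/cpus/py3:python_headers"],
))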
@@ -37,143 +43,143 @@ config_setting(
genrule(
name = "python_include",
outs = [
- "python_include/code.h",
- "python_include/dtoa.h",
- "python_include/tupleobject.h",
- "python_include/object.h",
- "python_include/ast.h",
- "python_include/pymacconfig.h",
- "python_include/errcode.h",
- "python_include/frameobject.h",
- "python_include/typeslots.h",
- "python_include/pgenheaders.h",
- "python_include/cellobject.h",
- "python_include/pythread.h",
- "python_include/boolobject.h",
+ "python_include/Python-ast.h",
+ "python_include/Python.h",
+ "python_include/abstract.h",
"python_include/accu.h",
- "python_include/modsupport.h",
- "python_include/import.h",
- "python_include/pymath.h",
- "python_include/node.h",
- "python_include/funcobject.h",
- "python_include/eval.h",
- "python_include/pyatomic.h",
- "python_include/longintrepr.h",
- "python_include/floatobject.h",
- "python_include/rangeobject.h",
- "python_include/pyfpe.h",
- "python_include/pystrcmp.h",
- "python_include/fileutils.h",
- "python_include/dictobject.h",
- "python_include/pyarena.h",
- "python_include/osmodule.h",
- "python_include/objimpl.h",
+ "python_include/asdl.h",
+ "python_include/ast.h",
"python_include/bitset.h",
- "python_include/memoryobject.h",
+ "python_include/bltinmodule.h",
+ "python_include/boolobject.h",
"python_include/bytearrayobject.h",
- "python_include/pydebug.h",
- "python_include/pyerrors.h",
- "python_include/weakrefobject.h",
- "python_include/grammar.h",
- "python_include/symtable.h",
- "python_include/longobject.h",
- "python_include/structmember.h",
- "python_include/enumobject.h",
- "python_include/pymacro.h",
+ "python_include/bytes_methods.h",
+ "python_include/bytesobject.h",
+ "python_include/cellobject.h",
+ "python_include/ceval.h",
"python_include/classobject.h",
- "python_include/unicodeobject.h",
- "python_include/sliceobject.h",
- "python_include/pystrtod.h",
- "python_include/genobject.h",
- "python_include/compile.h",
- "python_include/pyexpat.h",
- "python_include/asdl.h",
+ "python_include/code.h",
"python_include/codecs.h",
+ "python_include/compile.h",
+ "python_include/complexobject.h",
+ "python_include/datetime.h",
+ "python_include/descrobject.h",
+ "python_include/dictobject.h",
+ "python_include/dtoa.h",
"python_include/dynamic_annotations.h",
- "python_include/pyctype.h",
- "python_include/sysmodule.h",
- "python_include/methodobject.h",
+ "python_include/enumobject.h",
+ "python_include/errcode.h",
+ "python_include/eval.h",
+ "python_include/fileobject.h",
+ "python_include/fileutils.h",
+ "python_include/floatobject.h",
+ "python_include/frameobject.h",
+ "python_include/funcobject.h",
+ "python_include/genobject.h",
"python_include/graminit.h",
- "python_include/bltinmodule.h",
+ "python_include/grammar.h",
+ "python_include/import.h",
"python_include/intrcheck.h",
- "python_include/pyport.h",
- "python_include/warnings.h",
- "python_include/osdefs.h",
- "python_include/pydtrace.h",
- "python_include/pylifecycle.h",
- "python_include/fileobject.h",
- "python_include/pytime.h",
- "python_include/traceback.h",
- "python_include/ceval.h",
- "python_include/bytes_methods.h",
- "python_include/namespaceobject.h",
- "python_include/pyconfig.h",
- "python_include/Python.h",
+ "python_include/iterobject.h",
+ "python_include/listobject.h",
+ "python_include/longintrepr.h",
+ "python_include/longobject.h",
+ "python_include/marshal.h",
+ "python_include/memoryobject.h",
+ "python_include/metagrammar.h",
+ "python_include/methodobject.h",
+ "python_include/modsupport.h",
"python_include/moduleobject.h",
- "python_include/pystate.h",
- "python_include/descrobject.h",
+ "python_include/namespaceobject.h",
+ "python_include/node.h",
+ "python_include/object.h",
+ "python_include/objimpl.h",
"python_include/odictobject.h",
- "python_include/ucnhash.h",
+ "python_include/opcode.h",
+ "python_include/osdefs.h",
+ "python_include/osmodule.h",
+ "python_include/parsetok.h",
+ "python_include/patchlevel.h",
+ "python_include/pgen.h",
+ "python_include/pgenheaders.h",
+ "python_include/py_curses.h",
+ "python_include/pyarena.h",
+ "python_include/pyatomic.h",
+ "python_include/pycapsule.h",
+ "python_include/pyconfig.h",
+ "python_include/pyctype.h",
+ "python_include/pydebug.h",
+ "python_include/pydtrace.h",
+ "python_include/pyerrors.h",
+ "python_include/pyexpat.h",
+ "python_include/pyfpe.h",
"python_include/pygetopt.h",
+ "python_include/pyhash.h",
+ "python_include/pylifecycle.h",
+ "python_include/pymacconfig.h",
+ "python_include/pymacro.h",
+ "python_include/pymath.h",
"python_include/pymem.h",
- "python_include/complexobject.h",
- "python_include/structseq.h",
- "python_include/datetime.h",
+ "python_include/pyport.h",
+ "python_include/pystate.h",
+ "python_include/pystrcmp.h",
+ "python_include/pystrhex.h",
+ "python_include/pystrtod.h",
"python_include/pythonrun.h",
- "python_include/pyhash.h",
- "python_include/pycapsule.h",
+ "python_include/pythread.h",
+ "python_include/pytime.h",
+ "python_include/rangeobject.h",
"python_include/setobject.h",
- "python_include/listobject.h",
- "python_include/bytesobject.h",
- "python_include/pgen.h",
- "python_include/patchlevel.h",
- "python_include/opcode.h",
- "python_include/parsetok.h",
- "python_include/pystrhex.h",
- "python_include/marshal.h",
+ "python_include/sliceobject.h",
+ "python_include/structmember.h",
+ "python_include/structseq.h",
+ "python_include/symtable.h",
+ "python_include/sysmodule.h",
"python_include/token.h",
- "python_include/iterobject.h",
- "python_include/abstract.h",
- "python_include/py_curses.h",
- "python_include/metagrammar.h",
- "python_include/Python-ast.h",
+ "python_include/traceback.h",
+ "python_include/tupleobject.h",
+ "python_include/typeslots.h",
+ "python_include/ucnhash.h",
+ "python_include/unicodeobject.h",
+ "python_include/warnings.h",
+ "python_include/weakrefobject.h",
],
cmd = """
-cp "/opt/python3.6/include/python3.6m/code.h" "$(@D)/python_include/code.h" && cp "/opt/python3.6/include/python3.6m/dtoa.h" "$(@D)/python_include/dtoa.h" && cp "/opt/python3.6/include/python3.6m/tupleobject.h" "$(@D)/python_include/tupleobject.h" && cp "/opt/python3.6/include/python3.6m/object.h" "$(@D)/python_include/object.h" && cp "/opt/python3.6/include/python3.6m/ast.h" "$(@D)/python_include/ast.h" && cp "/opt/python3.6/include/python3.6m/pymacconfig.h" "$(@D)/python_include/pymacconfig.h" && cp "/opt/python3.6/include/python3.6m/errcode.h" "$(@D)/python_include/errcode.h" && cp "/opt/python3.6/include/python3.6m/frameobject.h" "$(@D)/python_include/frameobject.h" && cp "/opt/python3.6/include/python3.6m/typeslots.h" "$(@D)/python_include/typeslots.h" && cp "/opt/python3.6/include/python3.6m/pgenheaders.h" "$(@D)/python_include/pgenheaders.h" && cp "/opt/python3.6/include/python3.6m/cellobject.h" "$(@D)/python_include/cellobject.h" && cp "/opt/python3.6/include/python3.6m/pythread.h" "$(@D)/python_include/pythread.h" && cp "/opt/python3.6/include/python3.6m/boolobject.h" "$(@D)/python_include/boolobject.h" && cp "/opt/python3.6/include/python3.6m/accu.h" "$(@D)/python_include/accu.h" && cp "/opt/python3.6/include/python3.6m/modsupport.h" "$(@D)/python_include/modsupport.h" && cp "/opt/python3.6/include/python3.6m/import.h" "$(@D)/python_include/import.h" && cp "/opt/python3.6/include/python3.6m/pymath.h" "$(@D)/python_include/pymath.h" && cp "/opt/python3.6/include/python3.6m/node.h" "$(@D)/python_include/node.h" && cp "/opt/python3.6/include/python3.6m/funcobject.h" "$(@D)/python_include/funcobject.h" && cp "/opt/python3.6/include/python3.6m/eval.h" "$(@D)/python_include/eval.h" && cp "/opt/python3.6/include/python3.6m/pyatomic.h" "$(@D)/python_include/pyatomic.h" && cp "/opt/python3.6/include/python3.6m/longintrepr.h" "$(@D)/python_include/longintrepr.h" && cp "/opt/python3.6/include/python3.6m/floatobject.h" "$(@D)/python_include/floatobject.h" && cp "/opt/python3.6/include/python3.6m/rangeobject.h" "$(@D)/python_include/rangeobject.h" && cp "/opt/python3.6/include/python3.6m/pyfpe.h" "$(@D)/python_include/pyfpe.h" && cp "/opt/python3.6/include/python3.6m/pystrcmp.h" "$(@D)/python_include/pystrcmp.h" && cp "/opt/python3.6/include/python3.6m/fileutils.h" "$(@D)/python_include/fileutils.h" && cp "/opt/python3.6/include/python3.6m/dictobject.h" "$(@D)/python_include/dictobject.h" && cp "/opt/python3.6/include/python3.6m/pyarena.h" "$(@D)/python_include/pyarena.h" && cp "/opt/python3.6/include/python3.6m/osmodule.h" "$(@D)/python_include/osmodule.h" && cp "/opt/python3.6/include/python3.6m/objimpl.h" "$(@D)/python_include/objimpl.h" && cp "/opt/python3.6/include/python3.6m/bitset.h" "$(@D)/python_include/bitset.h" && cp "/opt/python3.6/include/python3.6m/memoryobject.h" "$(@D)/python_include/memoryobject.h" && cp "/opt/python3.6/include/python3.6m/bytearrayobject.h" "$(@D)/python_include/bytearrayobject.h" && cp "/opt/python3.6/include/python3.6m/pydebug.h" "$(@D)/python_include/pydebug.h" && cp "/opt/python3.6/include/python3.6m/pyerrors.h" "$(@D)/python_include/pyerrors.h" && cp "/opt/python3.6/include/python3.6m/weakrefobject.h" "$(@D)/python_include/weakrefobject.h" && cp "/opt/python3.6/include/python3.6m/grammar.h" "$(@D)/python_include/grammar.h" && cp "/opt/python3.6/include/python3.6m/symtable.h" "$(@D)/python_include/symtable.h" && cp "/opt/python3.6/include/python3.6m/longobject.h" "$(@D)/python_include/longobject.h" && cp "/opt/python3.6/include/python3.6m/structmember.h" 
"$(@D)/python_include/structmember.h" && cp "/opt/python3.6/include/python3.6m/enumobject.h" "$(@D)/python_include/enumobject.h" && cp "/opt/python3.6/include/python3.6m/pymacro.h" "$(@D)/python_include/pymacro.h" && cp "/opt/python3.6/include/python3.6m/classobject.h" "$(@D)/python_include/classobject.h" && cp "/opt/python3.6/include/python3.6m/unicodeobject.h" "$(@D)/python_include/unicodeobject.h" && cp "/opt/python3.6/include/python3.6m/sliceobject.h" "$(@D)/python_include/sliceobject.h" && cp "/opt/python3.6/include/python3.6m/pystrtod.h" "$(@D)/python_include/pystrtod.h" && cp "/opt/python3.6/include/python3.6m/genobject.h" "$(@D)/python_include/genobject.h" && cp "/opt/python3.6/include/python3.6m/compile.h" "$(@D)/python_include/compile.h" && cp "/opt/python3.6/include/python3.6m/pyexpat.h" "$(@D)/python_include/pyexpat.h" && cp "/opt/python3.6/include/python3.6m/asdl.h" "$(@D)/python_include/asdl.h" && cp "/opt/python3.6/include/python3.6m/codecs.h" "$(@D)/python_include/codecs.h" && cp "/opt/python3.6/include/python3.6m/dynamic_annotations.h" "$(@D)/python_include/dynamic_annotations.h" && cp "/opt/python3.6/include/python3.6m/pyctype.h" "$(@D)/python_include/pyctype.h" && cp "/opt/python3.6/include/python3.6m/sysmodule.h" "$(@D)/python_include/sysmodule.h" && cp "/opt/python3.6/include/python3.6m/methodobject.h" "$(@D)/python_include/methodobject.h" && cp "/opt/python3.6/include/python3.6m/graminit.h" "$(@D)/python_include/graminit.h" && cp "/opt/python3.6/include/python3.6m/bltinmodule.h" "$(@D)/python_include/bltinmodule.h" && cp "/opt/python3.6/include/python3.6m/intrcheck.h" "$(@D)/python_include/intrcheck.h" && cp "/opt/python3.6/include/python3.6m/pyport.h" "$(@D)/python_include/pyport.h" && cp "/opt/python3.6/include/python3.6m/warnings.h" "$(@D)/python_include/warnings.h" && cp "/opt/python3.6/include/python3.6m/osdefs.h" "$(@D)/python_include/osdefs.h" && cp "/opt/python3.6/include/python3.6m/pydtrace.h" "$(@D)/python_include/pydtrace.h" && cp "/opt/python3.6/include/python3.6m/pylifecycle.h" "$(@D)/python_include/pylifecycle.h" && cp "/opt/python3.6/include/python3.6m/fileobject.h" "$(@D)/python_include/fileobject.h" && cp "/opt/python3.6/include/python3.6m/pytime.h" "$(@D)/python_include/pytime.h" && cp "/opt/python3.6/include/python3.6m/traceback.h" "$(@D)/python_include/traceback.h" && cp "/opt/python3.6/include/python3.6m/ceval.h" "$(@D)/python_include/ceval.h" && cp "/opt/python3.6/include/python3.6m/bytes_methods.h" "$(@D)/python_include/bytes_methods.h" && cp "/opt/python3.6/include/python3.6m/namespaceobject.h" "$(@D)/python_include/namespaceobject.h" && cp "/opt/python3.6/include/python3.6m/pyconfig.h" "$(@D)/python_include/pyconfig.h" && cp "/opt/python3.6/include/python3.6m/Python.h" "$(@D)/python_include/Python.h" && cp "/opt/python3.6/include/python3.6m/moduleobject.h" "$(@D)/python_include/moduleobject.h" && cp "/opt/python3.6/include/python3.6m/pystate.h" "$(@D)/python_include/pystate.h" && cp "/opt/python3.6/include/python3.6m/descrobject.h" "$(@D)/python_include/descrobject.h" && cp "/opt/python3.6/include/python3.6m/odictobject.h" "$(@D)/python_include/odictobject.h" && cp "/opt/python3.6/include/python3.6m/ucnhash.h" "$(@D)/python_include/ucnhash.h" && cp "/opt/python3.6/include/python3.6m/pygetopt.h" "$(@D)/python_include/pygetopt.h" && cp "/opt/python3.6/include/python3.6m/pymem.h" "$(@D)/python_include/pymem.h" && cp "/opt/python3.6/include/python3.6m/complexobject.h" "$(@D)/python_include/complexobject.h" && cp 
"/opt/python3.6/include/python3.6m/structseq.h" "$(@D)/python_include/structseq.h" && cp "/opt/python3.6/include/python3.6m/datetime.h" "$(@D)/python_include/datetime.h" && cp "/opt/python3.6/include/python3.6m/pythonrun.h" "$(@D)/python_include/pythonrun.h" && cp "/opt/python3.6/include/python3.6m/pyhash.h" "$(@D)/python_include/pyhash.h" && cp "/opt/python3.6/include/python3.6m/pycapsule.h" "$(@D)/python_include/pycapsule.h" && cp "/opt/python3.6/include/python3.6m/setobject.h" "$(@D)/python_include/setobject.h" && cp "/opt/python3.6/include/python3.6m/listobject.h" "$(@D)/python_include/listobject.h" && cp "/opt/python3.6/include/python3.6m/bytesobject.h" "$(@D)/python_include/bytesobject.h" && cp "/opt/python3.6/include/python3.6m/pgen.h" "$(@D)/python_include/pgen.h" && cp "/opt/python3.6/include/python3.6m/patchlevel.h" "$(@D)/python_include/patchlevel.h" && cp "/opt/python3.6/include/python3.6m/opcode.h" "$(@D)/python_include/opcode.h" && cp "/opt/python3.6/include/python3.6m/parsetok.h" "$(@D)/python_include/parsetok.h" && cp "/opt/python3.6/include/python3.6m/pystrhex.h" "$(@D)/python_include/pystrhex.h" && cp "/opt/python3.6/include/python3.6m/marshal.h" "$(@D)/python_include/marshal.h" && cp "/opt/python3.6/include/python3.6m/token.h" "$(@D)/python_include/token.h" && cp "/opt/python3.6/include/python3.6m/iterobject.h" "$(@D)/python_include/iterobject.h" && cp "/opt/python3.6/include/python3.6m/abstract.h" "$(@D)/python_include/abstract.h" && cp "/opt/python3.6/include/python3.6m/py_curses.h" "$(@D)/python_include/py_curses.h" && cp "/opt/python3.6/include/python3.6m/metagrammar.h" "$(@D)/python_include/metagrammar.h" && cp "/opt/python3.6/include/python3.6m/Python-ast.h" "$(@D)/python_include/Python-ast.h"
+cp "/opt/python3.6/include/python3.6m/Python-ast.h" "$(@D)/python_include/Python-ast.h" && cp "/opt/python3.6/include/python3.6m/Python.h" "$(@D)/python_include/Python.h" && cp "/opt/python3.6/include/python3.6m/abstract.h" "$(@D)/python_include/abstract.h" && cp "/opt/python3.6/include/python3.6m/accu.h" "$(@D)/python_include/accu.h" && cp "/opt/python3.6/include/python3.6m/asdl.h" "$(@D)/python_include/asdl.h" && cp "/opt/python3.6/include/python3.6m/ast.h" "$(@D)/python_include/ast.h" && cp "/opt/python3.6/include/python3.6m/bitset.h" "$(@D)/python_include/bitset.h" && cp "/opt/python3.6/include/python3.6m/bltinmodule.h" "$(@D)/python_include/bltinmodule.h" && cp "/opt/python3.6/include/python3.6m/boolobject.h" "$(@D)/python_include/boolobject.h" && cp "/opt/python3.6/include/python3.6m/bytearrayobject.h" "$(@D)/python_include/bytearrayobject.h" && cp "/opt/python3.6/include/python3.6m/bytes_methods.h" "$(@D)/python_include/bytes_methods.h" && cp "/opt/python3.6/include/python3.6m/bytesobject.h" "$(@D)/python_include/bytesobject.h" && cp "/opt/python3.6/include/python3.6m/cellobject.h" "$(@D)/python_include/cellobject.h" && cp "/opt/python3.6/include/python3.6m/ceval.h" "$(@D)/python_include/ceval.h" && cp "/opt/python3.6/include/python3.6m/classobject.h" "$(@D)/python_include/classobject.h" && cp "/opt/python3.6/include/python3.6m/code.h" "$(@D)/python_include/code.h" && cp "/opt/python3.6/include/python3.6m/codecs.h" "$(@D)/python_include/codecs.h" && cp "/opt/python3.6/include/python3.6m/compile.h" "$(@D)/python_include/compile.h" && cp "/opt/python3.6/include/python3.6m/complexobject.h" "$(@D)/python_include/complexobject.h" && cp "/opt/python3.6/include/python3.6m/datetime.h" "$(@D)/python_include/datetime.h" && cp "/opt/python3.6/include/python3.6m/descrobject.h" "$(@D)/python_include/descrobject.h" && cp "/opt/python3.6/include/python3.6m/dictobject.h" "$(@D)/python_include/dictobject.h" && cp "/opt/python3.6/include/python3.6m/dtoa.h" "$(@D)/python_include/dtoa.h" && cp "/opt/python3.6/include/python3.6m/dynamic_annotations.h" "$(@D)/python_include/dynamic_annotations.h" && cp "/opt/python3.6/include/python3.6m/enumobject.h" "$(@D)/python_include/enumobject.h" && cp "/opt/python3.6/include/python3.6m/errcode.h" "$(@D)/python_include/errcode.h" && cp "/opt/python3.6/include/python3.6m/eval.h" "$(@D)/python_include/eval.h" && cp "/opt/python3.6/include/python3.6m/fileobject.h" "$(@D)/python_include/fileobject.h" && cp "/opt/python3.6/include/python3.6m/fileutils.h" "$(@D)/python_include/fileutils.h" && cp "/opt/python3.6/include/python3.6m/floatobject.h" "$(@D)/python_include/floatobject.h" && cp "/opt/python3.6/include/python3.6m/frameobject.h" "$(@D)/python_include/frameobject.h" && cp "/opt/python3.6/include/python3.6m/funcobject.h" "$(@D)/python_include/funcobject.h" && cp "/opt/python3.6/include/python3.6m/genobject.h" "$(@D)/python_include/genobject.h" && cp "/opt/python3.6/include/python3.6m/graminit.h" "$(@D)/python_include/graminit.h" && cp "/opt/python3.6/include/python3.6m/grammar.h" "$(@D)/python_include/grammar.h" && cp "/opt/python3.6/include/python3.6m/import.h" "$(@D)/python_include/import.h" && cp "/opt/python3.6/include/python3.6m/intrcheck.h" "$(@D)/python_include/intrcheck.h" && cp "/opt/python3.6/include/python3.6m/iterobject.h" "$(@D)/python_include/iterobject.h" && cp "/opt/python3.6/include/python3.6m/listobject.h" "$(@D)/python_include/listobject.h" && cp "/opt/python3.6/include/python3.6m/longintrepr.h" "$(@D)/python_include/longintrepr.h" && cp 
"/opt/python3.6/include/python3.6m/longobject.h" "$(@D)/python_include/longobject.h" && cp "/opt/python3.6/include/python3.6m/marshal.h" "$(@D)/python_include/marshal.h" && cp "/opt/python3.6/include/python3.6m/memoryobject.h" "$(@D)/python_include/memoryobject.h" && cp "/opt/python3.6/include/python3.6m/metagrammar.h" "$(@D)/python_include/metagrammar.h" && cp "/opt/python3.6/include/python3.6m/methodobject.h" "$(@D)/python_include/methodobject.h" && cp "/opt/python3.6/include/python3.6m/modsupport.h" "$(@D)/python_include/modsupport.h" && cp "/opt/python3.6/include/python3.6m/moduleobject.h" "$(@D)/python_include/moduleobject.h" && cp "/opt/python3.6/include/python3.6m/namespaceobject.h" "$(@D)/python_include/namespaceobject.h" && cp "/opt/python3.6/include/python3.6m/node.h" "$(@D)/python_include/node.h" && cp "/opt/python3.6/include/python3.6m/object.h" "$(@D)/python_include/object.h" && cp "/opt/python3.6/include/python3.6m/objimpl.h" "$(@D)/python_include/objimpl.h" && cp "/opt/python3.6/include/python3.6m/odictobject.h" "$(@D)/python_include/odictobject.h" && cp "/opt/python3.6/include/python3.6m/opcode.h" "$(@D)/python_include/opcode.h" && cp "/opt/python3.6/include/python3.6m/osdefs.h" "$(@D)/python_include/osdefs.h" && cp "/opt/python3.6/include/python3.6m/osmodule.h" "$(@D)/python_include/osmodule.h" && cp "/opt/python3.6/include/python3.6m/parsetok.h" "$(@D)/python_include/parsetok.h" && cp "/opt/python3.6/include/python3.6m/patchlevel.h" "$(@D)/python_include/patchlevel.h" && cp "/opt/python3.6/include/python3.6m/pgen.h" "$(@D)/python_include/pgen.h" && cp "/opt/python3.6/include/python3.6m/pgenheaders.h" "$(@D)/python_include/pgenheaders.h" && cp "/opt/python3.6/include/python3.6m/py_curses.h" "$(@D)/python_include/py_curses.h" && cp "/opt/python3.6/include/python3.6m/pyarena.h" "$(@D)/python_include/pyarena.h" && cp "/opt/python3.6/include/python3.6m/pyatomic.h" "$(@D)/python_include/pyatomic.h" && cp "/opt/python3.6/include/python3.6m/pycapsule.h" "$(@D)/python_include/pycapsule.h" && cp "/opt/python3.6/include/python3.6m/pyconfig.h" "$(@D)/python_include/pyconfig.h" && cp "/opt/python3.6/include/python3.6m/pyctype.h" "$(@D)/python_include/pyctype.h" && cp "/opt/python3.6/include/python3.6m/pydebug.h" "$(@D)/python_include/pydebug.h" && cp "/opt/python3.6/include/python3.6m/pydtrace.h" "$(@D)/python_include/pydtrace.h" && cp "/opt/python3.6/include/python3.6m/pyerrors.h" "$(@D)/python_include/pyerrors.h" && cp "/opt/python3.6/include/python3.6m/pyexpat.h" "$(@D)/python_include/pyexpat.h" && cp "/opt/python3.6/include/python3.6m/pyfpe.h" "$(@D)/python_include/pyfpe.h" && cp "/opt/python3.6/include/python3.6m/pygetopt.h" "$(@D)/python_include/pygetopt.h" && cp "/opt/python3.6/include/python3.6m/pyhash.h" "$(@D)/python_include/pyhash.h" && cp "/opt/python3.6/include/python3.6m/pylifecycle.h" "$(@D)/python_include/pylifecycle.h" && cp "/opt/python3.6/include/python3.6m/pymacconfig.h" "$(@D)/python_include/pymacconfig.h" && cp "/opt/python3.6/include/python3.6m/pymacro.h" "$(@D)/python_include/pymacro.h" && cp "/opt/python3.6/include/python3.6m/pymath.h" "$(@D)/python_include/pymath.h" && cp "/opt/python3.6/include/python3.6m/pymem.h" "$(@D)/python_include/pymem.h" && cp "/opt/python3.6/include/python3.6m/pyport.h" "$(@D)/python_include/pyport.h" && cp "/opt/python3.6/include/python3.6m/pystate.h" "$(@D)/python_include/pystate.h" && cp "/opt/python3.6/include/python3.6m/pystrcmp.h" "$(@D)/python_include/pystrcmp.h" && cp "/opt/python3.6/include/python3.6m/pystrhex.h" 
"$(@D)/python_include/pystrhex.h" && cp "/opt/python3.6/include/python3.6m/pystrtod.h" "$(@D)/python_include/pystrtod.h" && cp "/opt/python3.6/include/python3.6m/pythonrun.h" "$(@D)/python_include/pythonrun.h" && cp "/opt/python3.6/include/python3.6m/pythread.h" "$(@D)/python_include/pythread.h" && cp "/opt/python3.6/include/python3.6m/pytime.h" "$(@D)/python_include/pytime.h" && cp "/opt/python3.6/include/python3.6m/rangeobject.h" "$(@D)/python_include/rangeobject.h" && cp "/opt/python3.6/include/python3.6m/setobject.h" "$(@D)/python_include/setobject.h" && cp "/opt/python3.6/include/python3.6m/sliceobject.h" "$(@D)/python_include/sliceobject.h" && cp "/opt/python3.6/include/python3.6m/structmember.h" "$(@D)/python_include/structmember.h" && cp "/opt/python3.6/include/python3.6m/structseq.h" "$(@D)/python_include/structseq.h" && cp "/opt/python3.6/include/python3.6m/symtable.h" "$(@D)/python_include/symtable.h" && cp "/opt/python3.6/include/python3.6m/sysmodule.h" "$(@D)/python_include/sysmodule.h" && cp "/opt/python3.6/include/python3.6m/token.h" "$(@D)/python_include/token.h" && cp "/opt/python3.6/include/python3.6m/traceback.h" "$(@D)/python_include/traceback.h" && cp "/opt/python3.6/include/python3.6m/tupleobject.h" "$(@D)/python_include/tupleobject.h" && cp "/opt/python3.6/include/python3.6m/typeslots.h" "$(@D)/python_include/typeslots.h" && cp "/opt/python3.6/include/python3.6m/ucnhash.h" "$(@D)/python_include/ucnhash.h" && cp "/opt/python3.6/include/python3.6m/unicodeobject.h" "$(@D)/python_include/unicodeobject.h" && cp "/opt/python3.6/include/python3.6m/warnings.h" "$(@D)/python_include/warnings.h" && cp "/opt/python3.6/include/python3.6m/weakrefobject.h" "$(@D)/python_include/weakrefobject.h"
""",
)
genrule(
name = "numpy_include",
outs = [
- "numpy_include/numpy/oldnumeric.h",
- "numpy_include/numpy/npy_1_7_deprecated_api.h",
- "numpy_include/numpy/ufunc_api.txt",
- "numpy_include/numpy/multiarray_api.txt",
- "numpy_include/numpy/halffloat.h",
- "numpy_include/numpy/npy_common.h",
- "numpy_include/numpy/utils.h",
- "numpy_include/numpy/npy_interrupt.h",
- "numpy_include/numpy/npy_endian.h",
+ "numpy_include/numpy/__multiarray_api.h",
"numpy_include/numpy/__ufunc_api.h",
"numpy_include/numpy/_neighborhood_iterator_imp.h",
- "numpy_include/numpy/ufuncobject.h",
+ "numpy_include/numpy/_numpyconfig.h",
+ "numpy_include/numpy/arrayobject.h",
+ "numpy_include/numpy/arrayscalars.h",
+ "numpy_include/numpy/halffloat.h",
+ "numpy_include/numpy/multiarray_api.txt",
+ "numpy_include/numpy/ndarrayobject.h",
"numpy_include/numpy/ndarraytypes.h",
- "numpy_include/numpy/npy_math.h",
"numpy_include/numpy/noprefix.h",
+ "numpy_include/numpy/npy_1_7_deprecated_api.h",
"numpy_include/numpy/npy_3kcompat.h",
- "numpy_include/numpy/arrayscalars.h",
- "numpy_include/numpy/npy_os.h",
- "numpy_include/numpy/ndarrayobject.h",
- "numpy_include/numpy/npy_no_deprecated_api.h",
- "numpy_include/numpy/arrayobject.h",
- "numpy_include/numpy/_numpyconfig.h",
- "numpy_include/numpy/__multiarray_api.h",
+ "numpy_include/numpy/npy_common.h",
"numpy_include/numpy/npy_cpu.h",
- "numpy_include/numpy/old_defines.h",
+ "numpy_include/numpy/npy_endian.h",
+ "numpy_include/numpy/npy_interrupt.h",
+ "numpy_include/numpy/npy_math.h",
+ "numpy_include/numpy/npy_no_deprecated_api.h",
+ "numpy_include/numpy/npy_os.h",
"numpy_include/numpy/numpyconfig.h",
+ "numpy_include/numpy/old_defines.h",
+ "numpy_include/numpy/oldnumeric.h",
+ "numpy_include/numpy/ufunc_api.txt",
+ "numpy_include/numpy/ufuncobject.h",
+ "numpy_include/numpy/utils.h",
],
cmd = """
-cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/oldnumeric.h" "$(@D)/numpy_include/numpy/oldnumeric.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h" "$(@D)/numpy_include/numpy/npy_1_7_deprecated_api.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/ufunc_api.txt" "$(@D)/numpy_include/numpy/ufunc_api.txt" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/multiarray_api.txt" "$(@D)/numpy_include/numpy/multiarray_api.txt" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/halffloat.h" "$(@D)/numpy_include/numpy/halffloat.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_common.h" "$(@D)/numpy_include/numpy/npy_common.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/utils.h" "$(@D)/numpy_include/numpy/utils.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_interrupt.h" "$(@D)/numpy_include/numpy/npy_interrupt.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_endian.h" "$(@D)/numpy_include/numpy/npy_endian.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/__ufunc_api.h" "$(@D)/numpy_include/numpy/__ufunc_api.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h" "$(@D)/numpy_include/numpy/_neighborhood_iterator_imp.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/ufuncobject.h" "$(@D)/numpy_include/numpy/ufuncobject.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/ndarraytypes.h" "$(@D)/numpy_include/numpy/ndarraytypes.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_math.h" "$(@D)/numpy_include/numpy/npy_math.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/noprefix.h" "$(@D)/numpy_include/numpy/noprefix.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_3kcompat.h" "$(@D)/numpy_include/numpy/npy_3kcompat.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/arrayscalars.h" "$(@D)/numpy_include/numpy/arrayscalars.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_os.h" "$(@D)/numpy_include/numpy/npy_os.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/ndarrayobject.h" "$(@D)/numpy_include/numpy/ndarrayobject.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h" "$(@D)/numpy_include/numpy/npy_no_deprecated_api.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/arrayobject.h" "$(@D)/numpy_include/numpy/arrayobject.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/_numpyconfig.h" "$(@D)/numpy_include/numpy/_numpyconfig.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/__multiarray_api.h" "$(@D)/numpy_include/numpy/__multiarray_api.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_cpu.h" "$(@D)/numpy_include/numpy/npy_cpu.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/old_defines.h" "$(@D)/numpy_include/numpy/old_defines.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/numpyconfig.h" "$(@D)/numpy_include/numpy/numpyconfig.h"
+cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/__multiarray_api.h" "$(@D)/numpy_include/numpy/__multiarray_api.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/__ufunc_api.h" "$(@D)/numpy_include/numpy/__ufunc_api.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h" "$(@D)/numpy_include/numpy/_neighborhood_iterator_imp.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/_numpyconfig.h" "$(@D)/numpy_include/numpy/_numpyconfig.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/arrayobject.h" "$(@D)/numpy_include/numpy/arrayobject.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/arrayscalars.h" "$(@D)/numpy_include/numpy/arrayscalars.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/halffloat.h" "$(@D)/numpy_include/numpy/halffloat.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/multiarray_api.txt" "$(@D)/numpy_include/numpy/multiarray_api.txt" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/ndarrayobject.h" "$(@D)/numpy_include/numpy/ndarrayobject.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/ndarraytypes.h" "$(@D)/numpy_include/numpy/ndarraytypes.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/noprefix.h" "$(@D)/numpy_include/numpy/noprefix.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h" "$(@D)/numpy_include/numpy/npy_1_7_deprecated_api.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_3kcompat.h" "$(@D)/numpy_include/numpy/npy_3kcompat.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_common.h" "$(@D)/numpy_include/numpy/npy_common.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_cpu.h" "$(@D)/numpy_include/numpy/npy_cpu.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_endian.h" "$(@D)/numpy_include/numpy/npy_endian.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_interrupt.h" "$(@D)/numpy_include/numpy/npy_interrupt.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_math.h" "$(@D)/numpy_include/numpy/npy_math.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h" "$(@D)/numpy_include/numpy/npy_no_deprecated_api.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/npy_os.h" "$(@D)/numpy_include/numpy/npy_os.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/numpyconfig.h" "$(@D)/numpy_include/numpy/numpyconfig.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/old_defines.h" "$(@D)/numpy_include/numpy/old_defines.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/oldnumeric.h" "$(@D)/numpy_include/numpy/oldnumeric.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/ufunc_api.txt" "$(@D)/numpy_include/numpy/ufunc_api.txt" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/ufuncobject.h" "$(@D)/numpy_include/numpy/ufuncobject.h" && cp "/opt/python3.6/lib/python3.6/site-packages/numpy/core/include/numpy/utils.h" "$(@D)/numpy_include/numpy/utils.h"
""",
)
diff --git a/tools/bazel.rc b/tools/bazel.rc
index 3559375d5c..913c4bc333 100644
--- a/tools/bazel.rc
+++ b/tools/bazel.rc
@@ -27,6 +27,10 @@ build --define framework_shared_object=true
build:mkl --define=using_mkl=true
build:mkl -c opt
+# This config option is used to enable the MKL-DNN open source library only,
+# without depending on the MKL binary version.
+build:mkl_open_source_only --define=using_mkl_dnn_only=true
+
build:download_clang --crosstool_top=@local_config_download_clang//:toolchain
build:download_clang --define=using_clang=true
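
A minimal usage sketch (assumptions: the config above is present in tools/bazel.rc and the standard pip-package target is being built): an MKL-DNN-only build, without the MKL binary dependency, can then be requested with the new config name, e.g.

    bazel build --config=mkl_open_source_only //tensorflow/tools/pip_package:build_pip_package

The config only adds the --define=using_mkl_dnn_only=true flag; any other target or additional --config options can be combined with it as usual.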